diff --git a/.ipynb_checkpoints/07_convolutions-checkpoint.ipynb b/.ipynb_checkpoints/07_convolutions-checkpoint.ipynb index c6872b30..09c226f7 100644 --- a/.ipynb_checkpoints/07_convolutions-checkpoint.ipynb +++ b/.ipynb_checkpoints/07_convolutions-checkpoint.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55daf5fa", + "id": "ecf5a9a2", "metadata": {}, "outputs": [], "source": [ @@ -14,12 +14,14 @@ "import matplotlib.pyplot as plt\n", "from functools import partial\n", "from datasets import load_dataset\n", - "from torch.utils.data import DataLoader\n", + "from torch.utils.data import DataLoader, default_collate\n", "import numpy as np\n", "import pickle, gzip\n", "from pathlib import Path\n", "import matplotlib as mpl\n", "import pandas as pd\n", + "from practice_deep_learning.training import *\n", + "from typing import Mapping\n", "\n", "mpl.rcParams['image.cmap'] = 'gray'\n", "%matplotlib inline" @@ -28,7 +30,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4a0094d8", + "id": "fca15a52", "metadata": {}, "outputs": [ { @@ -55,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6dcbde43", + "id": "ff7358c9", "metadata": {}, "outputs": [ { @@ -81,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b12f70f4", + "id": "b05e81c8", "metadata": {}, "outputs": [ { @@ -109,350 +111,350 @@ { "cell_type": "code", "execution_count": null, - "id": "8b39fc7b", + "id": "c0de1ad5", "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", - "\n", + "
[notebook HTML output elided: a pandas Styler table shading the 28x28 grid of pixel values for the sample image (rows 0-27); the values are identical in the old and new output, and only the auto-generated Styler element IDs differ]
\n" @@ -1375,7 +1377,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1d444a15", + "id": "fcb9eafa", "metadata": {}, "outputs": [ { @@ -1396,7 +1398,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f1cb4e91", + "id": "c877edba", "metadata": {}, "outputs": [], "source": [ @@ -1407,7 +1409,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c2bd22dc", + "id": "a40de9dc", "metadata": {}, "outputs": [ { @@ -1428,7 +1430,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4870e048", + "id": "bb453537", "metadata": {}, "outputs": [ { @@ -1446,7 +1448,7 @@ { "cell_type": "code", "execution_count": null, - "id": "76017cbb", + "id": "d11cdc70", "metadata": {}, "outputs": [ { @@ -1471,805 +1473,805 @@ { "cell_type": "code", "execution_count": null, - "id": "8081ad48", + "id": "568bd508", "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", - "\n", + "
[notebook HTML output elided: a pandas Styler table shading the grid of convolution-output values; the values are identical in the old and new output, and only the auto-generated Styler element IDs differ]
\n" @@ -3076,7 +3078,7 @@ { "cell_type": "code", "execution_count": null, - "id": "02eeb6cf", + "id": "dbf24776", "metadata": {}, "outputs": [], "source": [ @@ -3090,7 +3092,7 @@ { "cell_type": "code", "execution_count": null, - "id": "68bf1b32", + "id": "e660f079", "metadata": {}, "outputs": [ { @@ -3113,7 +3115,7 @@ }, { "cell_type": "markdown", - "id": "18ab6a2a", + "id": "b45ef398", "metadata": {}, "source": [ "#### pytorch convolutions" @@ -3122,7 +3124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f6a94319", + "id": "2ee1e750", "metadata": {}, "outputs": [ { @@ -3145,7 +3147,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40d124b2", + "id": "1a8d823f", "metadata": {}, "outputs": [ { @@ -3167,7 +3169,7 @@ { "cell_type": "code", "execution_count": null, - "id": "451f3f93", + "id": "3e2300ec", "metadata": {}, "outputs": [ { @@ -3189,7 +3191,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b813ddb0", + "id": "db2b439b", "metadata": {}, "outputs": [ { @@ -3211,7 +3213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dab5fcc0", + "id": "6b7aa274", "metadata": {}, "outputs": [ { @@ -3234,14 +3236,14 @@ { "cell_type": "code", "execution_count": null, - "id": "77589cfb", + "id": "bb75eae7", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "11.5 ms ± 4.36 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" + "8.3 ms ± 1.43 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], @@ -3252,14 +3254,14 @@ { "cell_type": "code", "execution_count": null, - "id": "14e07968", + "id": "3303abb2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "77.4 µs ± 35.3 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" + "57 µs ± 21.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ] } ], @@ -3270,15 +3272,21 @@ { "cell_type": "code", "execution_count": null, - "id": "9b784e8f", + "id": "44c53105", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "The slowest run took 7.10 times longer than the fastest. This could mean that an intermediate result is being cached.\n", - "40.6 µs ± 35.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" + "47.6 µs ± 28.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[W NNPACK.cpp:64] Could not initialize NNPACK! 
Reason: Unsupported hardware.\n" ] } ], @@ -3289,7 +3297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f460e283", + "id": "7938f8ba", "metadata": {}, "outputs": [ { @@ -3317,7 +3325,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a0e0d982", + "id": "a0372dc3", "metadata": {}, "outputs": [ { @@ -3345,7 +3353,7 @@ { "cell_type": "code", "execution_count": null, - "id": "568b97b7", + "id": "2514808c", "metadata": {}, "outputs": [ { @@ -3367,7 +3375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "03193437", + "id": "faed6331", "metadata": {}, "outputs": [], "source": [ @@ -3377,7 +3385,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0b6d13f8", + "id": "1813f7f3", "metadata": {}, "outputs": [ { @@ -3399,7 +3407,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3ca60c96", + "id": "53157100", "metadata": {}, "outputs": [ { @@ -3421,7 +3429,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f5a8985c", + "id": "9e24df79", "metadata": {}, "outputs": [ { @@ -3446,7 +3454,7 @@ }, { "cell_type": "markdown", - "id": "467ae9b6", + "id": "0b72f869", "metadata": {}, "source": [ "#### creating cnn" @@ -3455,7 +3463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cf0e45f0", + "id": "c03a87b2", "metadata": {}, "outputs": [], "source": [ @@ -3467,7 +3475,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16e9c4d4", + "id": "a2686a4d", "metadata": {}, "outputs": [], "source": [ @@ -3481,7 +3489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12529f80", + "id": "f90243f8", "metadata": {}, "outputs": [ { @@ -3502,7 +3510,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6bdffe9c", + "id": "62b94778", "metadata": {}, "outputs": [], "source": [ @@ -3516,7 +3524,7 @@ { "cell_type": "code", "execution_count": null, - "id": "79d87539", + "id": "4c04ec67", "metadata": {}, "outputs": [], "source": [ @@ -3533,7 +3541,7 @@ { "cell_type": "code", "execution_count": null, - "id": "88cf190b", + "id": "ba7074df", "metadata": {}, "outputs": [ { @@ -3554,7 +3562,75 @@ { "cell_type": "code", "execution_count": null, - "id": "2a613910", + "id": "b8a37e35", + "metadata": {}, + "outputs": [], + "source": [ + "x_imgs = x_train.view(-1,1,28,28)\n", + "xv_imgs = x_valid.view(-1,1,28,28)\n", + "train_ds,valid_ds = Dataset(x_imgs, y_train),Dataset(xv_imgs, y_valid)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d403aae5", + "metadata": {}, + "outputs": [], + "source": [ + "#|export\n", + "def_device = 'mps' if torch.backends.mps.is_available() else 'cuda' if torch.cuda.is_available() else 'cpu'\n", + "\n", + "def to_device(x, device=def_device):\n", + " if isinstance(x, torch.Tensor): return x.to(device)\n", + " if isinstance(x, Mapping): return {k:v.to(device) for k,v in x.items()}\n", + " return type(x)(to_device(o, device) for o in x)\n", + "\n", + "def collate_device(b): return to_device(default_collate(b))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33b3a116", + "metadata": {}, + "outputs": [], + "source": [ + "from torch import optim\n", + "\n", + "bs = 256\n", + "lr = 0.4\n", + "train_dl, valid_dl = get_dls(train_ds, valid_ds, bs, collate_fn=collate_device)\n", + "opt = optim.SGD(simple_cnn.parameters(), lr=lr)\n", + " \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8338744f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 0.2615232038497925 0.0\n", + "1 
0.1606189539670944 0.0\n", + "2 0.12759257712364197 0.0\n", + "3 0.23388885581493377 0.0\n", + "4 0.09892643284797668 0.0\n" + ] + } + ], + "source": [ + "loss,acc = fit(5, simple_cnn.to(def_device), F.cross_entropy, opt, train_dl, valid_dl)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6226717c", "metadata": {}, "outputs": [], "source": [] diff --git a/07_convolutions.ipynb b/07_convolutions.ipynb index c6872b30..5779c892 100644 --- a/07_convolutions.ipynb +++ b/07_convolutions.ipynb @@ -3,7 +3,7 @@ { "cell_type": "code", "execution_count": null, - "id": "55daf5fa", + "id": "ecf5a9a2", "metadata": {}, "outputs": [], "source": [ @@ -14,12 +14,14 @@ "import matplotlib.pyplot as plt\n", "from functools import partial\n", "from datasets import load_dataset\n", - "from torch.utils.data import DataLoader\n", + "from torch.utils.data import DataLoader, default_collate\n", "import numpy as np\n", "import pickle, gzip\n", "from pathlib import Path\n", "import matplotlib as mpl\n", "import pandas as pd\n", + "from practice_deep_learning.training import *\n", + "from typing import Mapping\n", "\n", "mpl.rcParams['image.cmap'] = 'gray'\n", "%matplotlib inline" @@ -28,7 +30,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4a0094d8", + "id": "fca15a52", "metadata": {}, "outputs": [ { @@ -55,7 +57,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6dcbde43", + "id": "ff7358c9", "metadata": {}, "outputs": [ { @@ -81,7 +83,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b12f70f4", + "id": "b05e81c8", "metadata": {}, "outputs": [ { @@ -109,350 +111,350 @@ { "cell_type": "code", "execution_count": null, - "id": "8b39fc7b", + "id": "c0de1ad5", "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", - "\n", + "
[notebook HTML output elided: the same pandas Styler pixel-grid table as in the checkpoint copy above; only the auto-generated Styler element IDs differ between the old and new output]
- " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 
00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.000.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
10.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.010.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
20.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.020.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
30.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.030.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
40.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.040.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
50.00.00.00.00.00.00.00.00.00.00.00.10.20.41.01.01.01.01.01.00.70.00.00.00.00.00.00.050.00.00.00.00.00.00.00.00.00.00.00.10.20.41.01.01.01.01.01.00.70.00.00.00.00.00.00.0
60.00.00.00.00.00.00.00.00.00.20.50.90.91.01.01.01.01.01.01.01.00.60.10.00.00.00.00.060.00.00.00.00.00.00.00.00.00.20.50.90.91.01.01.01.01.01.01.01.00.60.10.00.00.00.00.0
70.00.00.00.00.00.00.00.00.00.71.01.01.01.01.01.01.01.01.01.01.01.00.20.00.00.00.00.070.00.00.00.00.00.00.00.00.00.71.01.01.01.01.01.01.01.01.01.01.01.00.20.00.00.00.00.0
80.00.00.00.00.00.00.00.00.00.41.01.00.90.50.50.50.50.71.01.01.01.00.20.00.00.00.00.080.00.00.00.00.00.00.00.00.00.41.01.00.90.50.50.50.50.71.01.01.01.00.20.00.00.00.00.0
90.00.00.00.00.00.00.00.00.00.00.10.10.10.00.00.00.00.10.91.01.00.70.00.00.00.00.00.090.00.00.00.00.00.00.00.00.00.00.10.10.10.00.00.00.00.10.91.01.00.70.00.00.00.00.00.0
100.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.30.91.01.00.60.00.00.00.00.00.0100.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.30.91.01.00.60.00.00.00.00.00.0
110.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.30.71.01.01.00.10.00.00.00.00.00.0110.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.30.71.01.01.00.10.00.00.00.00.00.0
120.00.00.00.00.00.00.00.00.00.00.00.00.00.00.40.81.01.01.01.00.80.00.00.00.00.00.00.0120.00.00.00.00.00.00.00.00.00.00.00.00.00.00.40.81.01.01.01.00.80.00.00.00.00.00.00.0
130.00.00.00.00.00.00.00.00.00.10.50.80.80.81.01.01.01.00.90.40.10.00.00.00.00.00.00.0130.00.00.00.00.00.00.00.00.00.10.50.80.80.81.01.01.01.00.90.40.10.00.00.00.00.00.00.0
140.00.00.00.00.00.00.00.00.20.91.01.01.01.01.01.01.01.00.70.00.00.00.00.00.00.00.00.0140.00.00.00.00.00.00.00.00.20.91.01.01.01.01.01.01.01.00.70.00.00.00.00.00.00.00.00.0
150.00.00.00.00.00.00.00.00.20.91.01.01.01.01.01.01.01.01.00.30.00.00.00.00.00.00.00.0150.00.00.00.00.00.00.00.00.20.91.01.01.01.01.01.01.01.01.00.30.00.00.00.00.00.00.00.0
160.00.00.00.00.00.00.00.00.00.10.50.20.20.20.20.20.61.01.00.30.00.00.00.00.00.00.00.0160.00.00.00.00.00.00.00.00.00.10.50.20.20.20.20.20.61.01.00.30.00.00.00.00.00.00.00.0
170.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.11.01.00.30.00.00.00.00.00.00.00.0170.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.11.01.00.30.00.00.00.00.00.00.00.0
180.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.31.01.00.30.00.00.00.00.00.00.00.0180.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.31.01.00.30.00.00.00.00.00.00.00.0
190.00.00.00.00.00.00.00.30.00.00.00.00.00.00.00.40.91.01.00.30.00.00.00.00.00.00.00.0190.00.00.00.00.00.00.00.30.00.00.00.00.00.00.00.40.91.01.00.30.00.00.00.00.00.00.00.0
200.00.00.00.00.00.20.71.00.10.00.00.00.00.10.40.91.01.00.90.30.00.00.00.00.00.00.00.0200.00.00.00.00.00.20.71.00.10.00.00.00.00.10.40.91.01.00.90.30.00.00.00.00.00.00.00.0
210.00.00.00.00.00.81.01.00.60.50.50.50.50.81.01.01.00.70.30.00.00.00.00.00.00.00.00.0210.00.00.00.00.00.81.01.00.60.50.50.50.50.81.01.01.00.70.30.00.00.00.00.00.00.00.00.0
220.00.00.00.00.00.81.01.01.01.01.01.01.01.01.00.90.60.00.00.00.00.00.00.00.00.00.00.0220.00.00.00.00.00.81.01.01.01.01.01.01.01.01.00.90.60.00.00.00.00.00.00.00.00.00.00.0
230.00.00.00.00.00.20.61.01.01.01.01.00.80.80.60.20.00.00.00.00.00.00.00.00.00.00.00.0230.00.00.00.00.00.20.61.01.01.01.01.00.80.80.60.20.00.00.00.00.00.00.00.00.00.00.00.0
240.00.00.00.00.00.00.00.40.91.00.70.40.10.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0240.00.00.00.00.00.00.00.40.91.00.70.40.10.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
250.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0250.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
260.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0260.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
270.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0270.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
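For reference, a minimal sketch (assumed helper, not part of the diff) of how a 2-D tensor can be inspected as a rounded pandas DataFrame, the way the table summarised above is displayed in the notebook:

```python
import pandas as pd
import torch

def show_as_df(t: torch.Tensor, decimals: int = 1) -> pd.DataFrame:
    # detach (in case gradients are attached), move to CPU, round for readability
    return pd.DataFrame(t.detach().cpu().numpy()).round(decimals)

# e.g. show_as_df(x_train[0].view(28, 28)) gives a 28x28 grid of values in [0.0, 1.0]
```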
\n" @@ -1375,7 +1377,7 @@ { "cell_type": "code", "execution_count": null, - "id": "1d444a15", + "id": "fcb9eafa", "metadata": {}, "outputs": [ { @@ -1396,7 +1398,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f1cb4e91", + "id": "c877edba", "metadata": {}, "outputs": [], "source": [ @@ -1407,7 +1409,7 @@ { "cell_type": "code", "execution_count": null, - "id": "c2bd22dc", + "id": "a40de9dc", "metadata": {}, "outputs": [ { @@ -1428,7 +1430,7 @@ { "cell_type": "code", "execution_count": null, - "id": "4870e048", + "id": "bb453537", "metadata": {}, "outputs": [ { @@ -1446,7 +1448,7 @@ { "cell_type": "code", "execution_count": null, - "id": "76017cbb", + "id": "d11cdc70", "metadata": {}, "outputs": [ { @@ -1471,805 +1473,805 @@ { "cell_type": "code", "execution_count": null, - "id": "8081ad48", + "id": "568bd508", "metadata": {}, "outputs": [ { "data": { "text/html": [ "\n", - "\n", + "
\n", " \n", " \n", " \n", @@ -2303,758 +2305,758 @@ " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " 
\n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", 
- " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", 
- " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", " \n", " \n", "
 
00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.000.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
10.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.010.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
20.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.020.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
30.00.00.00.00.00.00.00.00.00.10.30.71.62.43.03.03.03.02.71.70.70.00.00.00.00.030.00.00.00.00.00.00.00.00.00.10.30.71.62.43.03.03.03.02.71.70.70.00.00.00.00.0
40.00.00.00.00.00.00.00.20.71.62.32.72.93.03.03.03.03.03.02.61.70.70.10.00.00.040.00.00.00.00.00.00.00.20.71.62.32.72.93.03.03.03.03.03.02.61.70.70.10.00.00.0
50.00.00.00.00.00.00.00.71.72.52.62.21.40.6-0.0-0.0-0.0-0.00.31.31.51.20.20.00.00.050.00.00.00.00.00.00.00.71.72.52.62.21.40.6-0.0-0.0-0.0-0.00.31.31.51.20.20.00.00.0
60.00.00.00.00.00.00.00.30.70.80.6-0.3-0.9-1.4-1.4-1.2-0.7-0.20.00.40.50.50.20.00.00.060.00.00.00.00.00.00.00.30.70.80.6-0.3-0.9-1.4-1.4-1.2-0.7-0.20.00.40.50.50.20.00.00.0
70.00.00.00.00.00.00.0-0.7-1.6-2.4-2.6-2.7-2.9-3.0-3.0-2.9-2.0-1.0-0.1-0.3-0.5-0.5-0.20.00.00.070.00.00.00.00.00.00.0-0.7-1.6-2.4-2.6-2.7-2.9-3.0-3.0-2.9-2.0-1.0-0.1-0.3-0.5-0.5-0.20.00.00.0
80.00.00.00.00.00.00.0-0.4-1.4-2.4-2.9-2.4-1.9-1.6-1.6-1.4-1.0-0.4-0.0-0.4-0.7-0.7-0.20.00.00.080.00.00.00.00.00.00.0-0.4-1.4-2.4-2.9-2.4-1.9-1.6-1.6-1.4-1.0-0.4-0.0-0.4-0.7-0.7-0.20.00.00.0
90.00.00.00.00.00.00.0-0.0-0.1-0.2-0.3-0.2-0.10.00.31.01.10.80.1-0.6-0.6-0.6-0.00.00.00.090.00.00.00.00.00.00.0-0.0-0.1-0.2-0.3-0.2-0.10.00.31.01.10.80.1-0.6-0.6-0.6-0.00.00.00.0
100.00.00.00.00.00.00.00.00.00.00.00.00.41.22.12.41.70.7-0.2-0.7-0.7-0.50.00.00.00.0100.00.00.00.00.00.00.00.00.00.00.00.00.41.22.12.41.70.7-0.2-0.7-0.7-0.50.00.00.00.0
110.00.00.00.00.00.00.00.10.61.42.02.32.52.72.61.90.8-0.4-1.5-1.5-0.9-0.10.00.00.00.0110.00.00.00.00.00.00.00.10.61.42.02.32.52.72.61.90.8-0.4-1.5-1.5-0.9-0.10.00.00.00.0
120.00.00.00.00.00.00.21.02.02.83.03.02.61.80.80.2-0.3-1.3-2.1-1.8-0.8-0.00.00.00.00.0120.00.00.00.00.00.00.21.02.02.83.03.02.61.80.80.2-0.3-1.3-2.1-1.8-0.8-0.00.00.00.00.0
130.00.00.00.00.00.00.20.91.41.51.00.70.50.20.00.00.1-0.0-0.2-0.2-0.10.00.00.00.00.0130.00.00.00.00.00.00.20.91.41.51.00.70.50.20.00.00.1-0.0-0.2-0.2-0.10.00.00.00.00.0
140.00.00.00.00.00.0-0.2-0.9-1.4-2.0-2.1-2.4-2.4-2.4-2.1-1.2-0.10.60.60.30.00.00.00.00.00.0140.00.00.00.00.00.0-0.2-0.9-1.4-2.0-2.1-2.4-2.4-2.4-2.1-1.2-0.10.60.60.30.00.00.00.00.00.0
150.00.00.00.00.00.0-0.2-1.0-2.0-2.8-3.0-3.0-3.0-3.0-2.9-1.9-0.9-0.0-0.00.00.00.00.00.00.00.0150.00.00.00.00.00.0-0.2-1.0-2.0-2.8-3.0-3.0-3.0-3.0-2.9-1.9-0.9-0.0-0.00.00.00.00.00.00.00.0
160.00.00.00.00.00.00.0-0.1-0.6-0.8-0.9-0.5-0.5-0.5-0.6-0.4-0.20.00.00.00.00.00.00.00.00.0160.00.00.00.00.00.00.0-0.1-0.6-0.8-0.9-0.5-0.5-0.5-0.6-0.4-0.20.00.00.00.00.00.00.00.00.0
170.00.00.00.00.00.30.30.30.00.00.00.00.00.41.31.30.90.00.00.00.00.00.00.00.00.0170.00.00.00.00.00.30.30.30.00.00.00.00.00.41.31.30.90.00.00.00.00.00.00.00.00.0
180.00.00.00.21.01.91.81.10.10.00.00.10.41.41.91.60.6-0.1-0.1-0.00.00.00.00.00.00.0180.00.00.00.21.01.91.81.10.10.00.00.10.41.41.91.60.6-0.1-0.1-0.00.00.00.00.00.00.0
190.00.00.00.81.82.52.21.81.61.61.61.82.32.41.60.4-0.9-1.2-0.9-0.30.00.00.00.00.00.0190.00.00.00.81.82.52.21.81.61.61.61.82.32.41.60.4-0.9-1.2-0.9-0.30.00.00.00.00.00.0
200.00.00.00.60.80.81.11.92.83.03.02.92.51.50.2-1.4-2.3-2.2-1.2-0.30.00.00.00.00.00.0200.00.00.00.60.80.81.11.92.83.03.02.92.51.50.2-1.4-2.3-2.2-1.2-0.30.00.00.00.00.00.0
210.00.00.0-0.6-1.0-1.00.00.91.31.41.20.8-0.1-1.2-2.2-2.5-2.0-1.1-0.30.00.00.00.00.00.00.0210.00.00.0-0.6-1.0-1.00.00.91.31.41.20.8-0.1-1.2-2.2-2.5-2.0-1.1-0.30.00.00.00.00.00.00.0
220.00.00.0-0.8-1.8-2.4-1.6-0.6-0.4-0.9-1.8-2.5-2.9-2.9-2.5-1.5-0.6-0.00.00.00.00.00.00.00.00.0220.00.00.0-0.8-1.8-2.4-1.6-0.6-0.4-0.9-1.8-2.5-2.9-2.9-2.5-1.5-0.6-0.00.00.00.00.00.00.00.00.0
230.00.00.0-0.2-0.8-1.8-2.6-3.0-3.0-3.0-2.8-2.6-2.2-1.6-0.7-0.20.00.00.00.00.00.00.00.00.00.0230.00.00.0-0.2-0.8-1.8-2.6-3.0-3.0-3.0-2.8-2.6-2.2-1.6-0.7-0.20.00.00.00.00.00.00.00.00.00.0
240.00.00.00.0-0.0-0.4-1.3-2.3-2.6-2.1-1.2-0.5-0.10.00.00.00.00.00.00.00.00.00.00.00.00.0240.00.00.00.0-0.0-0.4-1.3-2.3-2.6-2.1-1.2-0.5-0.10.00.00.00.00.00.00.00.00.00.00.00.00.0
250.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0250.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.00.0
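A quick check (assumed helper, not part of the diff) of why the table above is 26×26: with no padding and stride 1, a valid convolution shrinks each side by one less than the kernel size.

```python
def conv_out_size(n: int, k: int, stride: int = 1, padding: int = 0) -> int:
    # standard output-size formula: floor((n + 2*padding - k) / stride) + 1
    return (n + 2 * padding - k) // stride + 1

assert conv_out_size(28, 3) == 26  # 28x28 input, 3x3 kernel -> 26x26 output
```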
\n" @@ -3076,7 +3078,7 @@ { "cell_type": "code", "execution_count": null, - "id": "02eeb6cf", + "id": "dbf24776", "metadata": {}, "outputs": [], "source": [ @@ -3090,7 +3092,7 @@ { "cell_type": "code", "execution_count": null, - "id": "68bf1b32", + "id": "e660f079", "metadata": {}, "outputs": [ { @@ -3113,7 +3115,7 @@ }, { "cell_type": "markdown", - "id": "18ab6a2a", + "id": "b45ef398", "metadata": {}, "source": [ "#### pytorch convolutions" @@ -3122,7 +3124,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f6a94319", + "id": "2ee1e750", "metadata": {}, "outputs": [ { @@ -3145,7 +3147,7 @@ { "cell_type": "code", "execution_count": null, - "id": "40d124b2", + "id": "1a8d823f", "metadata": {}, "outputs": [ { @@ -3167,7 +3169,7 @@ { "cell_type": "code", "execution_count": null, - "id": "451f3f93", + "id": "3e2300ec", "metadata": {}, "outputs": [ { @@ -3189,7 +3191,7 @@ { "cell_type": "code", "execution_count": null, - "id": "b813ddb0", + "id": "db2b439b", "metadata": {}, "outputs": [ { @@ -3211,7 +3213,7 @@ { "cell_type": "code", "execution_count": null, - "id": "dab5fcc0", + "id": "6b7aa274", "metadata": {}, "outputs": [ { @@ -3234,14 +3236,14 @@ { "cell_type": "code", "execution_count": null, - "id": "77589cfb", + "id": "bb75eae7", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "11.5 ms ± 4.36 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" + "8.3 ms ± 1.43 ms per loop (mean ± std. dev. of 7 runs, 1 loop each)\n" ] } ], @@ -3252,14 +3254,14 @@ { "cell_type": "code", "execution_count": null, - "id": "14e07968", + "id": "3303abb2", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "77.4 µs ± 35.3 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" + "57 µs ± 21.9 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" ] } ], @@ -3270,15 +3272,21 @@ { "cell_type": "code", "execution_count": null, - "id": "9b784e8f", + "id": "44c53105", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "The slowest run took 7.10 times longer than the fastest. This could mean that an intermediate result is being cached.\n", - "40.6 µs ± 35.1 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" + "47.6 µs ± 28.5 µs per loop (mean ± std. dev. of 7 runs, 100 loops each)\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "[W NNPACK.cpp:64] Could not initialize NNPACK! 
Reason: Unsupported hardware.\n" ] } ], @@ -3289,7 +3297,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f460e283", + "id": "7938f8ba", "metadata": {}, "outputs": [ { @@ -3317,7 +3325,7 @@ { "cell_type": "code", "execution_count": null, - "id": "a0e0d982", + "id": "a0372dc3", "metadata": {}, "outputs": [ { @@ -3345,7 +3353,7 @@ { "cell_type": "code", "execution_count": null, - "id": "568b97b7", + "id": "2514808c", "metadata": {}, "outputs": [ { @@ -3367,7 +3375,7 @@ { "cell_type": "code", "execution_count": null, - "id": "03193437", + "id": "faed6331", "metadata": {}, "outputs": [], "source": [ @@ -3377,7 +3385,7 @@ { "cell_type": "code", "execution_count": null, - "id": "0b6d13f8", + "id": "1813f7f3", "metadata": {}, "outputs": [ { @@ -3399,7 +3407,7 @@ { "cell_type": "code", "execution_count": null, - "id": "3ca60c96", + "id": "53157100", "metadata": {}, "outputs": [ { @@ -3421,7 +3429,7 @@ { "cell_type": "code", "execution_count": null, - "id": "f5a8985c", + "id": "9e24df79", "metadata": {}, "outputs": [ { @@ -3446,7 +3454,7 @@ }, { "cell_type": "markdown", - "id": "467ae9b6", + "id": "0b72f869", "metadata": {}, "source": [ "#### creating cnn" @@ -3455,7 +3463,7 @@ { "cell_type": "code", "execution_count": null, - "id": "cf0e45f0", + "id": "c03a87b2", "metadata": {}, "outputs": [], "source": [ @@ -3467,7 +3475,7 @@ { "cell_type": "code", "execution_count": null, - "id": "16e9c4d4", + "id": "a2686a4d", "metadata": {}, "outputs": [], "source": [ @@ -3481,7 +3489,7 @@ { "cell_type": "code", "execution_count": null, - "id": "12529f80", + "id": "f90243f8", "metadata": {}, "outputs": [ { @@ -3502,7 +3510,7 @@ { "cell_type": "code", "execution_count": null, - "id": "6bdffe9c", + "id": "62b94778", "metadata": {}, "outputs": [], "source": [ @@ -3516,7 +3524,7 @@ { "cell_type": "code", "execution_count": null, - "id": "79d87539", + "id": "4c04ec67", "metadata": {}, "outputs": [], "source": [ @@ -3533,7 +3541,7 @@ { "cell_type": "code", "execution_count": null, - "id": "88cf190b", + "id": "ba7074df", "metadata": {}, "outputs": [ { @@ -3554,7 +3562,74 @@ { "cell_type": "code", "execution_count": null, - "id": "2a613910", + "id": "b8a37e35", + "metadata": {}, + "outputs": [], + "source": [ + "x_imgs = x_train.view(-1,1,28,28)\n", + "xv_imgs = x_valid.view(-1,1,28,28)\n", + "train_ds,valid_ds = Dataset(x_imgs, y_train),Dataset(xv_imgs, y_valid)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d403aae5", + "metadata": {}, + "outputs": [], + "source": [ + "#|export\n", + "def_device = 'mps' if torch.backends.mps.is_available() else 'cuda' if torch.cuda.is_available() else 'cpu'\n", + "\n", + "def to_device(x, device=def_device):\n", + " if isinstance(x, torch.Tensor): return x.to(device)\n", + " if isinstance(x, Mapping): return {k:v.to(device) for k,v in x.items()}\n", + " return type(x)(to_device(o, device) for o in x)\n", + "\n", + "def collate_device(b): return to_device(default_collate(b))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "33b3a116", + "metadata": {}, + "outputs": [], + "source": [ + "from torch import optim\n", + "\n", + "bs = 256\n", + "lr = 0.4\n", + "train_dl, valid_dl = get_dls(train_ds, valid_ds, bs, collate_fn=collate_device)\n", + "opt = optim.SGD(simple_cnn.parameters(), lr=lr)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "8338744f", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0 0.2615232038497925 0.0\n", + "1 0.1606189539670944 
0.0\n", + "2 0.12759257712364197 0.0\n", + "3 0.23388885581493377 0.0\n", + "4 0.09892643284797668 0.0\n" + ] + } + ], + "source": [ + "loss,acc = fit(5, simple_cnn.to(def_device), F.cross_entropy, opt, train_dl, valid_dl)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6226717c", "metadata": {}, "outputs": [], "source": [] diff --git a/_proc/04_mini_batch_training.ipynb b/_proc/04_mini_batch_training.ipynb index d97609ae..a5032d65 100644 --- a/_proc/04_mini_batch_training.ipynb +++ b/_proc/04_mini_batch_training.ipynb @@ -499,18 +499,48 @@ "```" ] }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### accuracy\n", + "\n", + "> accuracy (out, yb)" + ], + "text/plain": [ + "---\n", + "\n", + "### accuracy\n", + "\n", + "> accuracy (out, yb)" + ] + }, + "execution_count": 1, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "#| echo: false\n", + "#| output: asis\n", + "show_doc(accuracy)" + ] + }, { "cell_type": "code", "execution_count": null, - "id": "517b1844", + "id": "dcca7a8d", "metadata": { "language": "python" }, "outputs": [], "source": [ - "def accuracy(out, yb):\n", - " return (out.argmax(1)==yb).float().mean()\n", - "\n", "loss_func = F.cross_entropy" ] }, @@ -590,15 +620,35 @@ }, { "cell_type": "code", - "execution_count": null, - "id": "ee7b70dc", - "metadata": { - "language": "python" - }, - "outputs": [], + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "data": { + "text/markdown": [ + "---\n", + "\n", + "### report\n", + "\n", + "> report (loss, preds, yb)" + ], + "text/plain": [ + "---\n", + "\n", + "### report\n", + "\n", + "> report (loss, preds, yb)" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "def report(loss, preds, yb):\n", - " print(f\"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}\")" + "#| echo: false\n", + "#| output: asis\n", + "show_doc(report)" ] }, { @@ -1422,7 +1472,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -1446,7 +1496,7 @@ "Initialize self. See help(type(self)) for accurate signature." 
] }, - "execution_count": 1, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } @@ -2196,7 +2246,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 4, "metadata": {}, "outputs": [ { @@ -2206,17 +2256,17 @@ "\n", "### fit\n", "\n", - "> fit (epochs, model, loss_func, opt, train_dl, valid_ld)" + "> fit (epochs, model, loss_func, opt, train_dl, valid_dl)" ], "text/plain": [ "---\n", "\n", "### fit\n", "\n", - "> fit (epochs, model, loss_func, opt, train_dl, valid_ld)" + "> fit (epochs, model, loss_func, opt, train_dl, valid_dl)" ] }, - "execution_count": 2, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } @@ -2229,7 +2279,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "metadata": {}, "outputs": [ { @@ -2249,7 +2299,7 @@ "> get_dls (train_ds, valid_ds, bs, **kwargs)" ] }, - "execution_count": 3, + "execution_count": 5, "metadata": {}, "output_type": "execute_result" } diff --git a/_proc/_docs/04_mini_batch_training_files/figure-html/cell-80-output-1.png b/_proc/_docs/04_mini_batch_training_files/figure-html/cell-81-output-1.png similarity index 100% rename from _proc/_docs/04_mini_batch_training_files/figure-html/cell-80-output-1.png rename to _proc/_docs/04_mini_batch_training_files/figure-html/cell-81-output-1.png diff --git a/_proc/_docs/04_mini_batch_training_files/figure-html/cell-86-output-1.png b/_proc/_docs/04_mini_batch_training_files/figure-html/cell-87-output-1.png similarity index 100% rename from _proc/_docs/04_mini_batch_training_files/figure-html/cell-86-output-1.png rename to _proc/_docs/04_mini_batch_training_files/figure-html/cell-87-output-1.png diff --git a/_proc/_docs/mini_batch_training.html b/_proc/_docs/mini_batch_training.html index 6ae56514..b4b84f48 100644 --- a/_proc/_docs/mini_batch_training.html +++ b/_proc/_docs/mini_batch_training.html @@ -147,7 +147,7 @@ @@ -160,7 +160,9 @@

On this page

@@ -348,86 +350,92 @@

simple 2 layer nn

# batch training.
# accuracy.
-
-
def accuracy(out, yb):
-    return (out.argmax(1)==yb).float().mean()
-
-loss_func = F.cross_entropy
+
+
+

accuracy

+
+
 accuracy (out, yb)
+
+
+
loss_func = F.cross_entropy
-
bs = 50
-xb = x_train[:bs]
-yb = y_train[:bs]
-preds = model(xb)
-preds[0]
+
bs = 50
+xb = x_train[:bs]
+yb = y_train[:bs]
+preds = model(xb)
+preds[0]
tensor([-0.0843, -0.0098,  0.1629,  0.1187,  0.1040,  0.0934, -0.1870, -0.0530,
         -0.1361, -0.0224], grad_fn=<SelectBackward0>)
-
loss_func(preds, yb)
+
loss_func(preds, yb)
tensor(2.2846, grad_fn=<NllLossBackward0>)
-
accuracy(preds, yb)
+
accuracy(preds, yb)
tensor(0.1400)
-
-
def report(loss, preds, yb):
-    print(f"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}")
-
+
+
+
+

report

+
+
 report (loss, preds, yb)
+
-
report(loss, preds, yb)
+
report(loss, preds, yb)
loss: 2.30, accuracy: 0.14
-
n, m = x_train.shape
-lr = 0.5
-epochs = 3
-xb,yb = x_train[:bs], y_train[:bs]
-preds = model(xb)
-loss = loss_func(preds, yb)
-report(loss, preds, yb)
+
n, m = x_train.shape
+lr = 0.5
+epochs = 3
+xb,yb = x_train[:bs], y_train[:bs]
+preds = model(xb)
+loss = loss_func(preds, yb)
+report(loss, preds, yb)
loss: 2.28, accuracy: 0.14
-
for epoch in range(epochs):
-    for i in range(0, n, bs):
-        s = slice(i, min(i+bs, n))
-        xb,yb = x_train[s],y_train[s]
-        preds = model(xb)
-        loss = loss_func(preds, yb)
-        loss.backward()
-        with torch.no_grad():
-            for l in model.layers:
-                if hasattr(l, 'weight'):
-                    l.weight -= l.weight.grad * lr
-                    l.bias -= l.bias.grad * lr
-                    l.weight.grad.zero_()
-                    l.bias.grad.zero_()
-    report(loss, preds, yb)
+
for epoch in range(epochs):
+    for i in range(0, n, bs):
+        s = slice(i, min(i+bs, n))
+        xb,yb = x_train[s],y_train[s]
+        preds = model(xb)
+        loss = loss_func(preds, yb)
+        loss.backward()
+        with torch.no_grad():
+            for l in model.layers:
+                if hasattr(l, 'weight'):
+                    l.weight -= l.weight.grad * lr
+                    l.bias -= l.bias.grad * lr
+                    l.weight.grad.zero_()
+                    l.bias.grad.zero_()
+    report(loss, preds, yb)
loss: 0.17, accuracy: 0.94
 loss: 0.13, accuracy: 0.94
 loss: 0.13, accuracy: 0.96
-

-# parameters
+

+# parameters
-
m1 = nn.Module()
-m1.foo = nn.Linear(3, 4)
-m1.boo = 'hey'
-m1
+
m1 = nn.Module()
+m1.foo = nn.Linear(3, 4)
+m1.boo = 'hey'
+m1
Module(
   (foo): Linear(in_features=3, out_features=4, bias=True)
@@ -435,13 +443,13 @@ 

simple 2 layer nn

-
list(m1.named_children())
+
list(m1.named_children())
[('foo', Linear(in_features=3, out_features=4, bias=True))]
-
list(m1.parameters())
+
list(m1.parameters())
[Parameter containing:
  tensor([[-0.4626, -0.5572, -0.2930],
@@ -453,19 +461,19 @@ 

simple 2 layer nn

-
class MLP(nn.Module):
-    def __init__(self, n_in, n_h, n_out):
-        super().__init__()
-        self.l1 = nn.Linear(n_in, n_h)
-        self.relu = nn.ReLU()
-        self.l2 = nn.Linear(n_h, n_out)
-    
-    def forward(self, x):
-        return self.l2(self.relu(self.l1(x)))
+
class MLP(nn.Module):
+    def __init__(self, n_in, n_h, n_out):
+        super().__init__()
+        self.l1 = nn.Linear(n_in, n_h)
+        self.relu = nn.ReLU()
+        self.l2 = nn.Linear(n_h, n_out)
+    
+    def forward(self, x):
+        return self.l2(self.relu(self.l1(x)))
-
model = MLP(n_in, n_h, 10)
-model
+
model = MLP(n_in, n_h, 10)
+model
MLP(
   (l1): Linear(in_features=784, out_features=50, bias=True)
@@ -475,8 +483,8 @@ 

simple 2 layer nn

-
for name, l in model.named_children():
-    print(f"{name}: {l}")
+
for name, l in model.named_children():
+    print(f"{name}: {l}")
l1: Linear(in_features=784, out_features=50, bias=True)
 relu: ReLU()
@@ -484,8 +492,8 @@ 

simple 2 layer nn

-
for p in model.parameters():
-    print(p.shape)
+
for p in model.parameters():
+    print(p.shape)
torch.Size([50, 784])
 torch.Size([50])
@@ -494,60 +502,60 @@ 

simple 2 layer nn

-
def fit():
-    for epoch in range(epochs):
-        for i in range(0, n, bs):
-            s = slice(i, min(i+bs, n))
-            xb,yb = x_train[s], y_train[s]
-            preds = model(xb)
-            loss = loss_func(preds, yb)
-            loss.backward()
-            with torch.no_grad():
-                for p in model.parameters():
-                    p -= p.grad * lr
-                model.zero_grad()
-        report(loss, preds, yb)
+
def fit():
+    for epoch in range(epochs):
+        for i in range(0, n, bs):
+            s = slice(i, min(i+bs, n))
+            xb,yb = x_train[s], y_train[s]
+            preds = model(xb)
+            loss = loss_func(preds, yb)
+            loss.backward()
+            with torch.no_grad():
+                for p in model.parameters():
+                    p -= p.grad * lr
+                model.zero_grad()
+        report(loss, preds, yb)
-
fit()
+
fit()
loss: 0.02, accuracy: 1.00
 loss: 0.05, accuracy: 0.98
 loss: 0.03, accuracy: 1.00
-
# nn.Module behind the scene
+
# nn.Module behind the scene
-
class MyModule:
-    def __init__(self, n_in, n_h, n_out):
-        self._modules = {}
-        self.l1 = nn.Linear(n_in, n_h)
-        self.l2 = nn.Linear(n_h, n_out)
-        self.relu = nn.ReLU()
-    
-    def __setattr__(self, k, v):
-        if not k.startswith('_'):
-            self._modules[k] = v
-        
-        super().__setattr__(k, v)
-    
-    def __repr__(self):
-        return f"{self._modules}"
-    
-    def parameters(self):
-        for l in self._modules.values():
-            yield from l.parameters()
+
class MyModule:
+    def __init__(self, n_in, n_h, n_out):
+        self._modules = {}
+        self.l1 = nn.Linear(n_in, n_h)
+        self.l2 = nn.Linear(n_h, n_out)
+        self.relu = nn.ReLU()
+    
+    def __setattr__(self, k, v):
+        if not k.startswith('_'):
+            self._modules[k] = v
+        
+        super().__setattr__(k, v)
+    
+    def __repr__(self):
+        return f"{self._modules}"
+    
+    def parameters(self):
+        for l in self._modules.values():
+            yield from l.parameters()
-
mdl = MyModule(n_in, n_h, n_o)
-mdl
+
mdl = MyModule(n_in, n_h, n_o)
+mdl
{'l1': Linear(in_features=784, out_features=50, bias=True), 'l2': Linear(in_features=50, out_features=10, bias=True), 'relu': ReLU()}
-
for p in mdl.parameters():
-    print(p.shape)
+
for p in mdl.parameters():
+    print(p.shape)
torch.Size([50, 784])
 torch.Size([50])
@@ -555,27 +563,27 @@ 

simple 2 layer nn

torch.Size([10])
-
# registering modules
+
# registering modules
-
from functools import reduce
+
from functools import reduce
-
layers = [nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o)]
+
layers = [nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o)]
-
class Model(nn.Module):
-    def __init__(self, layers):
-        super().__init__()
-        self.layers = layers
-        for i,l in enumerate(self.layers):
-            self.add_module(f"layer_{i}", l)
-    
-    def forward(self, x):
-        return reduce(lambda val, layer: layer(val), self.layers, x)
+
class Model(nn.Module):
+    def __init__(self, layers):
+        super().__init__()
+        self.layers = layers
+        for i,l in enumerate(self.layers):
+            self.add_module(f"layer_{i}", l)
+    
+    def forward(self, x):
+        return reduce(lambda val, layer: layer(val), self.layers, x)
-
model = Model(layers)
-model
+
model = Model(layers)
+model
Model(
   (layer_0): Linear(in_features=784, out_features=50, bias=True)
@@ -585,36 +593,36 @@ 

simple 2 layer nn

-
model(xb).shape
+
model(xb).shape
torch.Size([50, 10])
-
# nn.ModuleList
+
# nn.ModuleList
-
class SequentialModel(nn.Module):
-    def __init__(self, layers):
-        super().__init__()
-        self.layers = nn.ModuleList(layers)
-    
-    def forward(self, x):
-        for l in self.layers:
-            x = l(x)
-        return x
+
class SequentialModel(nn.Module):
+    def __init__(self, layers):
+        super().__init__()
+        self.layers = nn.ModuleList(layers)
+    
+    def forward(self, x):
+        for l in self.layers:
+            x = l(x)
+        return x
-
model = SequentialModel(layers)
-model(xb).shape
+
model = SequentialModel(layers)
+model(xb).shape
torch.Size([50, 10])
-
# nn.Sequential
+
# nn.Sequential
-
model = nn.Sequential(*layers)
+
model = nn.Sequential(*layers)
-
fit()
+
fit()
loss: 0.14, accuracy: 0.96
 loss: 0.11, accuracy: 0.96
@@ -622,7 +630,7 @@ 

simple 2 layer nn

-
model
+
model
Sequential(
   (0): Linear(in_features=784, out_features=50, bias=True)
@@ -631,38 +639,38 @@ 

simple 2 layer nn

)
-
# optim
+
# optim
-
class Optimizer:
-    def __init__(self, params, lr=0.5):
-        self.params, self.lr = list(params), lr
-    
-    def step(self):
-        with torch.no_grad():
-            for p in self.params:
-                p -= p.grad * self.lr
-
-    def zero_grad(self):
-        for p in self.params:
-            p.grad.data.zero_()
+
class Optimizer:
+    def __init__(self, params, lr=0.5):
+        self.params, self.lr = list(params), lr
+    
+    def step(self):
+        with torch.no_grad():
+            for p in self.params:
+                p -= p.grad * self.lr
+
+    def zero_grad(self):
+        for p in self.params:
+            p.grad.data.zero_()
-
model = nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))
+
model = nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))
-
opt = Optimizer(model.parameters(), lr=lr)
+
opt = Optimizer(model.parameters(), lr=lr)
-
for epoch in range(epochs):
-    for i in range(0, n, bs):
-        s = slice(i, min(i+bs, n))
-        xb,yb = x_train[s],y_train[s]
-        preds = model(xb)
-        loss = loss_func(preds, yb)
-        loss.backward()
-        opt.step()
-        opt.zero_grad()
-    report(loss, preds, yb)
+
for epoch in range(epochs):
+    for i in range(0, n, bs):
+        s = slice(i, min(i+bs, n))
+        xb,yb = x_train[s],y_train[s]
+        preds = model(xb)
+        loss = loss_func(preds, yb)
+        loss.backward()
+        opt.step()
+        opt.zero_grad()
+    report(loss, preds, yb)
loss: 0.13, accuracy: 0.96
 loss: 0.12, accuracy: 0.92
@@ -670,40 +678,41 @@ 

simple 2 layer nn

-
from torch import optim
+
from torch import optim
-
def get_model():
-    model = nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))
-    opt = optim.SGD(model.parameters(), lr=lr)
-    return opt, model
+
def get_model():
+    model = nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))
+    opt = optim.SGD(model.parameters(), lr=lr)
+    return opt, model
-
opt, model = get_model()
-loss_func(model(xb), yb)
+
opt, model = get_model()
+loss_func(model(xb), yb)
tensor(2.2912, grad_fn=<NllLossBackward0>)
-
for epoch in range(epochs):
-    for i in range(0, n, bs):
-        s = slice(i, min(i+bs, n))
-        xb,yb = x_train[s],y_train[s]
-        preds = model(xb)
-        loss = loss_func(preds, yb)
-        loss.backward()
-        opt.step()
-        opt.zero_grad()
-    report(loss, preds, yb)
+
for epoch in range(epochs):
+    for i in range(0, n, bs):
+        s = slice(i, min(i+bs, n))
+        xb,yb = x_train[s],y_train[s]
+        preds = model(xb)
+        loss = loss_func(preds, yb)
+        loss.backward()
+        opt.step()
+        opt.zero_grad()
+    report(loss, preds, yb)
loss: 0.15, accuracy: 0.96
 loss: 0.11, accuracy: 0.96
 loss: 0.06, accuracy: 1.00
-
# dataset
+
# dataset

+

Dataset

@@ -711,212 +720,212 @@

Dataset

Initialize self. See help(type(self)) for accurate signature.

-
train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_test, y_test)
+
train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_test, y_test)
-
opt, model = get_model()
+
opt, model = get_model()
-
for epoch in range(epochs):
-    for i in range(0, n, bs):
-        xb,yb = train_ds[i: min(i+bs, n)]
-        preds = model(xb)
-        loss = loss_func(preds, yb)
-        loss.backward()
-        opt.step()
-        opt.zero_grad()
-    report(loss, preds, yb)
+
for epoch in range(epochs):
+    for i in range(0, n, bs):
+        xb,yb = train_ds[i: min(i+bs, n)]
+        preds = model(xb)
+        loss = loss_func(preds, yb)
+        loss.backward()
+        opt.step()
+        opt.zero_grad()
+    report(loss, preds, yb)
loss: 0.13, accuracy: 0.96
 loss: 0.10, accuracy: 0.98
 loss: 0.12, accuracy: 0.96
-
# data loader
+
# data loader
-
class DataLoader:
-    def __init__(self, ds, bs):
-        self.ds, self.bs = ds, bs
-    
-    def __iter__(self):
-        for i in range(0, len(self.ds), self.bs):
-            yield self.ds[i:i+self.bs]
+
class DataLoader:
+    def __init__(self, ds, bs):
+        self.ds, self.bs = ds, bs
+    
+    def __iter__(self):
+        for i in range(0, len(self.ds), self.bs):
+            yield self.ds[i:i+self.bs]
-
train_dl = DataLoader(train_ds, bs)
-valid_dl = DataLoader(valid_ds, bs)
+
train_dl = DataLoader(train_ds, bs)
+valid_dl = DataLoader(valid_ds, bs)
-
xb, yb = next(iter(train_dl))
-xb.shape
+
xb, yb = next(iter(train_dl))
+xb.shape
torch.Size([50, 784])
-
opt, model = get_model()
+
opt, model = get_model()
-
def fit():
-    for epoch in range(epochs):
-        for xb,yb in train_dl:
-            preds = model(xb)
-            loss = loss_func(preds, yb)
-            loss.backward()
-            opt.step()
-            opt.zero_grad()
-        report(loss, preds, yb)
+
def fit():
+    for epoch in range(epochs):
+        for xb,yb in train_dl:
+            preds = model(xb)
+            loss = loss_func(preds, yb)
+            loss.backward()
+            opt.step()
+            opt.zero_grad()
+        report(loss, preds, yb)
-
fit()
+
fit()
loss: 0.16, accuracy: 0.96
 loss: 0.11, accuracy: 0.98
 loss: 0.07, accuracy: 0.98
-

-# random sampling
+

+# random sampling
-
import random
+
import random
-
class Sampler:
-    def __init__(self, ds, shuffle=False):
-        self.n, self.shuffle = len(ds), shuffle
-    
-    def __iter__(self):
-        res = list(range(self.n))
-        if self.shuffle:
-            random.shuffle(res)
-        return iter(res)
+
class Sampler:
+    def __init__(self, ds, shuffle=False):
+        self.n, self.shuffle = len(ds), shuffle
+    
+    def __iter__(self):
+        res = list(range(self.n))
+        if self.shuffle:
+            random.shuffle(res)
+        return iter(res)
-
from itertools import islice
+
from itertools import islice
-
ss = Sampler(train_ds)
+
ss = Sampler(train_ds)
-
list(islice(ss, 5))
+
list(islice(ss, 5))
[0, 1, 2, 3, 4]
-
import fastcore.all as fc
+
import fastcore.all as fc
-
class BatchSampler:
-    def __init__(self, sampler, bs, drop_last=False):
-        fc.store_attr()
-    
-    def __iter__(self):
-        yield from fc.chunked(iter(self.sampler), self.bs, drop_last=self.drop_last)
+
class BatchSampler:
+    def __init__(self, sampler, bs, drop_last=False):
+        fc.store_attr()
+    
+    def __iter__(self):
+        yield from fc.chunked(iter(self.sampler), self.bs, drop_last=self.drop_last)
-
batches = BatchSampler(ss, 5)
-list(islice(iter(batches), 3))
+
batches = BatchSampler(ss, 5)
+list(islice(iter(batches), 3))
[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]
-
def collate(b):
-    xs, ys = zip(*b)
-    return torch.stack(xs), torch.stack(ys)
+
def collate(b):
+    xs, ys = zip(*b)
+    return torch.stack(xs), torch.stack(ys)
-
class DataLoader:
-    def __init__(self, ds, batchs, collate_fn=collate):
-        fc.store_attr()
-    
-    def __iter__(self):
-        yield from (self.collate_fn(self.ds[i] for i in b) for b in self.batchs)
+
class DataLoader:
+    def __init__(self, ds, batchs, collate_fn=collate):
+        fc.store_attr()
+    
+    def __iter__(self):
+        yield from (self.collate_fn(self.ds[i] for i in b) for b in self.batchs)
-
train_sampler = BatchSampler(Sampler(train_ds, shuffle=True), bs)
-valid_sampler = BatchSampler(Sampler(valid_ds, shuffle=True), bs)
+
train_sampler = BatchSampler(Sampler(train_ds, shuffle=True), bs)
+valid_sampler = BatchSampler(Sampler(valid_ds, shuffle=True), bs)
-
train_dl = DataLoader(train_ds, train_sampler)
-valid_dl = DataLoader(valid_ds, valid_sampler)
+
train_dl = DataLoader(train_ds, train_sampler)
+valid_dl = DataLoader(valid_ds, valid_sampler)
-
xb, yb = next(iter(valid_dl))
-xb.shape, yb.shape
+
xb, yb = next(iter(valid_dl))
+xb.shape, yb.shape
(torch.Size([50, 784]), torch.Size([50]))
-
plt.imshow(xb[0].view(28, 28));
-plt.axis('off');
+
plt.imshow(xb[0].view(28, 28));
+plt.axis('off');
-

+

-
opt, model = get_model()
+
opt, model = get_model()
-
fit()
+
fit()
loss: 0.11, accuracy: 0.94
 loss: 0.27, accuracy: 0.96
 loss: 0.03, accuracy: 1.00
-

-# multiprocessing dataloader
+

+# multiprocessing dataloader
-
import torch.multiprocessing as mp
+
import torch.multiprocessing as mp
-
class DataLoader:
-    def __init__(self, ds, batchs, collate_fn=collate, num_workers=1):
-        fc.store_attr()
-    
-    def __iter__(self):
-        with mp.Pool(self.num_workers) as ex:
-            yield from ex.map(self.ds.__getitem__, iter(self.batchs))
+
class DataLoader:
+    def __init__(self, ds, batchs, collate_fn=collate, num_workers=1):
+        fc.store_attr()
+    
+    def __iter__(self):
+        with mp.Pool(self.num_workers) as ex:
+            yield from ex.map(self.ds.__getitem__, iter(self.batchs))
-
train_dl = DataLoader(train_ds, batchs=train_sampler)
+
train_dl = DataLoader(train_ds, batchs=train_sampler)
-
xb, yb = next(iter(train_dl))
-plt.imshow(xb[0].view(28, 28));
-plt.axis('off');
+
xb, yb = next(iter(train_dl))
+plt.imshow(xb[0].view(28, 28));
+plt.axis('off');
-

+

-
# pytorch dataloaders
+
# pytorch dataloaders
-
t = RandomSampler(train_ds)
+
t = RandomSampler(train_ds)
-
next(iter(t))
+
next(iter(t))
24797
-
t = BatchSampler(train_ds, batch_size=2, drop_last=False)
-
-k  = next(iter(t))
-print(len(k))
-for ele in k:
-    print(ele[0].shape, ele[1])
+
t = BatchSampler(train_ds, batch_size=2, drop_last=False)
+
+k  = next(iter(t))
+print(len(k))
+for ele in k:
+    print(ele[0].shape, ele[1])
2
 torch.Size([784]) tensor(5)
@@ -924,12 +933,12 @@ 

Dataset

-
t = BatchSampler(RandomSampler(train_ds), batch_size=2, drop_last=False)
-
-k  = next(iter(t))
-print(len(k))
-for ele in k:
-    print(ele)
+
t = BatchSampler(RandomSampler(train_ds), batch_size=2, drop_last=False)
+
+k  = next(iter(t))
+print(len(k))
+for ele in k:
+    print(ele)
2
 33683
@@ -937,16 +946,16 @@ 

Dataset

-
train_samp = BatchSampler(RandomSampler(train_ds), bs, drop_last=False)
-valid_samp = BatchSampler(RandomSampler(valid_ds), bs, drop_last=False)
+
train_samp = BatchSampler(RandomSampler(train_ds), bs, drop_last=False)
+valid_samp = BatchSampler(RandomSampler(valid_ds), bs, drop_last=False)
-train_dl = DataLoader(train_ds, batch_sampler=train_samp, collate_fn=collate)
-valid_dl = DataLoader(valid_ds, batch_sampler=valid_samp, collate_fn=collate)
+train_dl = DataLoader(train_ds, batch_sampler=train_samp, collate_fn=collate)
+valid_dl = DataLoader(valid_ds, batch_sampler=valid_samp, collate_fn=collate)
-opt, model = get_model()
-fit()
+opt, model = get_model()
+fit()
loss: 0.20, accuracy: 0.94
 loss: 0.11, accuracy: 0.98
@@ -954,26 +963,26 @@ 

Dataset

-train_dl = DataLoader(train_ds, bs, shuffle=True, num_workers=2, drop_last=True)
-valid_dl = DataLoader(valid_ds, bs, shuffle=False, num_workers=2)
+train_dl = DataLoader(train_ds, bs, shuffle=True, num_workers=2, drop_last=True)
+valid_dl = DataLoader(valid_ds, bs, shuffle=False, num_workers=2)
-opt, model = get_model()
-fit()
+opt, model = get_model()
+fit()
loss: 0.08, accuracy: 0.98
 loss: 0.31, accuracy: 0.86
 loss: 0.11, accuracy: 0.98
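The keyword form used here is shorthand: given batch_size, shuffle and drop_last, PyTorch wires up a RandomSampler, a BatchSampler and a default collate internally, and num_workers moves the item fetching into worker processes. Something roughly equivalent to the training loader above, spelled out explicitly (illustrative only):

explicit_train_dl = DataLoader(
    train_ds,
    batch_sampler=BatchSampler(RandomSampler(train_ds), bs, drop_last=True),
    collate_fn=default_collate,
    num_workers=2,
)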
-

-# validation
+

+# validation

fit

- fit (epochs, model, loss_func, opt, train_dl, valid_ld)
+ fit (epochs, model, loss_func, opt, train_dl, valid_dl)

@@ -983,11 +992,11 @@

get_dls

 get_dls (train_ds, valid_ds, bs, **kwargs)
-train_dl, valid_dl = get_dls(train_ds, valid_ds, bs)
-opt, model = get_model()
+train_dl, valid_dl = get_dls(train_ds, valid_ds, bs)
+opt, model = get_model()
-
+
0 0.1775239165313542 0.948100003004074
 1 0.1179210783354938 0.9646000063419342
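For reference, the fit being called here runs a training pass and then scores the validation set each epoch, which is where the per-epoch loss/accuracy lines above come from. Only fragments of the exported version are visible in this diff, so the following is a reconstruction consistent with those fragments; the exact reduction details are my own assumption.

def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            loss = loss_func(model(xb), yb)
            loss.backward()
            opt.step()
            opt.zero_grad()

        model.eval()
        with torch.no_grad():
            total_loss, total_acc, count = 0.0, 0.0, 0
            for xb, yb in valid_dl:
                preds = model(xb)
                n = len(xb)
                count += n
                total_loss += loss_func(preds, yb).item() * n
                total_acc += accuracy(preds, yb).item() * n
        print(epoch, total_loss / count, total_acc / count)

    return total_loss / count, total_acc / count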
diff --git a/_proc/_docs/search.json b/_proc/_docs/search.json
index 8a189de4..d248b54e 100644
--- a/_proc/_docs/search.json
+++ b/_proc/_docs/search.json
@@ -54,6 +54,6 @@
     "href": "mini_batch_training.html",
     "title": "simple 2 layer nn",
     "section": "",
-    "text": "data_path = Path('../data/mnist.pkl.gz')\nwith gzip.open(data_path, 'r') as f:\n    ((x_train, y_train), (x_test, y_test), _) = pickle.load(f, encoding='latin') \nx_train, y_train, x_test, y_test = map(torch.tensor, (x_train, y_train, x_test, y_test))\nx_train.shape, y_train.shape, x_test.shape, y_test.shape\n\n(torch.Size([50000, 784]),\n torch.Size([50000]),\n torch.Size([10000, 784]),\n torch.Size([10000]))\n\n\n\nimg = x_train[0]\nimg = img.view(28, 28)\nplt.imshow(img);\nplt.axis('off');\n\n\n\n\n\n\n\n\n\nclass Model(nn.Module):\n    def __init__(self, n_in, n_h, n_o):\n        super().__init__()\n        self.layers = [nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o)]\n    \n    def __call__(self, x):\n        for l in self.layers:\n            x = l(x)\n        return x\n\n\nn_in = x_train.shape[1]\nn_h = 50\nn_o = 10\n\nmodel = Model(n_in, n_h, n_o)\npred = model(x_train)\npred.shape\n\ntorch.Size([50000, 10])\n\n\n# cross entropy loss\n\ndef log_softmax(x):\n    return (x.exp()/x.exp().sum(-1, keepdim=True)).log()\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<LogBackward0>)\n\n\n# log product to sum trick\n\ndef log_softmax(x):\n    return x - x.exp().sum(-1, keepdim=True).log()\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<SubBackward0>)\n\n\n# log sum exp trick\n* normalize with the maximum value, so avoid exploding big activations.\n\ndef logsumexp(x):\n    m = x.max(-1)[-1]\n    return m + (x-m[:,None]).exp().sum(-1).log()\n\n\ndef log_softmax(x):\n    return x - logsumexp(x)[:,None]\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<SubBackward0>)\n\n\n# pytorch logsumexp function\n\ndef log_softmax(x):\n    return x - x.logsumexp(-1, keepdim=True)\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<SubBackward0>)\n\n\n# negative log likeliehood\n* for 
one hot input vector, it simplifies to the following formula.\n\ndef nll(inp, targ):\n    return - inp[range(targ.shape[0]), targ].mean()\n\n\nsm_pred = log_softmax(pred)\nloss = nll(sm_pred, y_train)\nloss\n\ntensor(2.3028, grad_fn=<NegBackward0>)\n\n\n# compare it with native pytorch implementation of nll.\n\nloss_pytorch = F.nll_loss(F.log_softmax(pred, -1), y_train)\nloss_pytorch\n\ntensor(2.3028, grad_fn=<NllLossBackward0>)\n\n\n# nll and softmax combined implementation.\n\nloss_pytorch = F.cross_entropy(pred, y_train)\nloss_pytorch\n\ntensor(2.3028, grad_fn=<NllLossBackward0>)\n\n\n# batch training.\n# accuracy.\n\ndef accuracy(out, yb):\n    return (out.argmax(1)==yb).float().mean()\n\nloss_func = F.cross_entropy\n\n\nbs = 50\nxb = x_train[:bs]\nyb = y_train[:bs]\npreds = model(xb)\npreds[0]\n\ntensor([-0.0843, -0.0098,  0.1629,  0.1187,  0.1040,  0.0934, -0.1870, -0.0530,\n        -0.1361, -0.0224], grad_fn=<SelectBackward0>)\n\n\n\nloss_func(preds, yb)\n\ntensor(2.2846, grad_fn=<NllLossBackward0>)\n\n\n\naccuracy(preds, yb)\n\ntensor(0.1400)\n\n\n\ndef report(loss, preds, yb):\n    print(f\"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}\")\n\n\nreport(loss, preds, yb)\n\nloss: 2.30, accuracy: 0.14\n\n\n\nn, m = x_train.shape\nlr = 0.5\nepochs = 3\nxb,yb = x_train[:bs], y_train[:bs]\npreds = model(xb)\nloss = loss_func(preds, yb)\nreport(loss, preds, yb)\n\nloss: 2.28, accuracy: 0.14\n\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        s = slice(i, min(i+bs, n))\n        xb,yb = x_train[s],y_train[s]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        with torch.no_grad():\n            for l in model.layers:\n                if hasattr(l, 'weight'):\n                    l.weight -= l.weight.grad * lr\n                    l.bias -= l.bias.grad * lr\n                    l.weight.grad.zero_()\n                    l.bias.grad.zero_()\n    report(loss, preds, yb)\n\nloss: 0.17, accuracy: 0.94\nloss: 0.13, accuracy: 0.94\nloss: 0.13, accuracy: 0.96\n\n\n\n# parameters\n\nm1 = nn.Module()\nm1.foo = nn.Linear(3, 4)\nm1.boo = 'hey'\nm1\n\nModule(\n  (foo): Linear(in_features=3, out_features=4, bias=True)\n)\n\n\n\nlist(m1.named_children())\n\n[('foo', Linear(in_features=3, out_features=4, bias=True))]\n\n\n\nlist(m1.parameters())\n\n[Parameter containing:\n tensor([[-0.4626, -0.5572, -0.2930],\n         [-0.2142,  0.2954, -0.5759],\n         [-0.0873,  0.5067,  0.0329],\n         [ 0.1627,  0.2251, -0.2415]], requires_grad=True),\n Parameter containing:\n tensor([-0.4074,  0.0654,  0.3297, -0.2555], requires_grad=True)]\n\n\n\nclass MLP(nn.Module):\n    def __init__(self, n_in, n_h, n_out):\n        super().__init__()\n        self.l1 = nn.Linear(n_in, n_h)\n        self.relu = nn.ReLU()\n        self.l2 = nn.Linear(n_h, n_out)\n    \n    def forward(self, x):\n        return self.l2(self.relu(self.l1(x)))\n\n\nmodel = MLP(n_in, n_h, 10)\nmodel\n\nMLP(\n  (l1): Linear(in_features=784, out_features=50, bias=True)\n  (relu): ReLU()\n  (l2): Linear(in_features=50, out_features=10, bias=True)\n)\n\n\n\nfor name, l in model.named_children():\n    print(f\"{name}: {l}\")\n\nl1: Linear(in_features=784, out_features=50, bias=True)\nrelu: ReLU()\nl2: Linear(in_features=50, out_features=10, bias=True)\n\n\n\nfor p in model.parameters():\n    print(p.shape)\n\ntorch.Size([50, 784])\ntorch.Size([50])\ntorch.Size([10, 50])\ntorch.Size([10])\n\n\n\ndef fit():\n    for epoch in range(epochs):\n        for i in range(0, n, bs):\n    
        s = slice(i, min(i+bs, n))\n            xb,yb = x_train[s], y_train[s]\n            preds = model(xb)\n            loss = loss_func(preds, yb)\n            loss.backward()\n            with torch.no_grad():\n                for p in model.parameters():\n                    p -= p.grad * lr\n                model.zero_grad()\n        report(loss, preds, yb)\n\n\nfit()\n\nloss: 0.02, accuracy: 1.00\nloss: 0.05, accuracy: 0.98\nloss: 0.03, accuracy: 1.00\n\n\n# nn.Module behind the scene\n\nclass MyModule:\n    def __init__(self, n_in, n_h, n_out):\n        self._modules = {}\n        self.l1 = nn.Linear(n_in, n_h)\n        self.l2 = nn.Linear(n_h, n_out)\n        self.relu = nn.ReLU()\n    \n    def __setattr__(self, k, v):\n        if not k.startswith('_'):\n            self._modules[k] = v\n        \n        super().__setattr__(k, v)\n    \n    def __repr__(self):\n        return f\"{self._modules}\"\n    \n    def parameters(self):\n        for l in self._modules.values():\n            yield from l.parameters()\n\n\nmdl = MyModule(n_in, n_h, n_o)\nmdl\n\n{'l1': Linear(in_features=784, out_features=50, bias=True), 'l2': Linear(in_features=50, out_features=10, bias=True), 'relu': ReLU()}\n\n\n\nfor p in mdl.parameters():\n    print(p.shape)\n\ntorch.Size([50, 784])\ntorch.Size([50])\ntorch.Size([10, 50])\ntorch.Size([10])\n\n\n# registering modules\n\nfrom functools import reduce\n\n\nlayers = [nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o)]\n\n\nclass Model(nn.Module):\n    def __init__(self, layers):\n        super().__init__()\n        self.layers = layers\n        for i,l in enumerate(self.layers):\n            self.add_module(f\"layer_{i}\", l)\n    \n    def forward(self, x):\n        return reduce(lambda val, layer: layer(val), self.layers, x)\n\n\nmodel = Model(layers)\nmodel\n\nModel(\n  (layer_0): Linear(in_features=784, out_features=50, bias=True)\n  (layer_1): ReLU()\n  (layer_2): Linear(in_features=50, out_features=10, bias=True)\n)\n\n\n\nmodel(xb).shape\n\ntorch.Size([50, 10])\n\n\n# nn.ModuleList\n\nclass SequentialModel(nn.Module):\n    def __init__(self, layers):\n        super().__init__()\n        self.layers = nn.ModuleList(layers)\n    \n    def forward(self, x):\n        for l in self.layers:\n            x = l(x)\n        return x\n\n\nmodel = SequentialModel(layers)\nmodel(xb).shape\n\ntorch.Size([50, 10])\n\n\n# nn.Sequential\n\nmodel = nn.Sequential(*layers)\n\n\nfit()\n\nloss: 0.14, accuracy: 0.96\nloss: 0.11, accuracy: 0.96\nloss: 0.05, accuracy: 1.00\n\n\n\nmodel\n\nSequential(\n  (0): Linear(in_features=784, out_features=50, bias=True)\n  (1): ReLU()\n  (2): Linear(in_features=50, out_features=10, bias=True)\n)\n\n\n# optim\n\nclass Optimizer:\n    def __init__(self, params, lr=0.5):\n        self.params, self.lr = list(params), lr\n    \n    def step(self):\n        with torch.no_grad():\n            for p in self.params:\n                p -= p.grad * self.lr\n\n    def zero_grad(self):\n        for p in self.params:\n            p.grad.data.zero_()\n\n\nmodel = nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))\n\n\nopt = Optimizer(model.parameters(), lr=lr)\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        s = slice(i, min(i+bs, n))\n        xb,yb = x_train[s],y_train[s]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        opt.step()\n        opt.zero_grad()\n    report(loss, preds, yb)\n\nloss: 0.13, accuracy: 0.96\nloss: 0.12, accuracy: 0.92\nloss: 0.08, 
accuracy: 0.96\n\n\n\nfrom torch import optim\n\n\ndef get_model():\n    model = nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))\n    opt = optim.SGD(model.parameters(), lr=lr)\n    return opt, model\n\n\nopt, model = get_model()\nloss_func(model(xb), yb)\n\ntensor(2.2912, grad_fn=<NllLossBackward0>)\n\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        s = slice(i, min(i+bs, n))\n        xb,yb = x_train[s],y_train[s]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        opt.step()\n        opt.zero_grad()\n    report(loss, preds, yb)\n\nloss: 0.15, accuracy: 0.96\nloss: 0.11, accuracy: 0.96\nloss: 0.06, accuracy: 1.00\n\n\n# dataset\n\n\nDataset\n\n Dataset (x, y)\n\nInitialize self. See help(type(self)) for accurate signature.\n\ntrain_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_test, y_test)\n\n\nopt, model = get_model()\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        xb,yb = train_ds[i: min(i+bs, n)]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        opt.step()\n        opt.zero_grad()\n    report(loss, preds, yb)\n\nloss: 0.13, accuracy: 0.96\nloss: 0.10, accuracy: 0.98\nloss: 0.12, accuracy: 0.96\n\n\n# data loader\n\nclass DataLoader:\n    def __init__(self, ds, bs):\n        self.ds, self.bs = ds, bs\n    \n    def __iter__(self):\n        for i in range(0, len(self.ds), self.bs):\n            yield self.ds[i:i+self.bs]\n\n\ntrain_dl = DataLoader(train_ds, bs)\nvalid_dl = DataLoader(valid_ds, bs)\n\n\nxb, yb = next(iter(train_dl))\nxb.shape\n\ntorch.Size([50, 784])\n\n\n\nopt, model = get_model()\n\n\ndef fit():\n    for epoch in range(epochs):\n        for xb,yb in train_dl:\n            preds = model(xb)\n            loss = loss_func(preds, yb)\n            loss.backward()\n            opt.step()\n            opt.zero_grad()\n        report(loss, preds, yb)\n\n\nfit()\n\nloss: 0.16, accuracy: 0.96\nloss: 0.11, accuracy: 0.98\nloss: 0.07, accuracy: 0.98\n\n\n\n# random sampling\n\nimport random\n\n\nclass Sampler:\n    def __init__(self, ds, shuffle=False):\n        self.n, self.shuffle = len(ds), shuffle\n    \n    def __iter__(self):\n        res = list(range(self.n))\n        if self.shuffle:\n            random.shuffle(res)\n        return iter(res)\n\n\nfrom itertools import islice\n\n\nss = Sampler(train_ds)\n\n\nlist(islice(ss, 5))\n\n[0, 1, 2, 3, 4]\n\n\n\nimport fastcore.all as fc\n\n\nclass BatchSampler:\n    def __init__(self, sampler, bs, drop_last=False):\n        fc.store_attr()\n    \n    def __iter__(self):\n        yield from fc.chunked(iter(self.sampler), self.bs, drop_last=self.drop_last)\n\n\nbatches = BatchSampler(ss, 5)\nlist(islice(iter(batches), 3))\n\n[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]\n\n\n\ndef collate(b):\n    xs, ys = zip(*b)\n    return torch.stack(xs), torch.stack(ys)\n\n\nclass DataLoader:\n    def __init__(self, ds, batchs, collate_fn=collate):\n        fc.store_attr()\n    \n    def __iter__(self):\n        yield from (self.collate_fn(self.ds[i] for i in b) for b in self.batchs)\n\n\ntrain_sampler = BatchSampler(Sampler(train_ds, shuffle=True), bs)\nvalid_sampler = BatchSampler(Sampler(valid_ds, shuffle=True), bs)\n\n\ntrain_dl = DataLoader(train_ds, train_sampler)\nvalid_dl = DataLoader(valid_ds, valid_sampler)\n\n\nxb, yb = next(iter(valid_dl))\nxb.shape, yb.shape\n\n(torch.Size([50, 784]), torch.Size([50]))\n\n\n\nplt.imshow(xb[0].view(28, 
28));\nplt.axis('off');\n\n\n\n\n\n\n\n\n\nopt, model = get_model()\n\n\nfit()\n\nloss: 0.11, accuracy: 0.94\nloss: 0.27, accuracy: 0.96\nloss: 0.03, accuracy: 1.00\n\n\n\n# multiprocessing dataloader\n\nimport torch.multiprocessing as mp\n\n\nclass DataLoader:\n    def __init__(self, ds, batchs, collate_fn=collate, num_workers=1):\n        fc.store_attr()\n    \n    def __iter__(self):\n        with mp.Pool(self.num_workers) as ex:\n            yield from ex.map(self.ds.__getitem__, iter(self.batchs))\n\n\ntrain_dl = DataLoader(train_ds, batchs=train_sampler)\n\n\nxb, yb = next(iter(train_dl))\nplt.imshow(xb[0].view(28, 28));\nplt.axis('off');\n\n\n\n\n\n\n\n\n# pytorch dataloaders\n\nt = RandomSampler(train_ds)\n\n\nnext(iter(t))\n\n24797\n\n\n\nt = BatchSampler(train_ds, batch_size=2, drop_last=False)\n\nk  = next(iter(t))\nprint(len(k))\nfor ele in k:\n    print(ele[0].shape, ele[1])\n\n2\ntorch.Size([784]) tensor(5)\ntorch.Size([784]) tensor(0)\n\n\n\nt = BatchSampler(RandomSampler(train_ds), batch_size=2, drop_last=False)\n\nk  = next(iter(t))\nprint(len(k))\nfor ele in k:\n    print(ele)\n\n2\n33683\n36592\n\n\n\ntrain_samp = BatchSampler(RandomSampler(train_ds), bs, drop_last=False)\nvalid_samp = BatchSampler(RandomSampler(valid_ds), bs, drop_last=False)\n\n\ntrain_dl = DataLoader(train_ds, batch_sampler=train_samp, collate_fn=collate)\nvalid_dl = DataLoader(valid_ds, batch_sampler=valid_samp, collate_fn=collate)\n\n\nopt, model = get_model()\nfit()\n\nloss: 0.20, accuracy: 0.94\nloss: 0.11, accuracy: 0.98\nloss: 0.20, accuracy: 0.98\n\n\n\ntrain_dl = DataLoader(train_ds, bs, shuffle=True, num_workers=2, drop_last=True)\nvalid_dl = DataLoader(valid_ds, bs, shuffle=False, num_workers=2)\n\n\nopt, model = get_model()\nfit()\n\nloss: 0.08, accuracy: 0.98\nloss: 0.31, accuracy: 0.86\nloss: 0.11, accuracy: 0.98\n\n\n\n# validation\n\n\n\nfit\n\n fit (epochs, model, loss_func, opt, train_dl, valid_ld)\n\n\n\n\nget_dls\n\n get_dls (train_ds, valid_ds, bs, **kwargs)\n\n\ntrain_dl, valid_dl = get_dls(train_ds, valid_ds, bs)\nopt, model = get_model()\n\n\n\n\n0 0.1775239165313542 0.948100003004074\n1 0.1179210783354938 0.9646000063419342\n2 0.11550588405691087 0.9665000039339066\n3 0.10593999677803367 0.9698000079393387\n4 0.10098711441038176 0.9727000087499619\nCPU times: user 17.8 s, sys: 16.1 s, total: 33.8 s\nWall time: 4.71 s"
+    "text": "data_path = Path('../data/mnist.pkl.gz')\nwith gzip.open(data_path, 'r') as f:\n    ((x_train, y_train), (x_test, y_test), _) = pickle.load(f, encoding='latin') \nx_train, y_train, x_test, y_test = map(torch.tensor, (x_train, y_train, x_test, y_test))\nx_train.shape, y_train.shape, x_test.shape, y_test.shape\n\n(torch.Size([50000, 784]),\n torch.Size([50000]),\n torch.Size([10000, 784]),\n torch.Size([10000]))\n\n\n\nimg = x_train[0]\nimg = img.view(28, 28)\nplt.imshow(img);\nplt.axis('off');\n\n\n\n\n\n\n\n\n\nclass Model(nn.Module):\n    def __init__(self, n_in, n_h, n_o):\n        super().__init__()\n        self.layers = [nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o)]\n    \n    def __call__(self, x):\n        for l in self.layers:\n            x = l(x)\n        return x\n\n\nn_in = x_train.shape[1]\nn_h = 50\nn_o = 10\n\nmodel = Model(n_in, n_h, n_o)\npred = model(x_train)\npred.shape\n\ntorch.Size([50000, 10])\n\n\n# cross entropy loss\n\ndef log_softmax(x):\n    return (x.exp()/x.exp().sum(-1, keepdim=True)).log()\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<LogBackward0>)\n\n\n# log product to sum trick\n\ndef log_softmax(x):\n    return x - x.exp().sum(-1, keepdim=True).log()\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<SubBackward0>)\n\n\n# log sum exp trick\n* normalize with the maximum value, so avoid exploding big activations.\n\ndef logsumexp(x):\n    m = x.max(-1)[-1]\n    return m + (x-m[:,None]).exp().sum(-1).log()\n\n\ndef log_softmax(x):\n    return x - logsumexp(x)[:,None]\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<SubBackward0>)\n\n\n# pytorch logsumexp function\n\ndef log_softmax(x):\n    return x - x.logsumexp(-1, keepdim=True)\n\n\nlog_softmax(pred)\n\ntensor([[-2.3917, -2.3172, -2.1445,  ..., -2.3604, -2.4435, -2.3298],\n        [-2.3426, -2.2119, -2.2799,  ..., -2.3664, -2.4151, -2.2220],\n        [-2.3725, -2.2966, -2.2658,  ..., -2.2858, -2.3270, -2.3698],\n        ...,\n        [-2.4004, -2.3082, -2.1309,  ..., -2.3633, -2.4319, -2.2571],\n        [-2.4322, -2.3229, -2.1224,  ..., -2.3613, -2.4487, -2.2554],\n        [-2.3660, -2.2850, -2.0563,  ..., -2.3602, -2.5124, -2.3140]],\n       grad_fn=<SubBackward0>)\n\n\n# negative log likeliehood\n* for 
one hot input vector, it simplifies to the following formula.\n\ndef nll(inp, targ):\n    return - inp[range(targ.shape[0]), targ].mean()\n\n\nsm_pred = log_softmax(pred)\nloss = nll(sm_pred, y_train)\nloss\n\ntensor(2.3028, grad_fn=<NegBackward0>)\n\n\n# compare it with native pytorch implementation of nll.\n\nloss_pytorch = F.nll_loss(F.log_softmax(pred, -1), y_train)\nloss_pytorch\n\ntensor(2.3028, grad_fn=<NllLossBackward0>)\n\n\n# nll and softmax combined implementation.\n\nloss_pytorch = F.cross_entropy(pred, y_train)\nloss_pytorch\n\ntensor(2.3028, grad_fn=<NllLossBackward0>)\n\n\n# batch training.\n# accuracy.\n\n\naccuracy\n\n accuracy (out, yb)\n\n\nloss_func = F.cross_entropy\n\n\nbs = 50\nxb = x_train[:bs]\nyb = y_train[:bs]\npreds = model(xb)\npreds[0]\n\ntensor([-0.0843, -0.0098,  0.1629,  0.1187,  0.1040,  0.0934, -0.1870, -0.0530,\n        -0.1361, -0.0224], grad_fn=<SelectBackward0>)\n\n\n\nloss_func(preds, yb)\n\ntensor(2.2846, grad_fn=<NllLossBackward0>)\n\n\n\naccuracy(preds, yb)\n\ntensor(0.1400)\n\n\n\n\n\nreport\n\n report (loss, preds, yb)\n\n\nreport(loss, preds, yb)\n\nloss: 2.30, accuracy: 0.14\n\n\n\nn, m = x_train.shape\nlr = 0.5\nepochs = 3\nxb,yb = x_train[:bs], y_train[:bs]\npreds = model(xb)\nloss = loss_func(preds, yb)\nreport(loss, preds, yb)\n\nloss: 2.28, accuracy: 0.14\n\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        s = slice(i, min(i+bs, n))\n        xb,yb = x_train[s],y_train[s]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        with torch.no_grad():\n            for l in model.layers:\n                if hasattr(l, 'weight'):\n                    l.weight -= l.weight.grad * lr\n                    l.bias -= l.bias.grad * lr\n                    l.weight.grad.zero_()\n                    l.bias.grad.zero_()\n    report(loss, preds, yb)\n\nloss: 0.17, accuracy: 0.94\nloss: 0.13, accuracy: 0.94\nloss: 0.13, accuracy: 0.96\n\n\n\n# parameters\n\nm1 = nn.Module()\nm1.foo = nn.Linear(3, 4)\nm1.boo = 'hey'\nm1\n\nModule(\n  (foo): Linear(in_features=3, out_features=4, bias=True)\n)\n\n\n\nlist(m1.named_children())\n\n[('foo', Linear(in_features=3, out_features=4, bias=True))]\n\n\n\nlist(m1.parameters())\n\n[Parameter containing:\n tensor([[-0.4626, -0.5572, -0.2930],\n         [-0.2142,  0.2954, -0.5759],\n         [-0.0873,  0.5067,  0.0329],\n         [ 0.1627,  0.2251, -0.2415]], requires_grad=True),\n Parameter containing:\n tensor([-0.4074,  0.0654,  0.3297, -0.2555], requires_grad=True)]\n\n\n\nclass MLP(nn.Module):\n    def __init__(self, n_in, n_h, n_out):\n        super().__init__()\n        self.l1 = nn.Linear(n_in, n_h)\n        self.relu = nn.ReLU()\n        self.l2 = nn.Linear(n_h, n_out)\n    \n    def forward(self, x):\n        return self.l2(self.relu(self.l1(x)))\n\n\nmodel = MLP(n_in, n_h, 10)\nmodel\n\nMLP(\n  (l1): Linear(in_features=784, out_features=50, bias=True)\n  (relu): ReLU()\n  (l2): Linear(in_features=50, out_features=10, bias=True)\n)\n\n\n\nfor name, l in model.named_children():\n    print(f\"{name}: {l}\")\n\nl1: Linear(in_features=784, out_features=50, bias=True)\nrelu: ReLU()\nl2: Linear(in_features=50, out_features=10, bias=True)\n\n\n\nfor p in model.parameters():\n    print(p.shape)\n\ntorch.Size([50, 784])\ntorch.Size([50])\ntorch.Size([10, 50])\ntorch.Size([10])\n\n\n\ndef fit():\n    for epoch in range(epochs):\n        for i in range(0, n, bs):\n            s = slice(i, min(i+bs, n))\n            xb,yb = x_train[s], y_train[s]\n            
preds = model(xb)\n            loss = loss_func(preds, yb)\n            loss.backward()\n            with torch.no_grad():\n                for p in model.parameters():\n                    p -= p.grad * lr\n                model.zero_grad()\n        report(loss, preds, yb)\n\n\nfit()\n\nloss: 0.02, accuracy: 1.00\nloss: 0.05, accuracy: 0.98\nloss: 0.03, accuracy: 1.00\n\n\n# nn.Module behind the scene\n\nclass MyModule:\n    def __init__(self, n_in, n_h, n_out):\n        self._modules = {}\n        self.l1 = nn.Linear(n_in, n_h)\n        self.l2 = nn.Linear(n_h, n_out)\n        self.relu = nn.ReLU()\n    \n    def __setattr__(self, k, v):\n        if not k.startswith('_'):\n            self._modules[k] = v\n        \n        super().__setattr__(k, v)\n    \n    def __repr__(self):\n        return f\"{self._modules}\"\n    \n    def parameters(self):\n        for l in self._modules.values():\n            yield from l.parameters()\n\n\nmdl = MyModule(n_in, n_h, n_o)\nmdl\n\n{'l1': Linear(in_features=784, out_features=50, bias=True), 'l2': Linear(in_features=50, out_features=10, bias=True), 'relu': ReLU()}\n\n\n\nfor p in mdl.parameters():\n    print(p.shape)\n\ntorch.Size([50, 784])\ntorch.Size([50])\ntorch.Size([10, 50])\ntorch.Size([10])\n\n\n# registering modules\n\nfrom functools import reduce\n\n\nlayers = [nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o)]\n\n\nclass Model(nn.Module):\n    def __init__(self, layers):\n        super().__init__()\n        self.layers = layers\n        for i,l in enumerate(self.layers):\n            self.add_module(f\"layer_{i}\", l)\n    \n    def forward(self, x):\n        return reduce(lambda val, layer: layer(val), self.layers, x)\n\n\nmodel = Model(layers)\nmodel\n\nModel(\n  (layer_0): Linear(in_features=784, out_features=50, bias=True)\n  (layer_1): ReLU()\n  (layer_2): Linear(in_features=50, out_features=10, bias=True)\n)\n\n\n\nmodel(xb).shape\n\ntorch.Size([50, 10])\n\n\n# nn.ModuleList\n\nclass SequentialModel(nn.Module):\n    def __init__(self, layers):\n        super().__init__()\n        self.layers = nn.ModuleList(layers)\n    \n    def forward(self, x):\n        for l in self.layers:\n            x = l(x)\n        return x\n\n\nmodel = SequentialModel(layers)\nmodel(xb).shape\n\ntorch.Size([50, 10])\n\n\n# nn.Sequential\n\nmodel = nn.Sequential(*layers)\n\n\nfit()\n\nloss: 0.14, accuracy: 0.96\nloss: 0.11, accuracy: 0.96\nloss: 0.05, accuracy: 1.00\n\n\n\nmodel\n\nSequential(\n  (0): Linear(in_features=784, out_features=50, bias=True)\n  (1): ReLU()\n  (2): Linear(in_features=50, out_features=10, bias=True)\n)\n\n\n# optim\n\nclass Optimizer:\n    def __init__(self, params, lr=0.5):\n        self.params, self.lr = list(params), lr\n    \n    def step(self):\n        with torch.no_grad():\n            for p in self.params:\n                p -= p.grad * self.lr\n\n    def zero_grad(self):\n        for p in self.params:\n            p.grad.data.zero_()\n\n\nmodel = nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))\n\n\nopt = Optimizer(model.parameters(), lr=lr)\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        s = slice(i, min(i+bs, n))\n        xb,yb = x_train[s],y_train[s]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        opt.step()\n        opt.zero_grad()\n    report(loss, preds, yb)\n\nloss: 0.13, accuracy: 0.96\nloss: 0.12, accuracy: 0.92\nloss: 0.08, accuracy: 0.96\n\n\n\nfrom torch import optim\n\n\ndef get_model():\n    model = 
nn.Sequential(nn.Linear(n_in, n_h), nn.ReLU(), nn.Linear(n_h, n_o))\n    opt = optim.SGD(model.parameters(), lr=lr)\n    return opt, model\n\n\nopt, model = get_model()\nloss_func(model(xb), yb)\n\ntensor(2.2912, grad_fn=<NllLossBackward0>)\n\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        s = slice(i, min(i+bs, n))\n        xb,yb = x_train[s],y_train[s]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        opt.step()\n        opt.zero_grad()\n    report(loss, preds, yb)\n\nloss: 0.15, accuracy: 0.96\nloss: 0.11, accuracy: 0.96\nloss: 0.06, accuracy: 1.00\n\n\n# dataset\n\n\n\nDataset\n\n Dataset (x, y)\n\nInitialize self. See help(type(self)) for accurate signature.\n\ntrain_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_test, y_test)\n\n\nopt, model = get_model()\n\n\nfor epoch in range(epochs):\n    for i in range(0, n, bs):\n        xb,yb = train_ds[i: min(i+bs, n)]\n        preds = model(xb)\n        loss = loss_func(preds, yb)\n        loss.backward()\n        opt.step()\n        opt.zero_grad()\n    report(loss, preds, yb)\n\nloss: 0.13, accuracy: 0.96\nloss: 0.10, accuracy: 0.98\nloss: 0.12, accuracy: 0.96\n\n\n# data loader\n\nclass DataLoader:\n    def __init__(self, ds, bs):\n        self.ds, self.bs = ds, bs\n    \n    def __iter__(self):\n        for i in range(0, len(self.ds), self.bs):\n            yield self.ds[i:i+self.bs]\n\n\ntrain_dl = DataLoader(train_ds, bs)\nvalid_dl = DataLoader(valid_ds, bs)\n\n\nxb, yb = next(iter(train_dl))\nxb.shape\n\ntorch.Size([50, 784])\n\n\n\nopt, model = get_model()\n\n\ndef fit():\n    for epoch in range(epochs):\n        for xb,yb in train_dl:\n            preds = model(xb)\n            loss = loss_func(preds, yb)\n            loss.backward()\n            opt.step()\n            opt.zero_grad()\n        report(loss, preds, yb)\n\n\nfit()\n\nloss: 0.16, accuracy: 0.96\nloss: 0.11, accuracy: 0.98\nloss: 0.07, accuracy: 0.98\n\n\n\n# random sampling\n\nimport random\n\n\nclass Sampler:\n    def __init__(self, ds, shuffle=False):\n        self.n, self.shuffle = len(ds), shuffle\n    \n    def __iter__(self):\n        res = list(range(self.n))\n        if self.shuffle:\n            random.shuffle(res)\n        return iter(res)\n\n\nfrom itertools import islice\n\n\nss = Sampler(train_ds)\n\n\nlist(islice(ss, 5))\n\n[0, 1, 2, 3, 4]\n\n\n\nimport fastcore.all as fc\n\n\nclass BatchSampler:\n    def __init__(self, sampler, bs, drop_last=False):\n        fc.store_attr()\n    \n    def __iter__(self):\n        yield from fc.chunked(iter(self.sampler), self.bs, drop_last=self.drop_last)\n\n\nbatches = BatchSampler(ss, 5)\nlist(islice(iter(batches), 3))\n\n[[0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]]\n\n\n\ndef collate(b):\n    xs, ys = zip(*b)\n    return torch.stack(xs), torch.stack(ys)\n\n\nclass DataLoader:\n    def __init__(self, ds, batchs, collate_fn=collate):\n        fc.store_attr()\n    \n    def __iter__(self):\n        yield from (self.collate_fn(self.ds[i] for i in b) for b in self.batchs)\n\n\ntrain_sampler = BatchSampler(Sampler(train_ds, shuffle=True), bs)\nvalid_sampler = BatchSampler(Sampler(valid_ds, shuffle=True), bs)\n\n\ntrain_dl = DataLoader(train_ds, train_sampler)\nvalid_dl = DataLoader(valid_ds, valid_sampler)\n\n\nxb, yb = next(iter(valid_dl))\nxb.shape, yb.shape\n\n(torch.Size([50, 784]), torch.Size([50]))\n\n\n\nplt.imshow(xb[0].view(28, 28));\nplt.axis('off');\n\n\n\n\n\n\n\n\n\nopt, model = get_model()\n\n\nfit()\n\nloss: 0.11, 
accuracy: 0.94\nloss: 0.27, accuracy: 0.96\nloss: 0.03, accuracy: 1.00\n\n\n\n# multiprocessing dataloader\n\nimport torch.multiprocessing as mp\n\n\nclass DataLoader:\n    def __init__(self, ds, batchs, collate_fn=collate, num_workers=1):\n        fc.store_attr()\n    \n    def __iter__(self):\n        with mp.Pool(self.num_workers) as ex:\n            yield from ex.map(self.ds.__getitem__, iter(self.batchs))\n\n\ntrain_dl = DataLoader(train_ds, batchs=train_sampler)\n\n\nxb, yb = next(iter(train_dl))\nplt.imshow(xb[0].view(28, 28));\nplt.axis('off');\n\n\n\n\n\n\n\n\n# pytorch dataloaders\n\nt = RandomSampler(train_ds)\n\n\nnext(iter(t))\n\n24797\n\n\n\nt = BatchSampler(train_ds, batch_size=2, drop_last=False)\n\nk  = next(iter(t))\nprint(len(k))\nfor ele in k:\n    print(ele[0].shape, ele[1])\n\n2\ntorch.Size([784]) tensor(5)\ntorch.Size([784]) tensor(0)\n\n\n\nt = BatchSampler(RandomSampler(train_ds), batch_size=2, drop_last=False)\n\nk  = next(iter(t))\nprint(len(k))\nfor ele in k:\n    print(ele)\n\n2\n33683\n36592\n\n\n\ntrain_samp = BatchSampler(RandomSampler(train_ds), bs, drop_last=False)\nvalid_samp = BatchSampler(RandomSampler(valid_ds), bs, drop_last=False)\n\n\ntrain_dl = DataLoader(train_ds, batch_sampler=train_samp, collate_fn=collate)\nvalid_dl = DataLoader(valid_ds, batch_sampler=valid_samp, collate_fn=collate)\n\n\nopt, model = get_model()\nfit()\n\nloss: 0.20, accuracy: 0.94\nloss: 0.11, accuracy: 0.98\nloss: 0.20, accuracy: 0.98\n\n\n\ntrain_dl = DataLoader(train_ds, bs, shuffle=True, num_workers=2, drop_last=True)\nvalid_dl = DataLoader(valid_ds, bs, shuffle=False, num_workers=2)\n\n\nopt, model = get_model()\nfit()\n\nloss: 0.08, accuracy: 0.98\nloss: 0.31, accuracy: 0.86\nloss: 0.11, accuracy: 0.98\n\n\n\n# validation\n\n\n\nfit\n\n fit (epochs, model, loss_func, opt, train_dl, valid_dl)\n\n\n\n\nget_dls\n\n get_dls (train_ds, valid_ds, bs, **kwargs)\n\n\ntrain_dl, valid_dl = get_dls(train_ds, valid_ds, bs)\nopt, model = get_model()\n\n\n\n\n0 0.1775239165313542 0.948100003004074\n1 0.1179210783354938 0.9646000063419342\n2 0.11550588405691087 0.9665000039339066\n3 0.10593999677803367 0.9698000079393387\n4 0.10098711441038176 0.9727000087499619\nCPU times: user 17.8 s, sys: 16.1 s, total: 33.8 s\nWall time: 4.71 s"
   }
 ]
\ No newline at end of file
diff --git a/_proc/_docs/sitemap.xml b/_proc/_docs/sitemap.xml
index b6ac0702..6a4285d9 100644
--- a/_proc/_docs/sitemap.xml
+++ b/_proc/_docs/sitemap.xml
@@ -10,6 +10,6 @@
   
   
     https://arun477.github.io/practice_deep_learning/mini_batch_training.html
-    2024-02-19T15:03:49.698Z
+    2024-02-19T15:14:43.526Z
   
 
diff --git a/nbs/.ipynb_checkpoints/04_mini_batch_training-checkpoint.ipynb b/nbs/.ipynb_checkpoints/04_mini_batch_training-checkpoint.ipynb
index 14e5e454..fade6c19 100644
--- a/nbs/.ipynb_checkpoints/04_mini_batch_training-checkpoint.ipynb
+++ b/nbs/.ipynb_checkpoints/04_mini_batch_training-checkpoint.ipynb
@@ -489,9 +489,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def accuracy(out, yb):\n",
-    "    return (out.argmax(1)==yb).float().mean()\n",
+    "#|export\n",
     "\n",
+    "def accuracy(out, yb):\n",
+    "    return (out.argmax(1)==yb).float().mean()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "dcca7a8d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "loss_func = F.cross_entropy"
    ]
   },
@@ -570,6 +580,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "#|export\n",
+    "\n",
     "def report(loss, preds, yb):\n",
     "    print(f\"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}\")"
    ]
@@ -2029,7 +2041,7 @@
    "source": [
     "#|export\n",
     "\n",
-    "def fit(epochs, model, loss_func, opt, train_dl, valid_ld):\n",
+    "def fit(epochs, model, loss_func, opt, train_dl, valid_dl):\n",
     "    for epoch in range(epochs):\n",
     "        model.train()\n",
     "        for xb, yb in train_dl:\n",
diff --git a/nbs/04_mini_batch_training.ipynb b/nbs/04_mini_batch_training.ipynb
index 14e5e454..fade6c19 100644
--- a/nbs/04_mini_batch_training.ipynb
+++ b/nbs/04_mini_batch_training.ipynb
@@ -489,9 +489,19 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "def accuracy(out, yb):\n",
-    "    return (out.argmax(1)==yb).float().mean()\n",
+    "#|export\n",
     "\n",
+    "def accuracy(out, yb):\n",
+    "    return (out.argmax(1)==yb).float().mean()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "dcca7a8d",
+   "metadata": {},
+   "outputs": [],
+   "source": [
     "loss_func = F.cross_entropy"
    ]
   },
@@ -570,6 +580,8 @@
    "metadata": {},
    "outputs": [],
    "source": [
+    "#|export\n",
+    "\n",
     "def report(loss, preds, yb):\n",
     "    print(f\"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}\")"
    ]
@@ -2029,7 +2041,7 @@
    "source": [
     "#|export\n",
     "\n",
-    "def fit(epochs, model, loss_func, opt, train_dl, valid_ld):\n",
+    "def fit(epochs, model, loss_func, opt, train_dl, valid_dl):\n",
     "    for epoch in range(epochs):\n",
     "        model.train()\n",
     "        for xb, yb in train_dl:\n",
diff --git a/practice_deep_learning/__pycache__/__init__.cpython-39.pyc b/practice_deep_learning/__pycache__/__init__.cpython-39.pyc
index 2724c842..27306e92 100644
Binary files a/practice_deep_learning/__pycache__/__init__.cpython-39.pyc and b/practice_deep_learning/__pycache__/__init__.cpython-39.pyc differ
diff --git a/practice_deep_learning/__pycache__/training.cpython-39.pyc b/practice_deep_learning/__pycache__/training.cpython-39.pyc
new file mode 100644
index 00000000..d40ca636
Binary files /dev/null and b/practice_deep_learning/__pycache__/training.cpython-39.pyc differ
diff --git a/practice_deep_learning/_modidx.py b/practice_deep_learning/_modidx.py
index a18489cf..112ad008 100644
--- a/practice_deep_learning/_modidx.py
+++ b/practice_deep_learning/_modidx.py
@@ -13,7 +13,11 @@
                                                                                                        'practice_deep_learning/training.py'),
                                                  'practice_deep_learning.training.Dataset.__len__': ( 'mini_batch_training.html#dataset.__len__',
                                                                                                       'practice_deep_learning/training.py'),
+                                                 'practice_deep_learning.training.accuracy': ( 'mini_batch_training.html#accuracy',
+                                                                                               'practice_deep_learning/training.py'),
                                                  'practice_deep_learning.training.fit': ( 'mini_batch_training.html#fit',
                                                                                           'practice_deep_learning/training.py'),
                                                  'practice_deep_learning.training.get_dls': ( 'mini_batch_training.html#get_dls',
-                                                                                              'practice_deep_learning/training.py')}}}
+                                                                                              'practice_deep_learning/training.py'),
+                                                 'practice_deep_learning.training.report': ( 'mini_batch_training.html#report',
+                                                                                             'practice_deep_learning/training.py')}}}
diff --git a/practice_deep_learning/training.py b/practice_deep_learning/training.py
index 1f4024d4..fbda8698 100644
--- a/practice_deep_learning/training.py
+++ b/practice_deep_learning/training.py
@@ -1,7 +1,7 @@
 # AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/04_mini_batch_training.ipynb.
 
 # %% auto 0
-__all__ = ['Dataset', 'fit', 'get_dls']
+__all__ = ['accuracy', 'report', 'Dataset', 'fit', 'get_dls']
 
 # %% ../nbs/04_mini_batch_training.ipynb 1
 import torch, torch.nn as nn
@@ -9,7 +9,15 @@
 from pathlib import Path
 import gzip, pickle, matplotlib.pyplot as plt
 
-# %% ../nbs/04_mini_batch_training.ipynb 74
+# %% ../nbs/04_mini_batch_training.ipynb 29
+def accuracy(out, yb):
+    return (out.argmax(1)==yb).float().mean()
+
+# %% ../nbs/04_mini_batch_training.ipynb 34
+def report(loss, preds, yb):
+    print(f"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}")
+
+# %% ../nbs/04_mini_batch_training.ipynb 75
 class Dataset:
     def __init__(self, x, y):
         self.x, self.y = x, y
@@ -20,11 +28,11 @@ def __len__(self):
     def __getitem__(self, i):
         return self.x[i], self.y[i]
 
-# %% ../nbs/04_mini_batch_training.ipynb 108
+# %% ../nbs/04_mini_batch_training.ipynb 109
 from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, BatchSampler
 
-# %% ../nbs/04_mini_batch_training.ipynb 119
-def fit(epochs, model, loss_func, opt, train_dl, valid_ld):
+# %% ../nbs/04_mini_batch_training.ipynb 120
+def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
     for epoch in range(epochs):
         model.train()
         for xb, yb in train_dl:
@@ -47,7 +55,7 @@ def fit(epochs, model, loss_func, opt, train_dl, valid_ld):
     
     return total_loss/count, total_acc/count           
 
-# %% ../nbs/04_mini_batch_training.ipynb 120
+# %% ../nbs/04_mini_batch_training.ipynb 121
 def get_dls(train_ds, valid_ds, bs, **kwargs):
     return (
         DataLoader(train_ds, bs, shuffle=True, **kwargs),