
Commit

modified notebook
arun477 committed Feb 19, 2024
1 parent 2c45a7d commit 2355642
Showing 14 changed files with 4,147 additions and 3,901 deletions.
3,642 changes: 1,859 additions & 1,783 deletions .ipynb_checkpoints/07_convolutions-checkpoint.ipynb

Large diffs are not rendered by default.

3,641 changes: 1,858 additions & 1,783 deletions 07_convolutions.ipynb

Large diffs are not rendered by default.

90 changes: 70 additions & 20 deletions _proc/04_mini_batch_training.ipynb
@@ -499,18 +499,48 @@
"```"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"---\n",
"\n",
"### accuracy\n",
"\n",
"> accuracy (out, yb)"
],
"text/plain": [
"---\n",
"\n",
"### accuracy\n",
"\n",
"> accuracy (out, yb)"
]
},
"execution_count": 1,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#| echo: false\n",
"#| output: asis\n",
"show_doc(accuracy)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "517b1844",
"id": "dcca7a8d",
"metadata": {
"language": "python"
},
"outputs": [],
"source": [
"def accuracy(out, yb):\n",
" return (out.argmax(1)==yb).float().mean()\n",
"\n",
"loss_func = F.cross_entropy"
]
},
@@ -590,15 +620,35 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "ee7b70dc",
"metadata": {
"language": "python"
},
"outputs": [],
"execution_count": 2,
"metadata": {},
"outputs": [
{
"data": {
"text/markdown": [
"---\n",
"\n",
"### report\n",
"\n",
"> report (loss, preds, yb)"
],
"text/plain": [
"---\n",
"\n",
"### report\n",
"\n",
"> report (loss, preds, yb)"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"def report(loss, preds, yb):\n",
" print(f\"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}\")"
"#| echo: false\n",
"#| output: asis\n",
"show_doc(report)"
]
},
{
@@ -1422,7 +1472,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 3,
"metadata": {},
"outputs": [
{
@@ -1446,7 +1496,7 @@
"Initialize self. See help(type(self)) for accurate signature."
]
},
"execution_count": 1,
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
@@ -2196,7 +2246,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": 4,
"metadata": {},
"outputs": [
{
@@ -2206,17 +2256,17 @@
"\n",
"### fit\n",
"\n",
"> fit (epochs, model, loss_func, opt, train_dl, valid_ld)"
"> fit (epochs, model, loss_func, opt, train_dl, valid_dl)"
],
"text/plain": [
"---\n",
"\n",
"### fit\n",
"\n",
"> fit (epochs, model, loss_func, opt, train_dl, valid_ld)"
"> fit (epochs, model, loss_func, opt, train_dl, valid_dl)"
]
},
"execution_count": 2,
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
@@ -2229,7 +2279,7 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": 5,
"metadata": {},
"outputs": [
{
@@ -2249,7 +2299,7 @@
"> get_dls (train_ds, valid_ds, bs, **kwargs)"
]
},
"execution_count": 3,
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
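For context, the two helpers now documented by `show_doc` in this processed notebook, `accuracy` and `report`, are defined in `nbs/04_mini_batch_training.ipynb` (see the diffs further down). A minimal usage sketch follows; the tensor shapes and the printed numbers are illustrative placeholders, not taken from this commit:

```python
import torch
import torch.nn.functional as F

# definitions as they appear in the notebook diff below
def accuracy(out, yb):
    # fraction of predictions whose argmax matches the label
    return (out.argmax(1) == yb).float().mean()

def report(loss, preds, yb):
    print(f"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}")

# illustrative batch: 64 samples, 10 classes (shapes are placeholders)
out = torch.randn(64, 10)
yb = torch.randint(0, 10, (64,))
loss = F.cross_entropy(out, yb)   # the loss_func used in the notebook
report(loss, out, yb)             # prints something like "loss: 2.40, accuracy: 0.11"
```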
609 changes: 309 additions & 300 deletions _proc/_docs/mini_batch_training.html

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion _proc/_docs/search.json

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion _proc/_docs/sitemap.xml
@@ -10,6 +10,6 @@
</url>
<url>
<loc>https://arun477.github.io/practice_deep_learning/mini_batch_training.html</loc>
<lastmod>2024-02-19T15:03:49.698Z</lastmod>
<lastmod>2024-02-19T15:14:43.526Z</lastmod>
</url>
</urlset>
18 changes: 15 additions & 3 deletions nbs/.ipynb_checkpoints/04_mini_batch_training-checkpoint.ipynb
@@ -489,9 +489,19 @@
"metadata": {},
"outputs": [],
"source": [
"def accuracy(out, yb):\n",
" return (out.argmax(1)==yb).float().mean()\n",
"#|export\n",
"\n",
"def accuracy(out, yb):\n",
" return (out.argmax(1)==yb).float().mean()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dcca7a8d",
"metadata": {},
"outputs": [],
"source": [
"loss_func = F.cross_entropy"
]
},
@@ -570,6 +580,8 @@
"metadata": {},
"outputs": [],
"source": [
"#|export\n",
"\n",
"def report(loss, preds, yb):\n",
" print(f\"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}\")"
]
@@ -2029,7 +2041,7 @@
"source": [
"#|export\n",
"\n",
"def fit(epochs, model, loss_func, opt, train_dl, valid_ld):\n",
"def fit(epochs, model, loss_func, opt, train_dl, valid_dl):\n",
" for epoch in range(epochs):\n",
" model.train()\n",
" for xb, yb in train_dl:\n",
18 changes: 15 additions & 3 deletions nbs/04_mini_batch_training.ipynb
@@ -489,9 +489,19 @@
"metadata": {},
"outputs": [],
"source": [
"def accuracy(out, yb):\n",
" return (out.argmax(1)==yb).float().mean()\n",
"#|export\n",
"\n",
"def accuracy(out, yb):\n",
" return (out.argmax(1)==yb).float().mean()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "dcca7a8d",
"metadata": {},
"outputs": [],
"source": [
"loss_func = F.cross_entropy"
]
},
@@ -570,6 +580,8 @@
"metadata": {},
"outputs": [],
"source": [
"#|export\n",
"\n",
"def report(loss, preds, yb):\n",
" print(f\"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}\")"
]
@@ -2029,7 +2041,7 @@
"source": [
"#|export\n",
"\n",
"def fit(epochs, model, loss_func, opt, train_dl, valid_ld):\n",
"def fit(epochs, model, loss_func, opt, train_dl, valid_dl):\n",
" for epoch in range(epochs):\n",
" model.train()\n",
" for xb, yb in train_dl:\n",
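Besides the new `#|export` directives, the substantive change in this notebook is the `valid_ld` → `valid_dl` rename in `fit`'s signature. Positional callers are unaffected, but keyword callers using the intended name would have failed against the old spelling. A small check, assuming the exported package is importable locally (the call sites in the comments are hypothetical):

```python
import inspect
from practice_deep_learning.training import fit

# Before this commit the last parameter was spelled `valid_ld`, so a keyword
# call with the intended name raised:
#     fit(1, model, F.cross_entropy, opt, train_dl, valid_dl=valid_dl)
#     TypeError: fit() got an unexpected keyword argument 'valid_dl'
# After the rename the signature matches the show_doc output above:
print(inspect.signature(fit))
# expected (per the diff): (epochs, model, loss_func, opt, train_dl, valid_dl)
```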
Binary file modified practice_deep_learning/__pycache__/__init__.cpython-39.pyc
Binary file not shown.
Binary file not shown.
6 changes: 5 additions & 1 deletion practice_deep_learning/_modidx.py
Expand Up @@ -13,7 +13,11 @@
'practice_deep_learning/training.py'),
'practice_deep_learning.training.Dataset.__len__': ( 'mini_batch_training.html#dataset.__len__',
'practice_deep_learning/training.py'),
'practice_deep_learning.training.accuracy': ( 'mini_batch_training.html#accuracy',
'practice_deep_learning/training.py'),
'practice_deep_learning.training.fit': ( 'mini_batch_training.html#fit',
'practice_deep_learning/training.py'),
'practice_deep_learning.training.get_dls': ( 'mini_batch_training.html#get_dls',
'practice_deep_learning/training.py')}}}
'practice_deep_learning/training.py'),
'practice_deep_learning.training.report': ( 'mini_batch_training.html#report',
'practice_deep_learning/training.py')}}}
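`_modidx.py` is nbdev's generated symbol index; the new entries simply map the freshly exported `accuracy` and `report` to their documentation anchors and source file. A quick way to inspect it, assuming the standard nbdev layout in which the module exposes a dict `d` with a `'syms'` key:

```python
from practice_deep_learning import _modidx

syms = _modidx.d['syms']['practice_deep_learning.training']
print(syms['practice_deep_learning.training.accuracy'])
# expected: ('mini_batch_training.html#accuracy', 'practice_deep_learning/training.py')
print(syms['practice_deep_learning.training.report'])
# expected: ('mini_batch_training.html#report', 'practice_deep_learning/training.py')
```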
20 changes: 14 additions & 6 deletions practice_deep_learning/training.py
@@ -1,15 +1,23 @@
# AUTOGENERATED! DO NOT EDIT! File to edit: ../nbs/04_mini_batch_training.ipynb.

# %% auto 0
__all__ = ['Dataset', 'fit', 'get_dls']
__all__ = ['accuracy', 'report', 'Dataset', 'fit', 'get_dls']

# %% ../nbs/04_mini_batch_training.ipynb 1
import torch, torch.nn as nn
import torch.nn.functional as F
from pathlib import Path
import gzip, pickle, matplotlib.pyplot as plt

# %% ../nbs/04_mini_batch_training.ipynb 74
# %% ../nbs/04_mini_batch_training.ipynb 29
def accuracy(out, yb):
return (out.argmax(1)==yb).float().mean()

# %% ../nbs/04_mini_batch_training.ipynb 34
def report(loss, preds, yb):
print(f"loss: {loss:.2f}, accuracy: {accuracy(preds, yb):.2f}")

# %% ../nbs/04_mini_batch_training.ipynb 75
class Dataset:
def __init__(self, x, y):
self.x, self.y = x, y
@@ -20,11 +28,11 @@
def __getitem__(self, i):
return self.x[i], self.y[i]

# %% ../nbs/04_mini_batch_training.ipynb 108
# %% ../nbs/04_mini_batch_training.ipynb 109
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler, BatchSampler

# %% ../nbs/04_mini_batch_training.ipynb 119
def fit(epochs, model, loss_func, opt, train_dl, valid_ld):
# %% ../nbs/04_mini_batch_training.ipynb 120
def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
for epoch in range(epochs):
model.train()
for xb, yb in train_dl:
@@ -47,7 +55,7 @@ def fit(epochs, model, loss_func, opt, train_dl, valid_ld):

return total_loss/count, total_acc/count

# %% ../nbs/04_mini_batch_training.ipynb 120
# %% ../nbs/04_mini_batch_training.ipynb 121
def get_dls(train_ds, valid_ds, bs, **kwargs):
return (
DataLoader(train_ds, bs, shuffle=True, **kwargs),
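Taken together, the exported API can be exercised roughly as follows. This is a sketch, not part of the commit: the synthetic tensors, the model, the batch size, and the learning rate are placeholders; `get_dls` is assumed to return the (train, valid) loader pair, and `fit` is assumed to return the final validation loss and accuracy, as suggested by the `return total_loss/count, total_acc/count` line above.

```python
import torch, torch.nn as nn
import torch.nn.functional as F
from practice_deep_learning.training import Dataset, get_dls, fit

# synthetic stand-ins for the real training/validation tensors
x_train, y_train = torch.randn(512, 784), torch.randint(0, 10, (512,))
x_valid, y_valid = torch.randn(128, 784), torch.randint(0, 10, (128,))

train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_valid, y_valid)
train_dl, valid_dl = get_dls(train_ds, valid_ds, bs=64)

model = nn.Sequential(nn.Linear(784, 50), nn.ReLU(), nn.Linear(50, 10))
opt = torch.optim.SGD(model.parameters(), lr=0.1)

val_loss, val_acc = fit(3, model, F.cross_entropy, opt, train_dl, valid_dl)
print(f"final: loss {val_loss:.2f}, accuracy {val_acc:.2f}")
```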
