Changes for neptune-scale==0.7.0 (#4)
SiddhantSadangi authored Oct 28, 2024
1 parent ef239c0 commit 9c99c26
Showing 3 changed files with 71 additions and 89 deletions.
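Taken together, the hunks below show the neptune-scale==0.7.0 usage these examples migrate to: every `Run()` call drops the `family` argument and passes only `run_id`, and every `log_metrics()` call lists `data` before the trailing `step` keyword. A minimal sketch of that pattern, assembled from this commit rather than from the library docs, and assuming the `neptune_scale` import and credentials supplied via environment variables (neither is shown in this diff), with placeholder metric values:

from random import random

from neptune_scale import Run  # assumed import path; not shown in this diff

# Assumes NEPTUNE_PROJECT / NEPTUNE_API_TOKEN are set in the environment.
# neptune-scale 0.7.0 style: no `family` argument, only `run_id`.
with Run(run_id=f"hpo-{random()}") as run:
    run.add_tags(["all-trials", "example"])

    # Placeholder (loss, acc) values standing in for real training metrics.
    for step, (loss, acc) in enumerate([(0.9, 0.4), (0.5, 0.7)]):
        # `data` comes first, `step` is the trailing keyword argument.
        run.log_metrics(
            data={
                "metrics/batch/loss": float(loss),
                "metrics/batch/acc": float(acc),
            },
            step=step,
        )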
25 changes: 8 additions & 17 deletions how-to-guides/hpo/notebooks/Neptune_HPO.ipynb
@@ -132,7 +132,7 @@
"metadata": {},
"outputs": [],
"source": [
"learning_rates = [0.05, 0.1, 0.5] # learning rate choices"
"learning_rates = [0.025, 0.05, 0.075] # learning rate choices"
]
},
{
@@ -256,10 +256,7 @@
"source": [
"from random import random\n",
"\n",
"run = Run(\n",
" family=\"hpo\",\n",
" run_id=f\"hpo-{random()}\",\n",
")\n",
"run = Run(run_id=f\"hpo-{random()}\")\n",
"\n",
"run.add_tags([\"all-trials\", \"notebook\"])"
]
@@ -314,7 +311,7 @@
" step = 0\n",
"\n",
" for epoch in trange(parameters[\"epochs\"], desc=f\"Trial {trial} - lr: {lr}\"):\n",
" run.log_metrics(step=epoch, data={f\"trials/{trial}/epochs\": epoch})\n",
" run.log_metrics(data={f\"trials/{trial}/epochs\": epoch}, step=epoch)\n",
"\n",
" for x, y in trainloader:\n",
" x, y = x.to(parameters[\"device\"]), y.to(parameters[\"device\"])\n",
@@ -328,11 +325,11 @@
"\n",
" # Log trial metrics\n",
" run.log_metrics(\n",
" step=step,\n",
" data={\n",
" f\"trials/{trial}/metrics/batch/loss\": float(loss),\n",
" f\"trials/{trial}/metrics/batch/acc\": float(acc),\n",
" },\n",
" step=step,\n",
" )\n",
"\n",
" # Log best values across all trials\n",
@@ -445,10 +442,7 @@
"metadata": {},
"outputs": [],
"source": [
"sweep_run = Run(\n",
" family=f\"sweep-{sweep_id}\",\n",
" run_id=f\"sweep-{sweep_id}\",\n",
")\n",
"sweep_run = Run(run_id=f\"sweep-{sweep_id}\")\n",
"\n",
"sweep_run.add_tags([\"sweep\", \"notebook\"])"
]
@@ -512,10 +506,7 @@
" desc=\"Trials\",\n",
"):\n",
" # Create a trial-level run\n",
" with Run(\n",
" family=f\"sweep-{sweep_id}\",\n",
" run_id=f\"trial-{sweep_id}-{trial}\",\n",
" ) as trial_run:\n",
" with Run(run_id=f\"trial-{sweep_id}-{trial}\") as trial_run:\n",
" trial_run.add_tags([\"trial\", \"notebook\"])\n",
"\n",
" # Add sweep_id to the trial-level run\n",
@@ -529,7 +520,7 @@
" step = 0\n",
"\n",
" for epoch in trange(parameters[\"epochs\"], desc=f\"Trial {trial} - lr: {lr}\"):\n",
" trial_run.log_metrics(step=epoch, data={\"epochs\": epoch})\n",
" trial_run.log_metrics(data={\"epochs\": epoch}, step=epoch)\n",
"\n",
" for x, y in trainloader:\n",
" x, y = x.to(parameters[\"device\"]), y.to(parameters[\"device\"])\n",
@@ -543,11 +534,11 @@
"\n",
" # Log trial metrics\n",
" trial_run.log_metrics(\n",
" step=step,\n",
" data={\n",
" \"metrics/batch/loss\": float(loss),\n",
" \"metrics/batch/acc\": float(acc),\n",
" },\n",
" step=step,\n",
" )\n",
"\n",
" # Log best values across all trials to Sweep-level run\n",
70 changes: 32 additions & 38 deletions how-to-guides/hpo/scripts/neptune_hpo_separate_runs.py
@@ -21,7 +21,7 @@

input_size = math.prod(parameters["input_size"])

learning_rates = [0.075, 0.1, 0.25] # learning rate choices
learning_rates = [0.025, 0.05, 0.075] # learning rate choices


class BaseModel(nn.Module):
@@ -42,42 +42,39 @@ def forward(self, x):
return x


if __name__ == "__main__":
model = BaseModel(
input_size,
parameters["n_classes"],
).to(parameters["device"])

criterion = nn.CrossEntropyLoss()

data_tfms = {
"train": transforms.Compose(
[
transforms.ToTensor(),
]
)
}

trainset = datasets.MNIST(
root="mnist",
train=True,
download=True,
transform=data_tfms["train"],
)
model = BaseModel(
input_size,
parameters["n_classes"],
).to(parameters["device"])

criterion = nn.CrossEntropyLoss()

trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=parameters["batch_size"],
shuffle=True,
num_workers=0,
data_tfms = {
"train": transforms.Compose(
[
transforms.ToTensor(),
]
)
}

trainset = datasets.MNIST(
root="mnist",
train=True,
download=True,
transform=data_tfms["train"],
)

trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=parameters["batch_size"],
shuffle=True,
num_workers=0,
)

if __name__ == "__main__":
sweep_id = str(uuid.uuid4())

sweep_run = Run(
family=f"sweep-{sweep_id}",
run_id=f"sweep-{sweep_id}",
)
sweep_run = Run(run_id=f"sweep-{sweep_id}")

sweep_run.add_tags(["sweep", "script"])
sweep_run.add_tags([sweep_id], group_tags=True)
@@ -97,10 +94,7 @@ def forward(self, x):
desc="Trials",
):
# Create a trial-level run
with Run(
family=f"sweep-{sweep_id}",
run_id=f"trial-{sweep_id}-{trial}",
) as trial_run:
with Run(run_id=f"trial-{sweep_id}-{trial}") as trial_run:
trial_run.add_tags(["trial", "script"])

# Add sweep_id to the trial-level run
@@ -114,7 +108,7 @@ def forward(self, x):
step = 0

for epoch in trange(parameters["epochs"], desc=f"Trial {trial} - lr: {lr}"):
trial_run.log_metrics(step=epoch, data={"epochs": epoch})
trial_run.log_metrics(data={"epochs": epoch}, step=epoch)

for x, y in trainloader:
x, y = x.to(parameters["device"]), y.to(parameters["device"])
@@ -128,11 +122,11 @@

# Log trial metrics
trial_run.log_metrics(
step=step,
data={
"metrics/batch/loss": float(loss),
"metrics/batch/acc": float(acc),
},
step=step,
)

# Log best values across all trials to Sweep-level run
65 changes: 31 additions & 34 deletions how-to-guides/hpo/scripts/neptune_hpo_single_run.py
@@ -21,7 +21,7 @@

input_size = math.prod(parameters["input_size"])

learning_rates = [0.05, 0.075, 0.1] # learning rate choices
learning_rates = [0.025, 0.05, 0.075] # learning rate choices


class BaseModel(nn.Module):
@@ -42,40 +42,37 @@ def forward(self, x):
return x


if __name__ == "__main__":
model = BaseModel(
input_size,
parameters["n_classes"],
).to(parameters["device"])

criterion = nn.CrossEntropyLoss()

data_tfms = {
"train": transforms.Compose(
[
transforms.ToTensor(),
]
)
}

trainset = datasets.MNIST(
root="mnist",
train=True,
download=True,
transform=data_tfms["train"],
)
model = BaseModel(
input_size,
parameters["n_classes"],
).to(parameters["device"])

trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=parameters["batch_size"],
shuffle=True,
num_workers=0,
)
criterion = nn.CrossEntropyLoss()

run = Run(
family="hpo",
run_id=f"hpo-{random()}",
data_tfms = {
"train": transforms.Compose(
[
transforms.ToTensor(),
]
)
}

trainset = datasets.MNIST(
root="mnist",
train=True,
download=True,
transform=data_tfms["train"],
)

trainloader = torch.utils.data.DataLoader(
trainset,
batch_size=parameters["batch_size"],
shuffle=True,
num_workers=0,
)

if __name__ == "__main__":
run = Run(run_id=f"hpo-{random()}")

run.add_tags(["all-trials", "script"])

@@ -101,7 +98,7 @@ def forward(self, x):
step = 0

for epoch in trange(parameters["epochs"], desc=f"Trial {trial} - lr: {lr}"):
run.log_metrics(step=epoch, data={f"trials/{trial}/epochs": epoch})
run.log_metrics(data={f"trials/{trial}/epochs": epoch}, step=epoch)

for x, y in trainloader:
x, y = x.to(parameters["device"]), y.to(parameters["device"])
Expand All @@ -115,11 +112,11 @@ def forward(self, x):

# Log trial metrics
run.log_metrics(
step=step,
data={
f"trials/{trial}/metrics/batch/loss": float(loss),
f"trials/{trial}/metrics/batch/acc": float(acc),
},
step=step,
)

# Log best values across all trials
