Skip to content

Commit

Permalink
Merge branch 'main' into hide-members-records
Browse files Browse the repository at this point in the history
  • Loading branch information
panh99 authored Apr 8, 2024
2 parents a1a0bb8 + 2f8f9e5 commit 5ae36d2
Show file tree
Hide file tree
Showing 35 changed files with 96 additions and 66 deletions.
2 changes: 1 addition & 1 deletion datasets/doc/source/how-to-use-with-pytorch.rst
Original file line number Diff line number Diff line change
Expand Up @@ -63,7 +63,7 @@ expected by a model with a convolutional layer.

If you want to divide the dataset, you can use (at any point before passing the dataset to the DataLoader)::

partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
partition_train = partition_train_test["train"]
partition_test = partition_train_test["test"]

Expand Down
2 changes: 1 addition & 1 deletion datasets/e2e/pytorch/pytorch_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -65,7 +65,7 @@ def _create_trainloader(self, batch_size: int) -> DataLoader:
partition_id = 0
fds = FederatedDataset(dataset=self.dataset_name, partitioners={"train": 100})
partition = fds.load_partition(partition_id, "train")
partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
partition_train_test = partition_train_test.map(
lambda img: {"img": self.transforms(img)}, input_columns="img"
)
Expand Down
2 changes: 1 addition & 1 deletion datasets/e2e/scikit-learn/sklearn_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ def _get_partition_data(self):
fds = FederatedDataset(dataset=self.dataset_name, partitioners={"train": 10})
partition = fds.load_partition(partition_id, "train")
partition.set_format("numpy")
partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
X_train, y_train = partition_train_test["train"]["image"], partition_train_test[
"train"]["label"]
X_test, y_test = partition_train_test["test"]["image"], partition_train_test[
Expand Down
47 changes: 37 additions & 10 deletions dev/update-examples.sh
Original file line number Diff line number Diff line change
Expand Up @@ -16,20 +16,47 @@ echo "---" >> $INDEX
echo "maxdepth: 1" >> $INDEX
echo "---" >> $INDEX

rm -f "examples/doc/source/*.md"
rm -f examples/doc/source/*.md

cd examples/
for d in $(printf '%s\n' */ | sort -V); do
example=${d%/}
# For each example, copy the README into the source of the Example docs
[[ $example != doc ]] && cp $example/README.md $ROOT/examples/doc/source/$example.md 2>&1 >/dev/null
# For each example, copy all images of the _static folder into the examples
# docs static folder
[[ $example != doc ]] && [ -d "$example/_static" ] && {
cp $example/_static/**.{jpg,png,jpeg} $ROOT/examples/doc/source/_static/ 2>/dev/null || true
}
# For each example, insert the name of the example into the index file
[[ $example != doc ]] && (echo $INSERT_LINE; echo a; echo $example; echo .; echo wq) | ed $INDEX 2>&1 >/dev/null

if [[ $example != doc ]]; then

for file in $example/*.md; do
# For each example, copy the README into the source of the Example docs
if [[ $(basename "$file") = "README.md" ]]; then
cp $file $ROOT/examples/doc/source/$example.md 2>&1 >/dev/null
else
# If the example contains other markdown files, copy them to the source of the Example docs
cp $file $ROOT/examples/doc/source/$(basename "$file") 2>&1 >/dev/null
fi
done

gh_text="[<img src=\"_static/view-gh.png\" alt=\"View on GitHub\" width=\"200\"/>](https://github.com/adap/flower/blob/main/examples/$example)"
readme_file="$ROOT/examples/doc/source/$example.md"

if ! grep -Fq "$gh_text" "$readme_file"; then
awk -v text="$gh_text" '
/^# / && !found {
print $0 "\n" text;
found=1;
next;
}
{ print }
' "$readme_file" > tmpfile && mv tmpfile "$readme_file"
fi

# For each example, copy all images of the _static folder into the examples
# docs static folder
[ -d "$example/_static" ] && {
cp $example/_static/**.{jpg,png,jpeg} $ROOT/examples/doc/source/_static/ 2>/dev/null || true
}
# For each example, insert the name of the example into the index file
(echo $INSERT_LINE; echo a; echo $example; echo .; echo wq) | ed $INDEX 2>&1 >/dev/null

fi
done

echo "\`\`\`" >> $INDEX
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -145,7 +145,7 @@
" for partition_id in range(NUM_CLIENTS):\n",
" partition = fds.load_partition(partition_id, \"train\")\n",
" partition = partition.with_transform(apply_transforms)\n",
" partition = partition.train_test_split(train_size=0.8)\n",
" partition = partition.train_test_split(train_size=0.8, seed=42)\n",
" trainloaders.append(DataLoader(partition[\"train\"], batch_size=BATCH_SIZE))\n",
" valloaders.append(DataLoader(partition[\"test\"], batch_size=BATCH_SIZE))\n",
" testset = fds.load_split(\"test\").with_transform(apply_transforms)\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/advanced-pytorch/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ def fit(self, parameters, config):
batch_size: int = config["batch_size"]
epochs: int = config["local_epochs"]

train_valid = self.trainset.train_test_split(self.validation_split)
train_valid = self.trainset.train_test_split(self.validation_split, seed=42)
trainset = train_valid["train"]
valset = train_valid["test"]

Expand Down
2 changes: 1 addition & 1 deletion examples/advanced-pytorch/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ def load_partition(partition_id, toy: bool = False):
fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10})
partition = fds.load_partition(partition_id)
# Divide data on each node: 80% train, 20% test
partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
partition_train_test = partition_train_test.with_transform(apply_transforms)
return partition_train_test["train"], partition_train_test["test"]

Expand Down
2 changes: 1 addition & 1 deletion examples/advanced-tensorflow/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -123,7 +123,7 @@ def load_partition(idx: int):
partition.set_format("numpy")

# Divide data on each node: 80% train, 20% test
partition = partition.train_test_split(test_size=0.2)
partition = partition.train_test_split(test_size=0.2, seed=42)
x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"]
x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"]
return x_train, y_train, x_test, y_test
Expand Down
1 change: 0 additions & 1 deletion examples/app-pytorch/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,7 +18,6 @@

# Define FlowerClient and client_fn
class FlowerClient(NumPyClient):

def fit(self, parameters, config):
set_weights(net, parameters)
results = train(net, trainloader, testloader, epochs=1, device=DEVICE)
Expand Down
4 changes: 2 additions & 2 deletions examples/custom-mods/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -288,7 +288,7 @@ $ tree .
pip install -r requirements.txt
```

For [W&B](wandb.ai) you will also need a valid account.
For [W&B](https://wandb.ai) you will also need a valid account.

### Start the long-running Flower server (SuperLink)

Expand Down Expand Up @@ -328,7 +328,7 @@ flower-server-app server:app --insecure

### Check the results

For W&B, you will need to login to the [website](wandb.ai).
For W&B, you will need to login to the [website](https://wandb.ai).

For TensorBoard, you will need to run the following command in your terminal:

Expand Down
1 change: 0 additions & 1 deletion examples/custom-mods/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,6 @@ def wandb_mod(msg: Message, context: Context, app: ClientAppCallable) -> Message

# if the `ClientApp` just processed a "fit" message, let's log some metrics to W&B
if reply.metadata.message_type == MessageType.TRAIN and reply.has_content():

metrics = reply.content.configs_records

results_to_log = dict(metrics.get("fitres.metrics", ConfigsRecord()))
Expand Down
1 change: 1 addition & 0 deletions examples/doc/source/_static/.gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,3 +3,4 @@
!favicon.ico
!flower-logo.png
!tmux_jtop_view.gif
!view-gh.png
Binary file added examples/doc/source/_static/view-gh.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
2 changes: 1 addition & 1 deletion examples/embedded-devices/client_pytorch.py
Original file line number Diff line number Diff line change
Expand Up @@ -108,7 +108,7 @@ def apply_transforms(batch):
for partition_id in range(NUM_CLIENTS):
partition = fds.load_partition(partition_id, "train")
# Divide data on each node: 90% train, 10% test
partition = partition.train_test_split(test_size=0.1)
partition = partition.train_test_split(test_size=0.1, seed=42)
partition = partition.with_transform(apply_transforms)
trainsets.append(partition["train"])
validsets.append(partition["test"])
Expand Down
2 changes: 1 addition & 1 deletion examples/embedded-devices/client_tf.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,7 +44,7 @@ def prepare_dataset(use_mnist: bool):
partition = fds.load_partition(partition_id, "train")
partition.set_format("numpy")
# Divide data on each node: 90% train, 10% test
partition = partition.train_test_split(test_size=0.1)
partition = partition.train_test_split(test_size=0.1, seed=42)
x_train, y_train = (
partition["train"][img_key] / 255.0,
partition["train"]["label"],
Expand Down
2 changes: 1 addition & 1 deletion examples/fl-dp-sa/fl_dp_sa/task.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,7 +42,7 @@ def load_data(partition_id):
fds = FederatedDataset(dataset="mnist", partitioners={"train": 100})
partition = fds.load_partition(partition_id)
# Divide data on each node: 80% train, 20% test
partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
pytorch_transforms = Compose([ToTensor(), Normalize((0.5,), (0.5,))])

def apply_transforms(batch):
Expand Down
2 changes: 1 addition & 1 deletion examples/flower-via-docker-compose/helpers/load_data.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,7 @@ def load_data(data_sampling_percentage=0.5, client_id=1, total_clients=2):
partition.set_format("numpy")

# Divide data on each client: 80% train, 20% test
partition = partition.train_test_split(test_size=0.2)
partition = partition.train_test_split(test_size=0.2, seed=42)
x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"]
x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"]

Expand Down
2 changes: 1 addition & 1 deletion examples/pytorch-from-centralized-to-federated/cifar.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def load_data(partition_id: int):
fds = FederatedDataset(dataset="cifar10", partitioners={"train": 10})
partition = fds.load_partition(partition_id)
# Divide data on each node: 80% train, 20% test
partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
pytorch_transforms = Compose(
[ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
Expand Down
2 changes: 1 addition & 1 deletion examples/quickstart-huggingface/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@ def load_data(partition_id):
fds = FederatedDataset(dataset="imdb", partitioners={"train": 1_000})
partition = fds.load_partition(partition_id)
# Divide data: 80% train, 20% test
partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)

tokenizer = AutoTokenizer.from_pretrained(CHECKPOINT)

Expand Down
3 changes: 3 additions & 0 deletions examples/quickstart-mlcube/dev/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ def create_directory(path: str) -> None:

def download(task_args: List[str]) -> None:
"""Task: download.
Input parameters:
--data_dir
"""
Expand Down Expand Up @@ -81,6 +82,7 @@ def download(task_args: List[str]) -> None:

def train(task_args: List[str]) -> None:
"""Task: train.
Input parameters:
--data_dir, --log_dir, --model_dir, --parameters_file
"""
Expand Down Expand Up @@ -175,6 +177,7 @@ def train(task_args: List[str]) -> None:

def evaluate(task_args: List[str]) -> None:
"""Task: train.
Input parameters:
--data_dir, --log_dir, --model_dir, --parameters_file
"""
Expand Down
2 changes: 1 addition & 1 deletion examples/quickstart-mlx/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -107,7 +107,7 @@ def evaluate(self, parameters, config):

fds = FederatedDataset(dataset="mnist", partitioners={"train": 3})
partition = fds.load_partition(partition_id=args.partition_id)
partition_splits = partition.train_test_split(test_size=0.2)
partition_splits = partition.train_test_split(test_size=0.2, seed=42)

partition_splits["train"].set_format("numpy")
partition_splits["test"].set_format("numpy")
Expand Down
6 changes: 4 additions & 2 deletions examples/quickstart-pytorch-lightning/mnist.py
Original file line number Diff line number Diff line change
Expand Up @@ -82,9 +82,11 @@ def load_data(partition):

partition = partition.with_transform(apply_transforms)
# 20 % for federated evaluation
partition_full = partition.train_test_split(test_size=0.2)
partition_full = partition.train_test_split(test_size=0.2, seed=42)
# 60 % for the federated train and 20 % for the federated validation (both in fit)
partition_train_valid = partition_full["train"].train_test_split(train_size=0.75)
partition_train_valid = partition_full["train"].train_test_split(
train_size=0.75, seed=42
)
trainloader = DataLoader(
partition_train_valid["train"],
batch_size=32,
Expand Down
2 changes: 1 addition & 1 deletion examples/quickstart-pytorch/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ def load_data(partition_id):
fds = FederatedDataset(dataset="cifar10", partitioners={"train": 3})
partition = fds.load_partition(partition_id)
# Divide data on each node: 80% train, 20% test
partition_train_test = partition.train_test_split(test_size=0.2)
partition_train_test = partition.train_test_split(test_size=0.2, seed=42)
pytorch_transforms = Compose(
[ToTensor(), Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]
)
Expand Down
2 changes: 1 addition & 1 deletion examples/quickstart-tensorflow/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@
partition.set_format("numpy")

# Divide data on each node: 80% train, 20% test
partition = partition.train_test_split(test_size=0.2)
partition = partition.train_test_split(test_size=0.2, seed=42)
x_train, y_train = partition["train"]["img"] / 255.0, partition["train"]["label"]
x_test, y_test = partition["test"]["img"] / 255.0, partition["test"]["label"]

Expand Down
2 changes: 1 addition & 1 deletion examples/simulation-pytorch/sim.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -497,7 +497,7 @@
" client_dataset = dataset.load_partition(int(cid), \"train\")\n",
"\n",
" # Now let's split it into train (90%) and validation (10%)\n",
" client_dataset_splits = client_dataset.train_test_split(test_size=0.1)\n",
" client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n",
"\n",
" trainset = client_dataset_splits[\"train\"]\n",
" valset = client_dataset_splits[\"test\"]\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/simulation-pytorch/sim.py
Original file line number Diff line number Diff line change
Expand Up @@ -94,7 +94,7 @@ def client_fn(cid: str) -> fl.client.Client:
client_dataset = dataset.load_partition(int(cid), "train")

# Now let's split it into train (90%) and validation (10%)
client_dataset_splits = client_dataset.train_test_split(test_size=0.1)
client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)

trainset = client_dataset_splits["train"]
valset = client_dataset_splits["test"]
Expand Down
2 changes: 1 addition & 1 deletion examples/simulation-tensorflow/sim.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -179,7 +179,7 @@
" client_dataset = dataset.load_partition(int(cid), \"train\")\n",
"\n",
" # Now let's split it into train (90%) and validation (10%)\n",
" client_dataset_splits = client_dataset.train_test_split(test_size=0.1)\n",
" client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)\n",
"\n",
" trainset = client_dataset_splits[\"train\"].to_tf_dataset(\n",
" columns=\"image\", label_cols=\"label\", batch_size=32\n",
Expand Down
2 changes: 1 addition & 1 deletion examples/simulation-tensorflow/sim.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,7 +83,7 @@ def client_fn(cid: str) -> fl.client.Client:
client_dataset = dataset.load_partition(int(cid), "train")

# Now let's split it into train (90%) and validation (10%)
client_dataset_splits = client_dataset.train_test_split(test_size=0.1)
client_dataset_splits = client_dataset.train_test_split(test_size=0.1, seed=42)

trainset = client_dataset_splits["train"].to_tf_dataset(
columns="image", label_cols="label", batch_size=32
Expand Down
Loading

0 comments on commit 5ae36d2

Please sign in to comment.