
Commit

Reformat examples
adam-narozniak committed Apr 3, 2024
1 parent ea80765 commit e429a70
Showing 8 changed files with 17 additions and 15 deletions.
1 change: 0 additions & 1 deletion examples/app-pytorch/client.py
@@ -18,7 +18,6 @@

# Define FlowerClient and client_fn
class FlowerClient(NumPyClient):
-
    def fit(self, parameters, config):
        set_weights(net, parameters)
        results = train(net, trainloader, testloader, epochs=1, device=DEVICE)
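A minimal sketch of how a NumPyClient fit method like the one above is typically completed; get_weights, set_weights, net, train, trainloader, testloader, and DEVICE are assumed to come from the example's task module and are not confirmed by this diff.

from flwr.client import NumPyClient


class FlowerClient(NumPyClient):
    def fit(self, parameters, config):
        # Load the global weights into the local model (assumed helper)
        set_weights(net, parameters)
        results = train(net, trainloader, testloader, epochs=1, device=DEVICE)
        # NumPyClient.fit returns (updated weights, number of examples, metrics)
        return get_weights(net), len(trainloader.dataset), results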
1 change: 0 additions & 1 deletion examples/custom-mods/client.py
@@ -86,7 +86,6 @@ def wandb_mod(msg: Message, context: Context, app: ClientAppCallable) -> Message

    # if the `ClientApp` just processed a "fit" message, let's log some metrics to W&B
    if reply.metadata.message_type == MessageType.TRAIN and reply.has_content():
-
        metrics = reply.content.configs_records

        results_to_log = dict(metrics.get("fitres.metrics", ConfigsRecord()))
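As a usage note, a minimal sketch (not part of this diff) of how a mod such as wandb_mod is attached to a Flower ClientApp; client_fn is assumed to be defined elsewhere in the example.

from flwr.client import ClientApp

# Every message the client handles now passes through wandb_mod
app = ClientApp(client_fn=client_fn, mods=[wandb_mod])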
3 changes: 3 additions & 0 deletions examples/quickstart-mlcube/dev/mnist.py
@@ -36,6 +36,7 @@ def create_directory(path: str) -> None:

def download(task_args: List[str]) -> None:
    """Task: download.
+
    Input parameters:
        --data_dir
    """
@@ -81,6 +82,7 @@ def download(task_args: List[str]) -> None:

def train(task_args: List[str]) -> None:
    """Task: train.
+
    Input parameters:
        --data_dir, --log_dir, --model_dir, --parameters_file
    """
@@ -175,6 +177,7 @@ def train(task_args: List[str]) -> None:

def evaluate(task_args: List[str]) -> None:
    """Task: train.
+
    Input parameters:
        --data_dir, --log_dir, --model_dir, --parameters_file
    """
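A hedged sketch of how task functions like these commonly parse their task_args; the argparse wiring is an assumption for illustration, not code from this repository.

import argparse
from typing import List


def download(task_args: List[str]) -> None:
    """Task: download.

    Input parameters:
        --data_dir
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--data_dir", type=str, required=True)
    args = parser.parse_args(args=task_args)
    # ... fetch the dataset archives into args.data_dir ...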
4 changes: 3 additions & 1 deletion examples/quickstart-pytorch-lightning/mnist.py
@@ -84,7 +84,9 @@ def load_data(partition):
    # 20 % for on federated evaluation
    partition_full = partition.train_test_split(test_size=0.2, seed=42)
    # 60 % for the federated train and 20 % for the federated validation (both in fit)
-    partition_train_valid = partition_full["train"].train_test_split(train_size=0.75, seed=42)
+    partition_train_valid = partition_full["train"].train_test_split(
+        train_size=0.75, seed=42
+    )
    trainloader = DataLoader(
        partition_train_valid["train"],
        batch_size=32,
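The nested splits above compose to the 60/20/20 proportions the comments describe: 0.8 * 0.75 = 0.6 of the partition for training, 0.8 * 0.25 = 0.2 for validation, and 0.2 held out for federated evaluation. A toy sketch verifying this, assuming only that Hugging Face datasets is installed:

from datasets import Dataset

ds = Dataset.from_dict({"x": list(range(100))})
full = ds.train_test_split(test_size=0.2, seed=42)             # 80 train / 20 eval
tv = full["train"].train_test_split(train_size=0.75, seed=42)  # 60 train / 20 valid
assert (len(tv["train"]), len(tv["test"]), len(full["test"])) == (60, 20, 20)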
2 changes: 0 additions & 2 deletions examples/vit-finetune/client.py
@@ -8,9 +8,7 @@


class FedViTClient(NumPyClient):
-
    def __init__(self, trainset):
-
        self.trainset = trainset
        self.model = get_model()

1 change: 0 additions & 1 deletion examples/vit-finetune/main.py
@@ -19,7 +19,6 @@


def main():
-
    args = parser.parse_args()

    # To control the degree of parallelism
16 changes: 8 additions & 8 deletions examples/whisper-federated-finetuning/utils.py
@@ -107,10 +107,10 @@ def prepare_silences_dataset(train_dataset, ratio_silence: float = 0.1) -> Datas
"""Generate silences for the train set.
One of the classes in the SpeechCommands datatset is `silence`. However, the dataset
does not include clips of silence. It does however include 5 long files with different
background sounds. The taks of this function is to extract several (defined by `ratio_silence`)
one-second long clips from those background audio files. Later, those audio clips will be
included into the training set.
does not include clips of silence. It does however include 5 long files with
different background sounds. The taks of this function is to extract several
(defined by `ratio_silence`) one-second long clips from those background audio
files. Later, those audio clips will be included into the training set.
"""
# retrieve original silence audio clips
silences = [d for d in train_dataset if d["label"] == 35]
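To make the docstring concrete, an illustrative sketch of carving one-second clips out of a long background recording; the helper name and the 16 kHz sample rate are assumptions, not the repository's code.

def one_second_clips(waveform, sample_rate: int = 16000):
    # Consecutive, non-overlapping one-second chunks of the waveform
    return [
        waveform[i : i + sample_rate]
        for i in range(0, len(waveform) - sample_rate + 1, sample_rate)
    ]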
@@ -138,9 +138,9 @@ def prepare_silences_dataset(train_dataset, ratio_silence: float = 0.1) -> Datas
def construct_client_mapping(full_trainset, num_clients: int = 100):
    """Create a mapping to partition the dataset into `num_client` buckets.
-    These buckets contain the same number of `spekaer_id` but likely different
-    number of training exampes since each `speaker_id` in SpeechCommands does
-    provide different amounts of data to the dataset.
+    These buckets contain the same number of `spekaer_id` but likely different number of
+    training exampes since each `speaker_id` in SpeechCommands does provide different
+    amounts of data to the dataset.
    """
    client_ids = list(set(full_trainset["speaker_id"]))
    client_ids.remove(
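Stepping back from the truncated hunk above, a minimal sketch of the bucket idea this docstring describes: deal the unique speaker IDs into num_clients groups of roughly equal size. The helper below is illustrative only, not the repository's implementation.

import numpy as np

def speaker_buckets(speaker_ids, num_clients: int = 100):
    # Drop missing IDs, then split the unique speakers into num_clients groups
    unique_ids = sorted(sid for sid in set(speaker_ids) if sid is not None)
    return np.array_split(np.array(unique_ids), num_clients)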
@@ -191,7 +191,7 @@ def set_params(model: torch.nn.ModuleList, params: List[fl.common.NDArrays]):


def get_model(device, num_classes, compile: bool = True):
-    """Create model: Whisper-tiny Encoder + classification head"""
+    """Create model: Whisper-tiny Encoder + classification head."""
    encoder = WhisperForConditionalGeneration.from_pretrained(
        "openai/whisper-tiny"
    ).get_encoder()
4 changes: 3 additions & 1 deletion examples/xgboost-comprehensive/dataset.py
@@ -27,7 +27,9 @@ def instantiate_partitioner(partitioner_type: str, num_partitions: int):

def train_test_split(partition: Dataset, test_fraction: float, seed: int):
    """Split the data into train and validation set given split rate."""
-    train_test = partition.train_test_split(test_size=test_fraction, seed=seed, shuffle=False)
+    train_test = partition.train_test_split(
+        test_size=test_fraction, seed=seed, shuffle=False
+    )
    partition_train = train_test["train"]
    partition_test = train_test["test"]

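A hedged usage sketch for the helper above, assuming it simply returns the two splits shown (the actual example may also return example counts):

partition_train, partition_test = train_test_split(partition, test_fraction=0.2, seed=42)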
