fix(metrics.py): fix the bug where forget_rate metrics could not be printed
Signed-off-by: Marchons <[email protected]>
Yoda-wu committed Sep 26, 2024
1 parent 13a8bd8 commit 69c3ebf
Showing 14 changed files with 77 additions and 49 deletions.
2 changes: 1 addition & 1 deletion core/storymanager/rank/rank.py
@@ -138,7 +138,7 @@ def _sort_all_df(self, all_df, all_metric_names):

            if metric_name not in all_metric_names:
                continue
-
+            print(metric_name)
            sort_metric_list.append(metric_name)
            is_ascend_list.append(ele.get(metric_name) == "ascend")

@@ -52,7 +52,10 @@ class FederatedClassIncrementalLearning(FederatedLearning):
    def __init__(self, workspace, **kwargs):
        super().__init__(workspace, **kwargs)
        self.incremental_rounds = kwargs.get("incremental_rounds", 1)
-        self.system_metric_info = {SystemMetricType.FORGET_RATE.value: []}
+        self.system_metric_info = {
+            SystemMetricType.FORGET_RATE.value: [],
+            SystemMetricType.TASK_AVG_ACC.value: {},
+        }

        self.aggregate_clients = []
        self.train_infos = []
@@ -266,6 +269,10 @@ def evaluation(self, testdataset_files, incremental_round):
                    [testdataset_files[index]["y"][data_index]], res
                )
                acc_list.append(acc)
+            if index == len(testdataset_files) - 1:
+                self.system_metric_info[SystemMetricType.TASK_AVG_ACC.value][
+                    "accuracy"
+                ] = np.mean(acc_list)
            old_class_acc_list.extend(acc_list)
        current_forget_rate = 0.0
        max_acc_sum = 0
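
The hunk above records a task-average accuracy under the "accuracy" key of the TASK_AVG_ACC entry. A minimal sketch of how such an entry could be consumed, assuming a hypothetical task_avg_acc_func and the placeholder key "task_avg_acc" (the real SystemMetricType value is not shown in this diff):

import numpy as np

def task_avg_acc_func(system_metric_info: dict):
    # Read the mean accuracy the paradigm stored for the final evaluation round.
    info = system_metric_info.get("task_avg_acc")  # stand-in for SystemMetricType.TASK_AVG_ACC.value
    return info["accuracy"]

# Toy example: three per-class accuracies averaged by the paradigm.
demo = {"task_avg_acc": {"accuracy": float(np.mean([0.81, 0.74, 0.69]))}}
print(task_avg_acc_func(demo))  # ~0.7467
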
@@ -91,7 +91,7 @@ def run(self):
        # init client wait for connection
        # self.init_client()
        self.init_client()
-        dataset_files = self._split_dataset(1) # only one split ——all the data
+        dataset_files = self.get_all_train_data()
        train_dataset_file, _ = dataset_files[0]
        train_datasets = self.train_data_partition(train_dataset_file)
        for r in range(self.rounds):
@@ -102,6 +102,10 @@ def run(self):
        test_res = self.predict(self.dataset.test_url)
        return test_res, self.system_metric_info

+    def get_all_train_data(self):
+        split_time = 1 # only one split ——all the data
+        return self._split_dataset(split_time)
+
    def _split_dataset(self, splitting_dataset_times=1):
        """spit the dataset using ianvs dataset.split dataset method
2 changes: 1 addition & 1 deletion core/testcasecontroller/metrics/metrics.py
@@ -148,7 +148,7 @@ def forget_rate_func(system_metric_info: dict):
    compute task forget rate
    """
    info = system_metric_info.get(SystemMetricType.FORGET_RATE.value)
-    forget_rate = np.mean(info['forget_rate'])
+    forget_rate = np.mean(info)
    print(f"forget_rate: {forget_rate}")
    return forget_rate
@@ -191,7 +191,7 @@ def get_train_loader(self):
            # print('in get train loader' , exm_set[0].shape)
            train_x = np.concatenate((train_x, exm_set[0]), axis=0)
            label = np.array(exm_set[1])
-            label = label.reshape(-1, 1)
+            # label = label.reshape(-1, 1)
            train_y = np.concatenate((train_y, label), axis=0)
            logging.info(
                f"train_x shape: {train_x.shape} and train_y shape: {train_y.shape}"
@@ -20,7 +20,7 @@ algorithm:
              - 0.001
        - epochs:
            values:
-              - 10
+              - 1
    - type: "aggregation"
      name: "FedAvg"
      url: "./examples/cifar100/fci_ssl/fed_ci_match/algorithm/aggregation.py"
47 changes: 27 additions & 20 deletions examples/cifar100/fci_ssl/fed_ci_match/algorithm/basemodel.py
@@ -12,59 +12,66 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
+import os
import numpy as np
import keras
import tensorflow as tf
from sedna.common.class_factory import ClassType, ClassFactory
from model import resnet10
from FedCiMatch import FedCiMatch
-import logging
+import logging

-os.environ['BACKEND_TYPE'] = 'KERAS'
+os.environ["BACKEND_TYPE"] = "KERAS"
__all__ = ["BaseModel"]
logging.getLogger().setLevel(logging.INFO)

-@ClassFactory.register(ClassType.GENERAL, alias='fci_ssl')
+
+@ClassFactory.register(ClassType.GENERAL, alias="fci_ssl")
class BaseModel:
    def __init__(self, **kwargs) -> None:
        self.kwargs = kwargs
-        self.learning_rate = kwargs.get('learning_rate', 0.001)
-        self.epochs = kwargs.get('epochs', 1)
-        self.batch_size = kwargs.get('batch_size', 32)
-        self.task_size = kwargs.get('task_size', 10)
-        self.memory_size = kwargs.get('memory_size', 2000)
+        self.learning_rate = kwargs.get("learning_rate", 0.001)
+        self.epochs = kwargs.get("epochs", 1)
+        self.batch_size = kwargs.get("batch_size", 32)
+        self.task_size = kwargs.get("task_size", 10)
+        self.memory_size = kwargs.get("memory_size", 2000)
        # self.fe = self.build_feature_extractor()
-        self.num_classes = 10 # the number of class for the first task
-        self.FedCiMatch = FedCiMatch( self.num_classes, self.batch_size, self.epochs, self.learning_rate, self.memory_size, )
+        self.num_classes = 10  # the number of class for the first task
+        self.FedCiMatch = FedCiMatch(
+            self.num_classes,
+            self.batch_size,
+            self.epochs,
+            self.learning_rate,
+            self.memory_size,
+        )
        self.class_learned = 0

    def get_weights(self):
        print("get weights")
        return self.FedCiMatch.get_weights()

    def set_weights(self, weights):
        print("set weights")
        self.FedCiMatch.set_weights(weights)
-    def train(self, train_data,val_data, **kwargs):
-        task_id = kwargs.get('task_id', 0)
-        round = kwargs.get('round', 1)
-        task_size = kwargs.get('task_size', self.task_size)
+
+    def train(self, train_data, val_data, **kwargs):
+        task_id = kwargs.get("task_id", 0)
+        round = kwargs.get("round", 1)
+        task_size = kwargs.get("task_size", self.task_size)
        logging.info(f"in train: {round} task_id: {task_id}")
        self.class_learned += self.task_size
        self.FedCiMatch.before_train(task_id, round, train_data, task_size)
        self.FedCiMatch.train(round)
-        logging.info(f'update example memory')
+        logging.info(f"update example memory")
        self.FedCiMatch.build_exemplar()
-        return {'num_samples': self.FedCiMatch.get_data_size() , 'task_id': task_id}
+        return {"num_samples": self.FedCiMatch.get_data_size(), "task_id": task_id}

    def predict(self, data_files, **kwargs):
        result = {}
        for data in data_files:
            x = np.load(data)
            logging.info(f"predicting {x.shape}")
-            res = self.FedCiMatch.predict(x)
+            res = self.FedCiMatch.icarl_predict(x)
            # pred = tf.cast(tf.argmax(logits, axis=1), tf.int32)
            result[data] = res.numpy()
            print("finish predict")
@@ -57,7 +57,7 @@ benchmarkingjob:
      # currently the options of value are as follows:
      # 1> "all": select all metrics in the leaderboard;
      # 2> metrics in the leaderboard, e.g., "F1_SCORE"
-      metrics: [ "accuracy" ]
+      metrics: [ "accuracy", "forget_rate" ]

      # network of save selected and all dataitems in workspace `./rank` ; string type;
      # currently the options of value are as follows:
@@ -33,4 +33,4 @@ testenv:
    - name: "forget_rate"
  # incremental rounds setting of incremental learning; int type; default value is 2;
  incremental_rounds: 10
-  round: 3
+  round: 1
@@ -247,6 +247,9 @@ def _loss(self,x ,y):
    def _loss_l2(self, global_model):
        return 0.0

+    def unsupervised_loss(self, sux, wux):
+        return 0.0
+
    def _merge_models(self, round, base_model_weights, meta_model_weights):
        eta = np.exp(-self.beta * (round + 1 ) / self.num_rounds)
        merged_meta_parameters = [
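
unsupervised_loss(self, sux, wux) above is added as a stub that returns 0.0. The argument names suggest strongly and weakly augmented unlabeled batches, for which a FixMatch-style pseudo-label consistency loss is a common choice; the TensorFlow sketch below illustrates that general idea only and is not the repository's implementation.

import tensorflow as tf

def fixmatch_style_unsupervised_loss(strong_logits, weak_logits, threshold=0.95):
    """Cross-entropy between confident pseudo-labels from the weak view and the strong view."""
    weak_probs = tf.nn.softmax(tf.stop_gradient(weak_logits), axis=-1)
    pseudo_labels = tf.argmax(weak_probs, axis=-1)
    # Only keep samples whose weak-view prediction is confident enough.
    mask = tf.cast(tf.reduce_max(weak_probs, axis=-1) >= threshold, tf.float32)
    per_example = tf.keras.losses.sparse_categorical_crossentropy(
        pseudo_labels, strong_logits, from_logits=True
    )
    return tf.reduce_mean(per_example * mask)

# Example with random logits:
s = tf.random.normal((8, 10))
w = tf.random.normal((8, 10))
print(float(fixmatch_style_unsupervised_loss(s, w)))
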
@@ -20,7 +20,7 @@ algorithm:
              - 0.001
        - epochs:
            values:
-              - 32
+              - 16
    - type: "aggregation"
      name: "FedAvg"
      url: "./examples/cifar100/fci_ssl/fed_ci_match_v2/algorithm/aggregation.py"
43 changes: 25 additions & 18 deletions examples/cifar100/fci_ssl/fed_ci_match_v2/algorithm/basemodel.py
@@ -12,50 +12,57 @@
# See the License for the specific language governing permissions and
# limitations under the License.

-import os
+import os
import numpy as np
import keras
import tensorflow as tf
from sedna.common.class_factory import ClassType, ClassFactory
from model import resnet10
from FedCiMatch import FedCiMatch
-import logging
+import logging

-os.environ['BACKEND_TYPE'] = 'KERAS'
+os.environ["BACKEND_TYPE"] = "KERAS"
__all__ = ["BaseModel"]
logging.getLogger().setLevel(logging.INFO)

-@ClassFactory.register(ClassType.GENERAL, alias='fci_ssl')
+
+@ClassFactory.register(ClassType.GENERAL, alias="fci_ssl")
class BaseModel:
    def __init__(self, **kwargs) -> None:
        self.kwargs = kwargs
-        self.learning_rate = kwargs.get('learning_rate', 0.001)
-        self.epochs = kwargs.get('epochs', 1)
-        self.batch_size = kwargs.get('batch_size', 32)
-        self.task_size = kwargs.get('task_size', 10)
-        self.memory_size = kwargs.get('memory_size', 2000)
+        self.learning_rate = kwargs.get("learning_rate", 0.001)
+        self.epochs = kwargs.get("epochs", 1)
+        self.batch_size = kwargs.get("batch_size", 32)
+        self.task_size = kwargs.get("task_size", 10)
+        self.memory_size = kwargs.get("memory_size", 2000)
        # self.fe = self.build_feature_extractor()
-        self.num_classes = 10 # the number of class for the first task
-        self.FedCiMatch = FedCiMatch( self.num_classes, self.batch_size, self.epochs, self.learning_rate, self.memory_size, )
+        self.num_classes = 2  # the number of class for the first task
+        self.FedCiMatch = FedCiMatch(
+            self.num_classes,
+            self.batch_size,
+            self.epochs,
+            self.learning_rate,
+            self.memory_size,
+        )
        self.class_learned = 0

    def get_weights(self):
        print("get weights")
        return self.FedCiMatch.get_weights()

    def set_weights(self, weights):
        print("set weights")
        self.FedCiMatch.set_weights(weights)
-    def train(self, train_data,val_data, **kwargs):
-        task_id = kwargs.get('task_id', 0)
-        round = kwargs.get('round', 1)
+
+    def train(self, train_data, val_data, **kwargs):
+        task_id = kwargs.get("task_id", 0)
+        round = kwargs.get("round", 1)
+        round = task_id * 1 + round
-        task_size = kwargs.get('task_size', self.task_size)
+        task_size = kwargs.get("task_size", self.task_size)
        logging.info(f"in train: {round} task_id: {task_id}")
        self.FedCiMatch.before_train(task_id, round, train_data, task_size)
        self.FedCiMatch.train(task_id, round)
-        return {'num_samples': self.FedCiMatch.get_data_size() , 'task_id': task_id}
+        return {"num_samples": self.FedCiMatch.get_data_size(), "task_id": task_id}

    def predict(self, data_files, **kwargs):
        result = {}
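
The added line round = task_id * 1 + round in the train() hunk above appears to convert the per-task round counter into a global one, with the hard-coded 1 matching the single round per task configured in the test environment below. A hedged reading of that arithmetic:

def global_round(task_id: int, local_round: int, rounds_per_task: int = 1) -> int:
    # With one round per task, task 3 / local round 1 becomes global round 4.
    return task_id * rounds_per_task + local_round

print(global_round(task_id=3, local_round=1))  # 4
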
@@ -57,7 +57,7 @@ benchmarkingjob:
      # currently the options of value are as follows:
      # 1> "all": select all metrics in the leaderboard;
      # 2> metrics in the leaderboard, e.g., "F1_SCORE"
-      metrics: [ "accuracy" ]
+      metrics: [ "accuracy", "forget_rate" ]

      # network of save selected and all dataitems in workspace `./rank` ; string type;
      # currently the options of value are as follows:
@@ -32,5 +32,5 @@ testenv:
      url: "/home/wyd/ianvs/project/ianvs/examples/cifar100/fci_ssl/fed_ci_match_v2/testenv/acc.py"
    - name: "forget_rate"
  # incremental rounds setting of incremental learning; int type; default value is 2;
-  incremental_rounds: 10
+  incremental_rounds: 50
  round: 1
