diff --git a/src_py/apiServer/experiment_flow_debug.py b/src_py/apiServer/experiment_flow_debug.py
index 54cd2839b..61365a6f2 100644
--- a/src_py/apiServer/experiment_flow_debug.py
+++ b/src_py/apiServer/experiment_flow_debug.py
@@ -35,6 +35,7 @@ def print_test(in_str : str):
 
 experiment_inst = api_server_instance.get_experiment(experiment_name)
 exp_stats = Stats(experiment_inst)
+exp_stats.get_loss_min(saveToFile=True)
 loss = exp_stats.get_loss()
 loss_min = exp_stats.get_loss_min()
 conf = exp_stats.get_confusion_matrices()
diff --git a/src_py/apiServer/experiment_flow_test.py b/src_py/apiServer/experiment_flow_test.py
index 0c8ffdc5c..6d439c1b2 100644
--- a/src_py/apiServer/experiment_flow_test.py
+++ b/src_py/apiServer/experiment_flow_test.py
@@ -13,7 +13,8 @@ def print_test(in_str : str):
 
 NERLNET_PATH = os.getenv('NERLNET_PATH')
 TESTS_PATH = os.getenv('TESTS_PATH')
-TESTS_BASELINE = os.getenv('TESTS_BASELINE')
+TESTS_BASELINE_ACC_STATS = os.getenv('TEST_BASELINE_ACC_STATS')
+TESTS_BASELINE_LOSS_MIN = os.getenv('TEST_BASELINE_LOSS_MIN')
 NERLNET_RUN_SCRIPT = "./NerlnetRun.sh --run-mode release"
 NERLNET_RUN_STOP_SCRIPT = "./NerlnetRun.sh --run-mode stop"
 NERLNET_RUNNING_TIMEOUT_SEC = int(os.getenv('NERLNET_RUNNING_TIMEOUT_SEC'))
@@ -63,21 +64,31 @@ def print_test(in_str : str):
     print_test(stdout)
 
 exp_stats = Stats(experiment_inst)
-data = exp_stats.get_loss_min()
+loss_min = exp_stats.get_loss_min(saveToFile=True)
 print_test("min loss of each worker")
-print(data)
+baseline_loss_min = import_dict_json(TESTS_BASELINE_LOSS_MIN)
+for worker in loss_min.keys():
+    diff = abs(loss_min[worker] - baseline_loss_min[worker])
+    if baseline_loss_min[worker] == 0:
+        error = diff
+    else:
+        error = diff/baseline_loss_min[worker]
+    print_test(f"worker: {worker}, diff: {diff} , error: {error}")
+    if error > TEST_ACCEPTABLE_MARGIN_OF_ERROR:
+        print(f"Anomaly failure detected")
+        print(f"Error: {error} , Acceptable error: {TEST_ACCEPTABLE_MARGIN_OF_ERROR}")
+        exit(1)
+
+
 
 conf = exp_stats.get_confusion_matrices()
 acc_stats = exp_stats.get_accuracy_stats(conf)
-baseline_acc_stats = import_dict_json(TESTS_BASELINE)
-diff_from_baseline = []
+baseline_acc_stats = import_dict_json(TESTS_BASELINE_ACC_STATS)
 for worker in acc_stats.keys():
     for j in acc_stats[worker].keys():
         diff = abs(acc_stats[worker][j]["F1"] - baseline_acc_stats[worker][str(j)]["F1"])
-        diff_from_baseline.append(diff/baseline_acc_stats[worker][str(j)]["F1"])
-anomaly_detected = not all([x < TEST_ACCEPTABLE_MARGIN_OF_ERROR for x in diff_from_baseline])
-if anomaly_detected:
-    print_test("Anomaly failure detected")
-    print_test(f"diff_from_baseline: {diff_from_baseline}")
-    exit(1)
-
+        error = diff/baseline_acc_stats[worker][str(j)]["F1"]
+        if error > TEST_ACCEPTABLE_MARGIN_OF_ERROR:
+            print_test("Anomaly failure detected")
+            print_test(f"diff_from_baseline: {diff}")
+            exit(1)
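
The min-loss check and the F1 check above apply the same pattern: a relative error against the baseline, falling back to the absolute difference when the baseline value is zero (the shipped min-loss baseline added below is all zeros). A minimal sketch of that shared pattern as a helper; relative_error and within_margin are hypothetical names, not part of the Nerlnet test suite:

# Sketch only: the comparison pattern used by both baseline checks above.
def relative_error(actual: float, baseline: float) -> float:
    diff = abs(actual - baseline)
    # When the baseline is zero (as in min_loss_synt_1d_2c_4r_4w.json),
    # fall back to the absolute difference to avoid dividing by zero.
    return diff if baseline == 0 else diff / baseline

def within_margin(actual: dict, baseline: dict, margin: float) -> bool:
    # Returns False as soon as any worker exceeds the acceptable margin.
    for worker, value in actual.items():
        error = relative_error(value, baseline[worker])
        print(f"worker: {worker}, error: {error}")
        if error > margin:
            return False
    return True

# Hypothetical usage mirroring the test flow above:
# if not within_margin(loss_min, baseline_loss_min, TEST_ACCEPTABLE_MARGIN_OF_ERROR):
#     exit(1)
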
diff --git a/src_py/apiServer/stats.py b/src_py/apiServer/stats.py
index f4be4149d..26121ad29 100644
--- a/src_py/apiServer/stats.py
+++ b/src_py/apiServer/stats.py
@@ -44,6 +44,9 @@ def get_loss(self , plot : bool = False , saveToFile : bool = False):
             plt.grid(visible=True, which='minor', linestyle='-', alpha=0.7)
             plt.show()
             plt.savefig(f'{EXPERIMENT_RESULTS_PATH}/{self.experiment.name}/Training/Loss_graph.png')
+
+        if saveToFile:
+            export_dict_json(f'{EXPERIMENT_RESULTS_PATH}/{self.exp_path}/loss.json', loss_dict)
         return loss_dict
     
     def get_loss_min(self , plot : bool = False , saveToFile : bool = False):
@@ -69,6 +72,9 @@ def get_loss_min(self , plot : bool = False , saveToFile : bool = False):
             plt.grid(visible=True, which='minor', linestyle='-', alpha=0.7)
             plt.show()
             plt.savefig(f'{EXPERIMENT_RESULTS_PATH}/{self.experiment.name}/Training/Min_loss_graph.png')
+
+        if saveToFile:
+            export_dict_json(f'{EXPERIMENT_RESULTS_PATH}/{self.exp_path}/min_loss.json', min_loss_dict)
         return min_loss_dict
 
     def get_confusion_matrices(self , normalize : bool = False ,plot : bool = False , saveToFile : bool = False):
@@ -110,6 +116,9 @@ def get_confusion_matrices(self , normalize : bool = False ,plot : bool = False
             plt.subplots_adjust(wspace=1, hspace=0.15) 
             f.colorbar(disp.im_, ax=axes)
             plt.show()
+
+        if saveToFile:
+            export_dict_json(f'{EXPERIMENT_RESULTS_PATH}/{self.exp_path}/confusion_matrices.json', workers_confusion_matrices)
         return workers_confusion_matrices
     
     def get_accuracy_stats(self , confMatDict , show : bool = False , saveToFile : bool = False) -> dict:
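
The tests depend on export_dict_json (used in stats.py above) and import_dict_json (used in the test scripts) round-tripping plain dicts through JSON files. A minimal sketch of that assumed contract using the standard json module; the real Nerlnet helpers may differ in details such as indentation:

import json

# Assumed behaviour of the Nerlnet helpers, sketched with the standard library.
def export_dict_json(path: str, data: dict) -> None:
    with open(path, 'w') as f:
        json.dump(data, f, indent=4)

def import_dict_json(path: str) -> dict:
    with open(path, 'r') as f:
        return json.load(f)

Note that JSON object keys are always strings, which is why the F1 comparison above indexes the imported baseline with str(j) while the freshly computed acc_stats is indexed with j.
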
diff --git a/tests/NerlnetFullFlowTest.sh b/tests/NerlnetFullFlowTest.sh
index 3ad6b2c19..6d883ead2 100755
--- a/tests/NerlnetFullFlowTest.sh
+++ b/tests/NerlnetFullFlowTest.sh
@@ -13,7 +13,8 @@ NERLNET_CONFIG_INPUT_DATA_DIR=$NERLNET_CONFIG_DIR/inputDataDir.nerlconfig
 NERLNET_CONFIG_INPUT_DATA_DIR_BACKUP=$NERLNET_CONFIG_DIR/inputDataDir.nerlconfig.bac
 
 TEST_INPUT_JSONS_FILES_DIR="$TESTS_PATH/inputJsonsFiles"
-export TESTS_BASELINE="$TEST_INPUT_JSONS_FILES_DIR/accuracy_stats_synt_1d_2c_4r_4w.json"
+export TEST_BASELINE_ACC_STATS="$TEST_INPUT_JSONS_FILES_DIR/accuracy_stats_synt_1d_2c_4r_4w.json"
+export TEST_BASELINE_LOSS_MIN="$TEST_INPUT_JSONS_FILES_DIR/min_loss_synt_1d_2c_4r_4w.json"
 
 TEST_ARCH_JSON_NOIP_0=$TEST_INPUT_JSONS_FILES_DIR/arch_test_synt_1d_2c_1s_4r_4w.json.noip
 TEST_ARCH_JSON_0=$TEST_INPUT_JSONS_FILES_DIR/arch_test_synt_1d_2c_1s_4r_4w.json
diff --git a/tests/inputJsonsFiles/min_loss_synt_1d_2c_4r_4w.json b/tests/inputJsonsFiles/min_loss_synt_1d_2c_4r_4w.json
new file mode 100644
index 000000000..7e766f8e7
--- /dev/null
+++ b/tests/inputJsonsFiles/min_loss_synt_1d_2c_4r_4w.json
@@ -0,0 +1,6 @@
+{
+    "w1": 0.0,
+    "w2": 0.0,
+    "w3": 0.0,
+    "w4": 0.0
+}
\ No newline at end of file
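
One way to refresh the baseline file added above after an intentional model change is to copy the min_loss.json produced by get_loss_min(saveToFile=True); a sketch only, with the results path left as a placeholder since it depends on EXPERIMENT_RESULTS_PATH and the experiment's exp_path:

import shutil

# Placeholder source path; substitute the directory actually written by stats.py.
results_min_loss = "<EXPERIMENT_RESULTS_PATH>/<exp_path>/min_loss.json"
baseline_min_loss = "tests/inputJsonsFiles/min_loss_synt_1d_2c_4r_4w.json"
shutil.copyfile(results_min_loss, baseline_min_loss)
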