Skip to content

Commit

Permalink
[NERLPLANNER] Add loss args
Browse files Browse the repository at this point in the history
  • Loading branch information
leondavi committed Aug 7, 2024
1 parent 3942784 commit a7b77d5
Show file tree
Hide file tree
Showing 13 changed files with 66 additions and 33 deletions.
3 changes: 2 additions & 1 deletion src_cpp/common/nerlWorker.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,15 @@ using namespace nerlnet;

NerlWorker::NerlWorker(int model_type, std::string &model_args_str , std::string &layer_sizes_str, std::string &layer_types_list, std::string &layers_functionality,
float learning_rate, int epochs, int optimizer_type, std::string &optimizer_args_str,
int loss_method, int distributed_system_type, std::string &distributed_system_args_str)
int loss_method, std::string &loss_args_str, int distributed_system_type, std::string &distributed_system_args_str)
{
_model_type = model_type;
_model_args_str = model_args_str;
_learning_rate = learning_rate;
_epochs = epochs;
_optimizer_type = optimizer_type;
_loss_method = loss_method;
_loss_args_str = loss_args_str;
_distributed_system_type = distributed_system_type;
_distributed_system_args_str = distributed_system_args_str;
_nerl_layers_linked_list = parse_layers_input(layer_sizes_str,layer_types_list,layers_functionality);
Expand Down
3 changes: 2 additions & 1 deletion src_cpp/common/nerlWorker.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ namespace nerlnet

NerlWorker(int model_type, std::string &model_args_str, std::string &layer_sizes_str, std::string &layer_types_list, std::string &layers_functionality,
float learning_rate, int epochs, int optimizer_type, std::string &optimizer_args_str,
int loss_method, int distributed_system_type, std::string &distributed_system_args_str);
int loss_method, std::string &loss_args_str, int distributed_system_type, std::string &distributed_system_args_str);
~NerlWorker();

std::shared_ptr<NerlLayer> parse_layers_input(std::string &layer_sizes_str, std::string &layer_types_list, std::string &layers_functionality);
Expand All @@ -35,6 +35,7 @@ namespace nerlnet
int _epochs;
int _optimizer_type;
int _loss_method;
std::string _loss_args_str;
std::string _distributed_system_args_str;
std::shared_ptr<std::vector<int>> _train_labels_count; // accumulates the number of each label in the training set

Expand Down
4 changes: 2 additions & 2 deletions src_cpp/common/nerlWorkerFunc.h
Original file line number Diff line number Diff line change
Expand Up @@ -58,7 +58,7 @@ enum {KERNEL_SIZE = -1, PADDING_SIZE = -2,STRIDE_SIZE = -3 ,POOLING_SIZE= -4 , I

template <class NerlWorkerType>
std::shared_ptr<NerlWorkerType> parse_model_params(std::string &model_type_str,std::string &model_args_str, std::string &learning_rate_str,std::string &epochs_str,
std::string &optimizer_type_str,std::string &loss_method_str,std::string &distributed_system_type_str,
std::string &optimizer_type_str,std::string &loss_method_str, std::string &loss_args_str, std::string &distributed_system_type_str,
std::string &layer_sizes_str,std::string &layer_types_str,std::string &layers_functionality_str,
std::string &optimizer_args_str,std::string &distributed_system_args_str)
{
Expand All @@ -68,7 +68,7 @@ std::shared_ptr<NerlWorkerType> parse_model_params(std::string &model_type_str,s
int optimizer_type = std::stoi(optimizer_type_str);
int loss_method = std::stoi(loss_method_str);
int distributed_system_type = std::stoi(distributed_system_type_str);
return std::make_shared<NerlWorkerType>(model_type, model_args_str, layer_sizes_str, layer_types_str, layers_functionality_str,learning_rate, epochs, optimizer_type, optimizer_args_str, loss_method, distributed_system_type, distributed_system_args_str);
return std::make_shared<NerlWorkerType>(model_type, model_args_str, layer_sizes_str, layer_types_str, layers_functionality_str,learning_rate, epochs, optimizer_type, optimizer_args_str, loss_method, loss_args_str, distributed_system_type, distributed_system_args_str);
}

static void parse_layer_sizes_str(std::string &layer_sizes_str, std::vector<int> &layers_types_vec, std::vector<LayerSizingParams_t> &out_layer_sizes_params)
Expand Down
20 changes: 13 additions & 7 deletions src_cpp/opennnBridge/nerlWorkerNIF.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,18 +10,18 @@
using namespace nerlnet;

static std::shared_ptr<NerlWorkerOpenNN> create_nerlworker(std::string &model_type_str, std::string &model_args_str, std::string &learning_rate_str,
std::string &epochs_str, std::string &optimizer_type_str, std::string &loss_method_str,
std::string &epochs_str, std::string &optimizer_type_str, std::string &loss_method_str, std::string &loss_args_str,
std::string &distributed_system_type_str, std::string &layer_sizes_str, std:: string &layer_types_str,
std::string &layers_functionality_str, std::string &optimizer_args_str, std::string &distributed_system_args_str) //all should be const reference
{
std::shared_ptr<NerlWorkerOpenNN> new_worker = parse_model_params<NerlWorkerOpenNN>(model_type_str,model_args_str,learning_rate_str,epochs_str,optimizer_type_str,loss_method_str,distributed_system_type_str,layer_sizes_str,
std::shared_ptr<NerlWorkerOpenNN> new_worker = parse_model_params<NerlWorkerOpenNN>(model_type_str,model_args_str,learning_rate_str,epochs_str,optimizer_type_str,loss_method_str, loss_args_str, distributed_system_type_str,layer_sizes_str,
layer_types_str,layers_functionality_str,optimizer_args_str,distributed_system_args_str);
return new_worker;
}
static ERL_NIF_TERM new_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
enum{ARG_MODEL_ID,ARG_MODEL_TYPE, ARG_MODEL_ARGS, ARG_LAYERS_SIZES, ARG_LAYERS_TYPES, ARG_LAYERS_FUNCTIONALITY_CODES, ARG_LEARNING_RATE, ARG_EPOCHS, ARG_OPTIMIZER_TYPE,
ARG_OPTIMIZER_ARGS, ARG_LOSS_METHOD, ARG_DISTRIBUTED_SYSTEM_TYPE, ARG_DISTRIBUTED_SYSTEM_ARGS};
ARG_OPTIMIZER_ARGS, ARG_LOSS_METHOD, ARG_LOSS_ARGS, ARG_DISTRIBUTED_SYSTEM_TYPE, ARG_DISTRIBUTED_SYSTEM_ARGS};

unsigned long modelId;
std::string model_type_str;
Expand All @@ -34,6 +34,7 @@ static ERL_NIF_TERM new_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_T
std::string optimizer_type_str;
std::string optimizer_args_str;
std::string loss_method_str;
std::string loss_args_str;
std::string distributed_system_type_str;
std::string distributed_system_args_str;

Expand All @@ -48,10 +49,12 @@ static ERL_NIF_TERM new_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_T
nifpp::get_throws(env, argv[ARG_OPTIMIZER_TYPE], optimizer_type_str);
nifpp::get_throws(env, argv[ARG_OPTIMIZER_ARGS], optimizer_args_str);
nifpp::get_throws(env, argv[ARG_LOSS_METHOD], loss_method_str);
nifpp::get_throws(env, argv[ARG_LOSS_ARGS], loss_args_str);
nifpp::get_throws(env, argv[ARG_DISTRIBUTED_SYSTEM_TYPE], distributed_system_type_str);
nifpp::get_throws(env, argv[ARG_DISTRIBUTED_SYSTEM_ARGS], distributed_system_args_str);

std::shared_ptr<NerlWorkerOpenNN> new_nerl_worker_ptr = create_nerlworker(model_type_str,model_args_str,learning_rate_str,epochs_str,optimizer_type_str,loss_method_str,distributed_system_type_str,layer_sizes_str,
std::shared_ptr<NerlWorkerOpenNN> new_nerl_worker_ptr = create_nerlworker(model_type_str,model_args_str,learning_rate_str,epochs_str,optimizer_type_str,
loss_method_str,loss_args_str,distributed_system_type_str,layer_sizes_str,
layer_types_str,layers_functionality_str,optimizer_args_str,distributed_system_args_str);
// Create the singleton instance
BridgeController& onnBrCtrl = BridgeController::GetInstance();
Expand All @@ -65,7 +68,7 @@ static ERL_NIF_TERM new_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_T
static ERL_NIF_TERM test_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
enum{ARG_MODEL_ID,ARG_MODEL_TYPE, ARG_MODEL_ARGS , ARG_LAYERS_SIZES, ARG_LAYERS_TYPES, ARG_LAYERS_FUNCTIONALITY_CODES, ARG_LEARNING_RATE, ARG_EPOCHS, ARG_OPTIMIZER_TYPE,
ARG_OPTIMIZER_ARGS, ARG_LOSS_METHOD, ARG_DISTRIBUTED_SYSTEM_TYPE, ARG_DISTRIBUTED_SYSTEM_ARGS};
ARG_OPTIMIZER_ARGS, ARG_LOSS_METHOD, ARG_LOSS_ARGS, ARG_DISTRIBUTED_SYSTEM_TYPE, ARG_DISTRIBUTED_SYSTEM_ARGS};

unsigned long modelId;
std::string model_type_str;
Expand All @@ -78,6 +81,7 @@ static ERL_NIF_TERM test_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_
std::string optimizer_type_str;
std::string optimizer_args_str;
std::string loss_method_str;
std::string loss_args_str;
std::string distributed_system_type_str;
std::string distributed_system_args_str;

Expand All @@ -92,10 +96,12 @@ static ERL_NIF_TERM test_nerlworker_nif(ErlNifEnv* env, int argc, const ERL_NIF_
nifpp::get_throws(env, argv[ARG_OPTIMIZER_TYPE], optimizer_type_str);
nifpp::get_throws(env, argv[ARG_OPTIMIZER_ARGS], optimizer_args_str);
nifpp::get_throws(env, argv[ARG_LOSS_METHOD], loss_method_str);
nifpp::get_throws(env, argv[ARG_LOSS_ARGS], loss_args_str);
nifpp::get_throws(env, argv[ARG_DISTRIBUTED_SYSTEM_TYPE], distributed_system_type_str);
nifpp::get_throws(env, argv[ARG_DISTRIBUTED_SYSTEM_ARGS], distributed_system_args_str);
std::shared_ptr<NerlWorkerOpenNN> new_nerl_worker_ptr = create_nerlworker(model_type_str,model_args_str,learning_rate_str,epochs_str,optimizer_type_str,loss_method_str,distributed_system_type_str,layer_sizes_str,
layer_types_str,layers_functionality_str,optimizer_args_str,distributed_system_args_str);
std::shared_ptr<NerlWorkerOpenNN> new_nerl_worker_ptr = create_nerlworker(model_type_str,model_args_str,learning_rate_str,epochs_str,
optimizer_type_str,loss_method_str,loss_args_str,distributed_system_type_str,layer_sizes_str,
layer_types_str,layers_functionality_str,optimizer_args_str,distributed_system_args_str);
// Create the singleton instance
BridgeController& onnBrCtrl = BridgeController::GetInstance();
// Put the model record to the map with modelId
Expand Down
24 changes: 17 additions & 7 deletions src_cpp/opennnBridge/nerlWorkerOpenNN.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,9 +9,9 @@ namespace nerlnet

NerlWorkerOpenNN::NerlWorkerOpenNN(int model_type, std::string &model_args_str , std::string &layer_sizes_str, std::string &layer_types_list, std::string &layers_functionality,
float learning_rate, int epochs, int optimizer_type, std::string &optimizer_args_str,
int loss_method, int distributed_system_type, std::string &distributed_system_args_str) : NerlWorker(model_type, model_args_str , layer_sizes_str, layer_types_list, layers_functionality,
int loss_method, std::string &loss_args_str, int distributed_system_type, std::string &distributed_system_args_str) : NerlWorker(model_type, model_args_str , layer_sizes_str, layer_types_list, layers_functionality,
learning_rate, epochs, optimizer_type, optimizer_args_str,
loss_method, distributed_system_type, distributed_system_args_str)
loss_method, loss_args_str, distributed_system_type, distributed_system_args_str)
{
_neural_network_ptr = std::make_shared<opennn::NeuralNetwork>();
generate_opennn_neural_network();
Expand Down Expand Up @@ -169,11 +169,13 @@ namespace nerlnet
**/
void NerlWorkerOpenNN::generate_training_strategy()
{
_training_strategy_ptr->set_neural_network_pointer(_neural_network_ptr.get()); // Neural network must be defined at this point
set_optimization_method(_optimizer_type,_learning_rate);
set_loss_method(_loss_method);
_training_strategy_ptr->set_maximum_epochs_number(_epochs);
_training_strategy_ptr->set_display(TRAINING_STRATEGY_SET_DISPLAY_OFF);
_training_strategy_ptr->set_neural_network_pointer(_neural_network_ptr.get()); // Neural network must be defined at this point
set_optimization_method(_optimizer_type,_learning_rate);
set_loss_method(_loss_method);
// TODO Ori add here the parsing of loss args
// _training_strategy_ptr->get_loss_index_pointer()->set_regularization_method(reg_val);
_training_strategy_ptr->set_maximum_epochs_number(_epochs);
_training_strategy_ptr->set_display(TRAINING_STRATEGY_SET_DISPLAY_OFF);
}

void NerlWorkerOpenNN::set_optimization_method(int optimizer_type,int learning_rate){
Expand Down Expand Up @@ -862,4 +864,12 @@ namespace nerlnet
}
}

opennn::LossIndex::RegularizationMethod NerlWorkerOpenNN::parse_loss_args(const std::string &loss_args)
{
    // Maps the worker's loss-args string to an OpenNN regularization method.
    //
    // Accepted tokens: "L2" and "L1" (checked in that order so that "L2" is
    // not shadowed by a substring match). Any other content — including an
    // empty string, i.e. "nothing is given" — falls back to NoRegularization,
    // which is the default this stub's original TODO explicitly required
    // (the previous body returned L1 unconditionally, contradicting it).
    //
    // NOTE(review): the exact loss_args grammar is produced by the
    // NERLPLANNER / Erlang side and is not visible here — confirm the token
    // names ("L1"/"L2") against the planner output before extending this.
    if (loss_args.find("L2") != std::string::npos)
    {
        return opennn::LossIndex::RegularizationMethod::L2;
    }
    if (loss_args.find("L1") != std::string::npos)
    {
        return opennn::LossIndex::RegularizationMethod::L1;
    }
    return opennn::LossIndex::RegularizationMethod::NoRegularization;
}

} // namespace nerlnet
3 changes: 2 additions & 1 deletion src_cpp/opennnBridge/nerlWorkerOpenNN.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ class NerlWorkerOpenNN : public NerlWorker

NerlWorkerOpenNN(int model_type, std::string &model_args_str , std::string &layer_sizes_str, std::string &layer_types_list, std::string &layers_functionality,
float learning_rate, int epochs, int optimizer_type, std::string &optimizer_args_str,
int loss_method, int distributed_system_type, std::string &distributed_system_args_str);
int loss_method, std::string &loss_args_str, int distributed_system_type, std::string &distributed_system_args_str);
~NerlWorkerOpenNN();

void generate_opennn_neural_network();
Expand Down Expand Up @@ -82,6 +82,7 @@ class NerlWorkerOpenNN : public NerlWorker
opennn::Scaler translate_unscaling_method(int scaling_method);
opennn::PoolingLayer::PoolingMethod translate_pooling_method(int pooling_method);
opennn::ProbabilisticLayer::ActivationFunction translate_probabilistic_activation_function(int activation_function);
opennn::LossIndex::RegularizationMethod parse_loss_args(const std::string &loss_args_str);

int translate_pooling_method_int(int pooling_method);
int translate_model_type(int model_type, int &custom_model);
Expand Down
4 changes: 2 additions & 2 deletions src_erl/NerlnetApp/src/Bridge/onnWorkers/nerlNIF.erl
Original file line number Diff line number Diff line change
Expand Up @@ -221,15 +221,15 @@ nerltensor_scalar_multiplication_erl({NerlTensorErl, Type}, ScalarValue) ->
%%%%%% NerlWorker NIF Methods %%%%%%

new_nerlworker_nif(_ModelId,_ModelType, _ModelArgs , _LayersSizes, _LayersTypes, _LayersFunctionalityCodes, _LearningRate, _Epochs, _OptimizerType,
_OptimizerArgs, _LossMethod, _DistributedSystemType, _DistributedSystemArgs) ->
_OptimizerArgs, _LossMethod, _LossArgs, _DistributedSystemType, _DistributedSystemArgs) ->
exit(nif_library_not_loaded).

%% @doc Erlang-side stub for the remove_nerlworker NIF. At runtime this body
%% is replaced by the native implementation when the NIF shared library is
%% loaded; if it ever executes, the library failed to load, so the caller is
%% deliberately crashed with reason nif_library_not_loaded.
%% _ModelId presumably identifies the worker/model to remove on the C++
%% side — confirm against the native new_nerlworker/remove_nerlworker pair.
remove_nerlworker_nif(_ModelId) ->
    exit(nif_library_not_loaded).

%% All of inputs must be binary strings! except for _ModelId which is an integer
test_nerlworker_nif(_ModelId,_ModelType, _ModelArgs, _LayersSizes, _LayersTypes, _LayersFunctionalityCodes, _LearningRate, _Epochs, _OptimizerType,
_OptimizerArgs, _LossMethod, _DistributedSystemType, _DistributedSystemArgs) ->
_OptimizerArgs, _LossMethod, _LossArgs, _DistributedSystemType, _DistributedSystemArgs) ->
exit(nif_library_not_loaded).

% input - unsigned long modelId
Expand Down
2 changes: 1 addition & 1 deletion src_erl/NerlnetApp/src/Bridge/onnWorkers/workerGeneric.erl
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ init({WorkerName , WorkerArgs , DistributedBehaviorFunc , DistributedWorkerData


Res = nerlNIF:new_nerlworker_nif(ModelID , ModelType, ModelArgs, LayersSizes, LayersTypes, LayersFunctionalityCodes, LearningRate, Epochs, OptimizerType,
OptimizerArgs, LossMethod , DistributedSystemType , DistributedSystemArgs),
OptimizerArgs, LossMethod , LossArgs, DistributedSystemType , DistributedSystemArgs),
DistributedBehaviorFunc(init,{GenWorkerEts, DistributedWorkerData}),

if
Expand Down
4 changes: 2 additions & 2 deletions src_erl/NerlnetApp/src/Client/clientWorkersFunctions.erl
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ create_workers(ClientName, ClientEtsRef , ShaToModelArgsMap , EtsStats) ->
ModelID = erlang:unique_integer([positive]),
WorkerStatsETS = stats:generate_workers_stats_ets(),
{ok , SHA} = maps:find(WorkerName , ets:lookup_element(ClientEtsRef, workers_to_sha_map, ?DATA_IDX)),
{ModelType, ModelArgs, LayersSizes, LayersTypes, LayersFunctions, LossMethod,
{ModelType, ModelArgs, LayersSizes, LayersTypes, LayersFunctions, LossMethod, LossArgs,
LearningRate, Epochs, Optimizer, OptimizerArgs, _InfraType, DistributedSystemType,
DistributedSystemArgs, DistributedSystemToken} = maps:get(SHA, ShaToModelArgsMap),
DistributedTypeInteger = list_to_integer(DistributedSystemType),
Expand All @@ -64,7 +64,7 @@ create_workers(ClientName, ClientEtsRef , ShaToModelArgsMap , EtsStats) ->
W2wComPid = w2wCom:start_link({WorkerName, MyClientPid}), % TODO Switch to monitor instead of link

WorkerArgs = {ModelID , ModelType , ModelArgs , LayersSizes, LayersTypes, LayersFunctions, LearningRate , Epochs,
Optimizer, OptimizerArgs , LossMethod , DistributedSystemType , DistributedSystemToken, DistributedSystemArgs},
Optimizer, OptimizerArgs , LossMethod , LossArgs, DistributedSystemType , DistributedSystemToken, DistributedSystemArgs},
WorkerPid = workerGeneric:start_link({WorkerName , WorkerArgs , DistributedBehaviorFunc , DistributedWorkerData , MyClientPid , WorkerStatsETS , W2wComPid}),
gen_server:cast(W2wComPid, {update_gen_worker_pid, WorkerPid}),
ets:insert(WorkersETS, {WorkerName, {WorkerPid, WorkerArgs}}),
Expand Down
Loading

0 comments on commit a7b77d5

Please sign in to comment.