From 613174f15fecbb087355454eb444f622163856f7 Mon Sep 17 00:00:00 2001 From: shleym2000 Date: Tue, 9 Apr 2024 05:57:05 -0500 Subject: [PATCH 01/38] Update main.cpp Fixes compiling problem --- blank/main.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/blank/main.cpp b/blank/main.cpp index 49fdac1aa..21f6d9046 100755 --- a/blank/main.cpp +++ b/blank/main.cpp @@ -61,8 +61,8 @@ int main() Transformer transformer({ input_length, context_length, inputs_dimension, context_dimension, depth, perceptron_depth, heads_number, number_of_layers }); - Tensor& completion_vocabulary = language_data_set.get_completion_vocabulary(); - Tensor& context_vocabulary = language_data_set.get_context_vocabulary(); + const Tensor& completion_vocabulary = language_data_set.get_completion_vocabulary(); + const Tensor& context_vocabulary = language_data_set.get_context_vocabulary(); transformer.set_input_vocabulary(completion_vocabulary); transformer.set_context_vocabulary(context_vocabulary); From c0c7eef60f2134f1d862f5edd3ee98c43cc81341 Mon Sep 17 00:00:00 2001 From: Lenny Date: Fri, 17 May 2024 05:33:10 -0500 Subject: [PATCH 02/38] Eliminating few warnings --- opennn/convolutional_layer.cpp | 4 ++-- opennn/loss_index.cpp | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opennn/convolutional_layer.cpp b/opennn/convolutional_layer.cpp index e148478f8..cd1620c18 100644 --- a/opennn/convolutional_layer.cpp +++ b/opennn/convolutional_layer.cpp @@ -1301,7 +1301,7 @@ void ConvolutionalLayer::write_XML(tinyxml2::XMLPrinter& file_stream) const buffer.str(""); - for(Index i = 0; i < inputs_dimensions.size(); i++) + for(Index i = 0; i < static_cast(inputs_dimensions.size()); i++) { buffer << inputs_dimensions[i]; if(i != inputs_dimensions.size() - 1) buffer << " x "; @@ -1320,7 +1320,7 @@ void ConvolutionalLayer::write_XML(tinyxml2::XMLPrinter& file_stream) const buffer.str(""); - for(Index i = 0; i < inputs_dimensions.size(); i++) + for(Index i = 0; i < 
static_cast(inputs_dimensions.size()); i++) { buffer << get_outputs_dimensions()[i]; if(i != inputs_dimensions.size() - 1) buffer << " x "; diff --git a/opennn/loss_index.cpp b/opennn/loss_index.cpp index 174c051b0..601b2780e 100644 --- a/opennn/loss_index.cpp +++ b/opennn/loss_index.cpp @@ -999,7 +999,7 @@ void BackPropagation::set(const Index& new_batch_samples_number, LossIndex* new_ output_deltas_dimensions[0] = batch_samples_number; Index size = batch_samples_number; - for (Index i = 0; i < output_dimensions.size(); i++) + for (Index i = 0; i < static_cast(output_dimensions.size()); i++) { output_deltas_dimensions[i + 1] = output_dimensions[i]; size *= output_dimensions[i]; From 6d3cbd875297891bee23fbb9ce42a4813b63fdd9 Mon Sep 17 00:00:00 2001 From: shleym2000 Date: Sun, 9 Jun 2024 07:48:02 -0500 Subject: [PATCH 03/38] Eliminate type mismatch warning --- opennn/neural_network.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/neural_network.cpp b/opennn/neural_network.cpp index 0b9039c6e..6ab6cc3b2 100644 --- a/opennn/neural_network.cpp +++ b/opennn/neural_network.cpp @@ -1241,7 +1241,7 @@ Index NeuralNetwork::get_outputs_number() const Index outputs_number = 1; - for (Index i = 0; i < outputs_dimensions.size(); i++) outputs_number *= outputs_dimensions[i]; + for (Index i = 0; i < static_cast(outputs_dimensions.size()); i++) outputs_number *= outputs_dimensions[i]; return outputs_number; } From ed9447ff7245624e6827ad97ee100f6cde27cd18 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Mon, 10 Jun 2024 10:18:04 -0500 Subject: [PATCH 04/38] Eliminated warning --- opennn/language_data_set.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/language_data_set.cpp b/opennn/language_data_set.cpp index 7f72fda43..38d1a355f 100644 --- a/opennn/language_data_set.cpp +++ b/opennn/language_data_set.cpp @@ -1779,7 +1779,7 @@ const Tensor LanguageDataSet::calculate_vocabulary(const Tensor vocabulary = 
calculate_vocabulary_binary_search(filtered_counts, lower_search, upper_search, parameters); Tensor vocabulary_tensor(vocabulary.size()); - for (Index i = 0; i < vocabulary.size(); i++) vocabulary_tensor(i) = vocabulary[i]; + for (Index i = 0; i < static_cast(vocabulary.size()); i++) vocabulary_tensor(i) = vocabulary[i]; return vocabulary_tensor; } From bc848b2bd5b06dc7c8658193e2654501536d395e Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Mon, 10 Jun 2024 10:26:38 -0500 Subject: [PATCH 05/38] Simplify local header file inclusion --- opennn/unit_testing.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/unit_testing.h b/opennn/unit_testing.h index 97812221c..1603cbc93 100644 --- a/opennn/unit_testing.h +++ b/opennn/unit_testing.h @@ -23,7 +23,7 @@ // OpenNN includes -#include "../../opennn/opennn/opennn.h" +#include "opennn.h" using namespace opennn; From 9608ca16ee75451ce431b2ad745c016f290091c7 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Fri, 21 Jun 2024 14:03:59 -0500 Subject: [PATCH 06/38] Compile without CUDA by default --- opennn/config.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/config.h b/opennn/config.h index 892ec3271..6746c6610 100644 --- a/opennn/config.h +++ b/opennn/config.h @@ -24,7 +24,7 @@ #include #include -#define OPENNN_CUDA +//#define OPENNN_CUDA #ifdef OPENNN_CUDA #include "../../opennn_cuda/CudaOpennn/kernel.cuh" From ef32fdd408f3ff8c396ac39bddc32fa094cf241c Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Fri, 21 Jun 2024 14:16:51 -0500 Subject: [PATCH 07/38] Eliminating warnings --- tests/learning_rate_algorithm_test.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/learning_rate_algorithm_test.cpp b/tests/learning_rate_algorithm_test.cpp index c8a43f956..ba142cd32 100644 --- a/tests/learning_rate_algorithm_test.cpp +++ b/tests/learning_rate_algorithm_test.cpp @@ -114,7 +114,7 @@ void 
LearningRateAlgorithmTest::test_calculate_bracketing_triplet() //loss = sum_squared_error.calculate_training_loss(); //training_direction = sum_squared_error.calculate_training_loss_gradient()*(-1.0); - initial_learning_rate = 0.01; + initial_learning_rate = static_cast(0.01); //triplet = learning_rate_algorithm.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate); @@ -127,7 +127,7 @@ void LearningRateAlgorithmTest::test_calculate_bracketing_triplet() neural_network.set_parameters_constant(type(0)); - initial_learning_rate = 0.01; + initial_learning_rate = static_cast(0.01); //triplet = learning_rate_algorithm.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate); @@ -135,7 +135,7 @@ void LearningRateAlgorithmTest::test_calculate_bracketing_triplet() neural_network.set_parameters_constant(type(1)); - initial_learning_rate = 0.0; + initial_learning_rate = static_cast(0.0); //triplet = learning_rate_algorithm.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate); @@ -164,7 +164,7 @@ void LearningRateAlgorithmTest::test_calculate_bracketing_triplet() neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number, targets_number}); neural_network.set_parameters_random(); - initial_learning_rate = 0.001; + initial_learning_rate = static_cast(0.001); //triplet = learning_rate_algorithm.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate); From e0d0470869aae1126e913ab8761a33536f2120ca Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Fri, 21 Jun 2024 14:49:53 -0500 Subject: [PATCH 08/38] Fixing build --- blank/main.cpp | 4 ++-- tests/learning_rate_algorithm_test.cpp | 2 +- tests/response_optimization_test.cpp | 8 ++++---- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/blank/main.cpp b/blank/main.cpp index f726a1777..9a187d874 100755 --- a/blank/main.cpp +++ b/blank/main.cpp @@ -51,8 +51,8 @@ int main() 
language_data_set.read_txt_language_model(); - Tensor& completion_vocabulary = language_data_set.get_completion_vocabulary(); - Tensor& context_vocabulary = language_data_set.get_context_vocabulary(); + const Tensor& completion_vocabulary = language_data_set.get_completion_vocabulary(); + const Tensor& context_vocabulary = language_data_set.get_context_vocabulary(); Index input_length = language_data_set.get_completion_length(); Index context_length = language_data_set.get_context_length(); diff --git a/tests/learning_rate_algorithm_test.cpp b/tests/learning_rate_algorithm_test.cpp index ba142cd32..2be5df5c3 100644 --- a/tests/learning_rate_algorithm_test.cpp +++ b/tests/learning_rate_algorithm_test.cpp @@ -147,7 +147,7 @@ void LearningRateAlgorithmTest::test_calculate_bracketing_triplet() neural_network.set(NeuralNetwork::ModelType::Approximation, {inputs_number, targets_number}); neural_network.set_parameters_random(); - initial_learning_rate = 0.001; + initial_learning_rate = static_cast(0.001); //triplet = learning_rate_algorithm.calculate_bracketing_triplet(loss, training_direction, initial_learning_rate); diff --git a/tests/response_optimization_test.cpp b/tests/response_optimization_test.cpp index 037ec0152..0ba969092 100644 --- a/tests/response_optimization_test.cpp +++ b/tests/response_optimization_test.cpp @@ -109,7 +109,7 @@ void ResponseOptimizationTest::test_perform_optimization() results = response_optimization.perform_optimization(); assert_true(results->optimal_variables(0) = 1, LOG); assert_true(results->optimal_variables(1) <= 1, LOG); - assert_true(1 <= results->optimal_variables(2) <= 2.5, LOG); + assert_true(1 <= results->optimal_variables(2) && results->optimal_variables(2) <= 2.5, LOG); // Multiple outputs case 1 @@ -123,8 +123,8 @@ void ResponseOptimizationTest::test_perform_optimization() results = response_optimization.perform_optimization(); assert_true(results->optimal_variables(0) = 1, LOG); assert_true(results->optimal_variables(1) <= 
1, LOG); - assert_true(1 <= results->optimal_variables(2) <= 3.0, LOG); - assert_true(-1 <= results->optimal_variables(3) <= type(1), LOG); + assert_true(1 <= results->optimal_variables(2) && results->optimal_variables(2) <= 3.0, LOG); + assert_true(-1 <= results->optimal_variables(3) && results->optimal_variables(3) <= type(1), LOG); // Multiple outputs case 2 @@ -143,7 +143,7 @@ void ResponseOptimizationTest::test_perform_optimization() results = response_optimization.perform_optimization(); assert_true(results->optimal_variables(0) = 1, LOG); assert_true(results->optimal_variables(1) <= 1, LOG); - assert_true(1 <= results->optimal_variables(2) <= 2.0, LOG); + assert_true(1 <= results->optimal_variables(2) && results->optimal_variables(2) <= 2.0, LOG); assert_true(type(-1) <= results->optimal_variables(3), LOG); assert_true(results->optimal_variables(3) <= type(0), LOG); From 100cbd487fd9fac143c80b5da737f85d6ca66863 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Fri, 21 Jun 2024 15:33:14 -0500 Subject: [PATCH 09/38] removed warnings --- tests/genetic_algorithm_test.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/genetic_algorithm_test.cpp b/tests/genetic_algorithm_test.cpp index d3d1bbdcd..40ef6c664 100644 --- a/tests/genetic_algorithm_test.cpp +++ b/tests/genetic_algorithm_test.cpp @@ -193,7 +193,7 @@ void GeneticAlgorithmTest::test_perform_selection() genetic_algorithm.set_fitness(fitness); selection_errors.resize(4); - selection_errors.setValues({0.4,0.3,0.2,0.1}); + selection_errors.setValues({type(0.4),type(0.3),type(0.2),type(0.1)}); genetic_algorithm.initialize_population(); @@ -226,7 +226,7 @@ void GeneticAlgorithmTest::test_perform_selection() genetic_algorithm.set_fitness(fitness); selection_errors.resize(4); - selection_errors.setValues({0.4,0.3,0.2,0.1}); + selection_errors.setValues({type(0.4),type(0.3),type(0.2),type(0.1)}); genetic_algorithm.initialize_population(); @@ -306,7 +306,7 @@ void 
GeneticAlgorithmTest::test_perform_crossover() genetic_algorithm.set_population(population); - selection_errors.setValues({0.4,0.3,0.2,0.1}); + selection_errors.setValues({type(0.4),type(0.3),type(0.2),type(0.1)}); genetic_algorithm.set_selection_errors(selection_errors); From 13a7ae78219e77dd43eddb0851e2347bb786bec2 Mon Sep 17 00:00:00 2001 From: Lenny Date: Wed, 10 Jul 2024 05:51:55 -0500 Subject: [PATCH 10/38] Type adjustment --- opennn/loss_index.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/loss_index.cpp b/opennn/loss_index.cpp index 3958acad0..3ae980cfa 100644 --- a/opennn/loss_index.cpp +++ b/opennn/loss_index.cpp @@ -1305,7 +1305,7 @@ void BackPropagationLM::set(const Index &new_batch_samples_number, output_deltas_dimensions[0] = batch_samples_number; Index size = batch_samples_number; - for (Index i = 0; i < output_dimensions.size(); i++) + for (Index i = 0; i < static_cast(output_dimensions.size()); i++) { output_deltas_dimensions[i + 1] = output_dimensions[i]; size *= output_dimensions[i]; From 5a161881cff044f065dbc6d5287d7ac1d9992bd5 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Wed, 17 Jul 2024 12:39:11 -0500 Subject: [PATCH 11/38] minor fix --- opennn/loss_index.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/opennn/loss_index.cpp b/opennn/loss_index.cpp index 2363215e4..ac128da04 100644 --- a/opennn/loss_index.cpp +++ b/opennn/loss_index.cpp @@ -958,6 +958,7 @@ void BackPropagation::set(const Index& new_batch_samples_number, LossIndex* new_ Index size = batch_samples_number; + for (Index i = 0; i < static_cast(output_dimensions.size()); i++) { output_deltas_dimensions[i + 1] = output_dimensions[i]; From 83e2d2c63d56c29a0381e4483cf234214361a5e4 Mon Sep 17 00:00:00 2001 From: Lenny Date: Sat, 20 Jul 2024 08:20:25 -0500 Subject: [PATCH 12/38] No changes in loss_index.cpp are needed anymore --- opennn/loss_index.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/opennn/loss_index.cpp 
b/opennn/loss_index.cpp index fffa338ec..8399b04cf 100644 --- a/opennn/loss_index.cpp +++ b/opennn/loss_index.cpp @@ -1291,6 +1291,7 @@ void BackPropagationLM::set(const Index &new_batch_samples_number, output_deltas_dimensions[0] = batch_samples_number; Index size = batch_samples_number; + for (Index i = 0; i < Index(output_dimensions.size()); i++) { output_deltas_dimensions[i + 1] = output_dimensions[i]; From 0428a254389701cede305fa1e269dc9366d70dc0 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Mon, 22 Jul 2024 16:12:35 -0500 Subject: [PATCH 13/38] One more preventable type mismatch --- opennn/strings_utilities.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/strings_utilities.cpp b/opennn/strings_utilities.cpp index c8754340a..7f22cf008 100644 --- a/opennn/strings_utilities.cpp +++ b/opennn/strings_utilities.cpp @@ -1650,7 +1650,7 @@ Tensor fix_write_expression_outputs(const string &str, const Tensor(tokens.dimension(0)); i++) { string s = tokens(i); string word = ""; From b3e77cea2bade44c8b73cd38a1d60b5abd75edce Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Wed, 24 Jul 2024 09:24:25 -0500 Subject: [PATCH 14/38] Remove warning --- opennn/tensors.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/tensors.cpp b/opennn/tensors.cpp index 019b89c8c..d8c5173e3 100644 --- a/opennn/tensors.cpp +++ b/opennn/tensors.cpp @@ -2304,7 +2304,7 @@ TensorMap> tensor_map(const Tensor& matrix, const Index void print_dimensions(const dimensions& new_dimensions) { - for(Index i = 0; i < new_dimensions.size(); i++) + for(Index i = 0; i < static_cast(new_dimensions.size()); i++) cout << new_dimensions[i] << " "; cout << endl; From 694aefcb82ff94b98e5f3a8523922047ed8714a3 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Wed, 24 Jul 2024 09:48:03 -0500 Subject: [PATCH 15/38] One more fix to warnings --- tests/pooling_layer_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/tests/pooling_layer_test.cpp b/tests/pooling_layer_test.cpp index 6ba2d02d8..2651a913f 100644 --- a/tests/pooling_layer_test.cpp +++ b/tests/pooling_layer_test.cpp @@ -290,7 +290,7 @@ void PoolingLayerTest::test_forward_propagate_average_pooling() assert_true(outputs_pair.second.size() == input_dimensions.size(), LOG); - for(Index i = 0; i < output_dimensions.size(); i++) + for(Index i = 0; i < static_cast(output_dimensions.size()); i++) { // assert_true(outputs_pair.second.dimensions(i) <= input_dimensions(i), LOG); } From 70f451df5824d58e7b05ba3b21508e41846abe3a Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Thu, 25 Jul 2024 09:23:57 -0500 Subject: [PATCH 16/38] Header is not needed and not working --- tests/minkowski_error_test.cpp | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/minkowski_error_test.cpp b/tests/minkowski_error_test.cpp index a465095e7..961e846d2 100644 --- a/tests/minkowski_error_test.cpp +++ b/tests/minkowski_error_test.cpp @@ -7,8 +7,6 @@ // artelnics@artelnics.com #include "minkowski_error_test.h" -#include "tensors.h" - MinkowskiErrorTest::MinkowskiErrorTest() : UnitTesting() { From 82edbff0a802000a84febdfafdef0baf5196cbaf Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Thu, 25 Jul 2024 09:33:19 -0500 Subject: [PATCH 17/38] minor fix --- opennn/probabilistic_layer_3d.h | 2 +- opennn/stochastic_gradient_descent.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opennn/probabilistic_layer_3d.h b/opennn/probabilistic_layer_3d.h index 4fd065322..34074a5e3 100644 --- a/opennn/probabilistic_layer_3d.h +++ b/opennn/probabilistic_layer_3d.h @@ -60,7 +60,7 @@ class ProbabilisticLayer3D : public Layer /// Enumeration of the available methods for interpreting variables as probabilities. 
- enum class ActivationFunction{Softmax, Competitive}; + enum class ActivationFunction{Softmax, Competitive, Binary, Logistic}; // Get methods diff --git a/opennn/stochastic_gradient_descent.cpp b/opennn/stochastic_gradient_descent.cpp index d674520e5..79e19abf1 100644 --- a/opennn/stochastic_gradient_descent.cpp +++ b/opennn/stochastic_gradient_descent.cpp @@ -123,7 +123,7 @@ Index StochasticGradientDescent::get_batch_samples_number() const } -/// Set the initial value for the learning rate. If dacay is not active learning rate will be constant +/// Set the initial value for the learning rate. If decay is not active learning rate will be constant /// otherwise learning rate will decay over each update. /// @param new_initial_learning_rate initial learning rate value. @@ -163,7 +163,7 @@ void StochasticGradientDescent::set_initial_decay(const type& new_decay) buffer << "OpenNN Exception: StochasticGradientDescent class.\n" << "void set_initial_decay(const type&) method.\n" - << "new_dacay must be equal or greater than 0.\n"; + << "new_decay must be equal or greater than 0.\n"; throw runtime_error(buffer.str()); } From 254f78db8a9100b38dc0279b937f0ea812989607 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Thu, 25 Jul 2024 09:43:59 -0500 Subject: [PATCH 18/38] Fixing 4 mistakes in test's code --- tests/response_optimization_test.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/response_optimization_test.cpp b/tests/response_optimization_test.cpp index 4e8602d7e..e706fc4ea 100644 --- a/tests/response_optimization_test.cpp +++ b/tests/response_optimization_test.cpp @@ -111,7 +111,7 @@ void ResponseOptimizationTest::test_perform_optimization() results = response_optimization.perform_optimization(); assert_true(results->optimal_variables(0) = 1, LOG); assert_true(results->optimal_variables(1) <= 1, LOG); - assert_true(1 <= results->optimal_variables(2) <= 2.5, LOG); + assert_true((1 <= results->optimal_variables(2)) && 
(results->optimal_variables(2) <= 2.5), LOG); // Multiple outputs case 1 @@ -125,8 +125,8 @@ void ResponseOptimizationTest::test_perform_optimization() results = response_optimization.perform_optimization(); assert_true(results->optimal_variables(0) = 1, LOG); assert_true(results->optimal_variables(1) <= 1, LOG); - assert_true(1 <= results->optimal_variables(2) <= 3.0, LOG); - assert_true(-1 <= results->optimal_variables(3) <= type(1), LOG); + assert_true((1 <= results->optimal_variables(2)) && (results->optimal_variables(2) <= 3.0), LOG); + assert_true((-1 <= results->optimal_variables(3)) && (results->optimal_variables(3) <= type(1)), LOG); // Multiple outputs case 2 @@ -145,7 +145,7 @@ void ResponseOptimizationTest::test_perform_optimization() results = response_optimization.perform_optimization(); assert_true(results->optimal_variables(0) = 1, LOG); assert_true(results->optimal_variables(1) <= 1, LOG); - assert_true(1 <= results->optimal_variables(2) <= 2.0, LOG); + assert_true((1 <= results->optimal_variables(2)) && (results->optimal_variables(2) <= 2.0), LOG); assert_true(type(-1) <= results->optimal_variables(3), LOG); assert_true(results->optimal_variables(3) <= type(0), LOG); From 4a7a3806786c0e220e2356b57a64f6f166c01331 Mon Sep 17 00:00:00 2001 From: Lenny Date: Sat, 27 Jul 2024 07:00:03 -0500 Subject: [PATCH 19/38] synch --- opennn/codification.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/codification.cpp b/opennn/codification.cpp index 0802b77b4..5a29aaf74 100644 --- a/opennn/codification.cpp +++ b/opennn/codification.cpp @@ -46,7 +46,7 @@ string sj2utf8(const string &input_string) //converting to UTF8 if(unicodeValue < 0x80) { - output[indexOutput++] = unicodeValue; + output[indexOutput++] = char(unicodeValue); } else if(unicodeValue < 0x800) { From 8b15640f5bec67ba24359b6b44d410b501aee54b Mon Sep 17 00:00:00 2001 From: Lenny Date: Sat, 27 Jul 2024 13:32:31 -0500 Subject: [PATCH 20/38] warning eliminated --- 
tests/learning_rate_algorithm_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/learning_rate_algorithm_test.cpp b/tests/learning_rate_algorithm_test.cpp index 432ea9322..b168e630a 100644 --- a/tests/learning_rate_algorithm_test.cpp +++ b/tests/learning_rate_algorithm_test.cpp @@ -235,7 +235,7 @@ void LearningRateAlgorithmTest::test_calculate_Brent_method_directional_point() Tensor training_direction = gradient*(type(-1.0)); */ - type initial_learning_rate = 0.001; + type initial_learning_rate = static_cast(0.001); /* pair directional_point = learning_rate_algorithm.calculate_directional_point(1e-2, training_direction, initial_learning_rate); From d8222a04f7d4616a95f00b1cde652f9cd78c7c81 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Mon, 29 Jul 2024 09:31:00 -0500 Subject: [PATCH 21/38] Merge branch 'dev' into patch-5 # Conflicts: # tests/pooling_layer_test.cpp --- opennn/pooling_layer.cpp | 14 +- tests/convolutional_layer_test.cpp | 16 +- tests/pooling_layer_test.cpp | 705 +++++++++++++---------------- 3 files changed, 318 insertions(+), 417 deletions(-) diff --git a/opennn/pooling_layer.cpp b/opennn/pooling_layer.cpp index 9f1cbc850..678243085 100644 --- a/opennn/pooling_layer.cpp +++ b/opennn/pooling_layer.cpp @@ -363,8 +363,8 @@ void PoolingLayer::forward_propagate(const Tensor, 1>& i /// @param inputs The batch of images. 
void PoolingLayer::forward_propagate_average_pooling(const Tensor& inputs, - LayerForwardPropagation* layer_forward_propagation, - const bool& is_training) const + LayerForwardPropagation* layer_forward_propagation, + const bool& is_training) const { const type pool_size = type(pool_height * pool_width); @@ -405,7 +405,6 @@ void PoolingLayer::forward_propagate_max_pooling(const Tensor& inputs, LayerForwardPropagation* layer_forward_propagation, const bool& is_training) const { - cout << "pooling inputs: " << endl << inputs << endl; const Index output_width = get_output_width(); const Index output_height = get_output_height(); @@ -437,13 +436,9 @@ void PoolingLayer::forward_propagate_max_pooling(const Tensor& inputs, PADDING_VALID, type(padding_width)); - cout << "image_patches: " << endl << image_patches << endl; - outputs.device(*thread_pool_device) = image_patches.maximum(max_pooling_dimensions).reshape(outputs_dimensions_array); - cout << "outputs: " << endl << outputs << endl; - // Extract maximum indices pooling_layer_forward_propagation->inputs_max_indices.resize(inputs.dimension(0), @@ -455,12 +450,8 @@ void PoolingLayer::forward_propagate_max_pooling(const Tensor& inputs, Index outputs_index = 0; - cout << "Arg max: " << image_patches.argmax() << endl; - for(Index i = 0; i < pooling_layer_forward_propagation->inputs_max_indices.size(); i++) { - cout << "inputs(i): " << inputs(i) - << "; outputs_index: " << outputs(outputs_index) << "; " <& inputs, } } - cout << "Max indices: " << pooling_layer_forward_propagation->inputs_max_indices << endl; } diff --git a/tests/convolutional_layer_test.cpp b/tests/convolutional_layer_test.cpp index f640f9106..5fbeac024 100644 --- a/tests/convolutional_layer_test.cpp +++ b/tests/convolutional_layer_test.cpp @@ -170,14 +170,14 @@ void ConvolutionalLayerTest::test_constructor() ConvolutionalLayer convolutional_layer(input_dimensions, kernel_dimensions); - assert_true(convolutional_layer.get_input_channels() == 1 && - 
convolutional_layer.get_input_height() == 28 && - convolutional_layer.get_input_width() == 29, LOG); - - assert_true(convolutional_layer.get_kernel_height() == 3 && - convolutional_layer.get_kernel_width() == 2 && - convolutional_layer.get_kernel_channels() == 1 && - convolutional_layer.get_kernels_number() == 16, LOG); + assert_true(convolutional_layer.get_input_height() == 28 + && convolutional_layer.get_input_width() == 29 + && convolutional_layer.get_input_channels() == 1, LOG); + + assert_true(convolutional_layer.get_kernel_height() == 3 + && convolutional_layer.get_kernel_width() == 2 + && convolutional_layer.get_kernel_channels() == 1 + && convolutional_layer.get_kernels_number() == 16, LOG); } diff --git a/tests/pooling_layer_test.cpp b/tests/pooling_layer_test.cpp index 2651a913f..36dc4c3d2 100644 --- a/tests/pooling_layer_test.cpp +++ b/tests/pooling_layer_test.cpp @@ -22,6 +22,21 @@ PoolingLayerTest::~PoolingLayerTest() void PoolingLayerTest::test_constructor() { cout << "test_constructor\n"; + + dimensions input_dimensions; + dimensions pool_dimensions; + + input_dimensions = { 28, 29, 1 }; + pool_dimensions = { 3, 2 }; + + PoolingLayer pooling_layer(input_dimensions, pool_dimensions); + + assert_true(pooling_layer.get_input_height() == 28 + && pooling_layer.get_input_width() == 29 + && pooling_layer.get_channels_number() == 1, LOG); + + assert_true(pooling_layer.get_pool_height() == 3 + && pooling_layer.get_pool_width() == 2, LOG); } void PoolingLayerTest::test_destructor() @@ -29,186 +44,182 @@ void PoolingLayerTest::test_destructor() cout << "test_destructor\n"; } -/* -void PoolingLayerTest::test_calculate_average_pooling_outputs() + +void PoolingLayerTest::test_forward_propagate_max_pooling() { - cout << "test_calculate_average_pooling_outputs\n"; - - Tensor inputs; - Tensor outputs; - - // Test - - inputs.resize(6,6,6,6); - - pooling_layer.set_pool_size(1, 1); - pooling_layer.set_row_stride(1); - pooling_layer.set_column_stride(1); - -// outputs = 
pooling_layer.calculate_average_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 6 && -// outputs.dimension(1) == 6 && -// outputs.dimension(2) == 6 && -// outputs.dimension(3) == 6, LOG); - - // Test - -// inputs.resize(({10,3,20,20})); - -// pooling_layer.set_pool_size(2,2); -// pooling_layer.set_row_stride(1); -// pooling_layer.set_column_stride(1); - -// outputs = pooling_layer.calculate_average_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 10 && -// outputs.dimension(1) == 3 && -// outputs.dimension(2) == 19 && -// outputs.dimension(3) == 19, LOG); - - // Test - -// inputs.resize(({1,1,4,4})); -// inputs(0,0,0,0) = type(1); -// inputs(0,0,0,1) = 2.0; -// inputs(0,0,0,2) = 3.0; -// inputs(0,0,0,3) = 4.0; -// inputs(0,0,1,0) = 16.0; -// inputs(0,0,1,1) = 9.0; -// inputs(0,0,1,2) = 4.0; -// inputs(0,0,1,3) = type(1); -// inputs(0,0,2,0) = type(1); -// inputs(0,0,2,1) = 8.0; -// inputs(0,0,2,2) = 27.0; -// inputs(0,0,2,3) = 64.0; -// inputs(0,0,3,0) = 256.0; -// inputs(0,0,3,1) = 81.0; -// inputs(0,0,3,2) = 16.0; -// inputs(0,0,3,3) = type(1); - -// pooling_layer.set_pool_size(2, 2); -// pooling_layer.set_row_stride(1); -// pooling_layer.set_column_stride(1); - -// outputs = pooling_layer.calculate_average_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 1 && -// outputs.dimension(1) == 1 && -// outputs.dimension(2) == 3 && -// outputs.dimension(3) == 3 && -// outputs(0,0,0,0) == 7.0 && -// outputs(0,0,0,1) == 4.5 && -// outputs(0,0,0,2) == 3.0 && -// outputs(0,0,1,0) == 8.5 && -// outputs(0,0,1,1) == 12.0 && -// outputs(0,0,1,2) == 24.0 && -// outputs(0,0,2,0) == 86.5 && -// outputs(0,0,2,1) == 33.0 && -// outputs(0,0,2,2) == 27.0, LOG); - - // Test - -// inputs.resize(({1,1,4,4})); -// inputs(0,0,0,0) = type(1); -// inputs(0,0,0,1) = 2.0; -// inputs(0,0,0,2) = 3.0; -// inputs(0,0,0,3) = 4.0; -// inputs(0,0,1,0) = 16.0; -// inputs(0,0,1,1) = 9.0; -// inputs(0,0,1,2) = 4.0; -// inputs(0,0,1,3) = type(1); -// 
inputs(0,0,2,0) = type(1); -// inputs(0,0,2,1) = 8.0; -// inputs(0,0,2,2) = 27.0; -// inputs(0,0,2,3) = 64.0; -// inputs(0,0,3,0) = 256.0; -// inputs(0,0,3,1) = 81.0; -// inputs(0,0,3,2) = 16.0; -// inputs(0,0,3,3) = type(1); - -// pooling_layer.set_pool_size(3, 3); -// pooling_layer.set_row_stride(1); -// pooling_layer.set_column_stride(1); - -// outputs = pooling_layer.calculate_average_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 1 && -// outputs.dimension(1) == 1 && -// outputs.dimension(2) == 2 && -// outputs.dimension(3) == 2 && -// outputs(0,0,0,0) - 7.8888 < 0.001 && -// outputs(0,0,0,1) - 13.5555 < 0.001 && -// outputs(0,0,1,0) - 46.4444 < 0.001 && -// outputs(0,0,1,1) - 23.4444 < 0.001, LOG); - - // input_dimensions - - const Index input_images = 1; - const Index channels = 1; - - const Index input_heigth = 4; - const Index input_width = 4; - - //pooling dimensions - - const Index pool_height = 2; - const Index pool_width = 2; - - //stride - - const Index row_stride = 1; - const Index column_stride = 1; - - //output dimensions - - const Index output_height = (input_heigth - pool_height)/row_stride + 1; - const Index output_width = (input_width - pool_width)/column_stride +1; - - inputs.resize(input_heigth, input_width, channels, input_images); - outputs.resize(output_height, output_width, channels, input_images); - - inputs.setRandom(); - - //pooling average - - Index column = 0; - Index row = 0; - - for(int i = 0; i bmp_image_1; + Tensor bmp_image_2; + + PoolingLayer pooling_layer; + PoolingLayer pooling_layer_2; + + dimensions input_dimensions; + dimensions pool_dimensions; + + bmp_image_1 = read_bmp_image("../examples/mnist/data/images/one/1_0.bmp"); + bmp_image_2 = read_bmp_image("../examples/mnist/data/images/one/1_1.bmp"); + + Index input_height = bmp_image_1.dimension(0); // 28 + Index input_width = bmp_image_1.dimension(1); // 28 + Index input_channels = bmp_image_1.dimension(2); // 1 + + Index pool_height = 27; + Index 
pool_width = 27; + + pair outputs_pair; + pair outputs_pair_2; + + input_dimensions = { input_height, input_width, input_channels }; + + pool_dimensions = { pool_height, pool_width }; + + pooling_layer.set(input_dimensions, pool_dimensions); + + PoolingLayerForwardPropagation pooling_layer_forward_propagation(images_number, &pooling_layer); + + Tensor inputs(images_number, + input_height, + input_width, + input_channels); + + // Copy bmp_image data into inputs + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) + { + for (int c = 0; c < input_channels; ++c) + { + inputs(0, h, w, c) = type(bmp_image_1(h, w, c)); + } + } + } + + // Copy bmp_image_2 data into inputs + for (int h = 0; h < input_height; ++h) { - for(int c = 0; c < channels; c++) + for (int w = 0; w < input_width; ++w) { - for(int k = 0; k < output_width; k++) + for (int c = 0; c < input_channels; ++c) { - for(int l = 0; l < output_height; l++) - { - float tmp_result = 0; + inputs(1, h, w, c) = type(bmp_image_2(h, w, c)); + } + } + } + + pooling_layer.forward_propagate_max_pooling(inputs, + &pooling_layer_forward_propagation, + is_training); + + outputs_pair = pooling_layer_forward_propagation.get_outputs_pair(); + + assert_true(outputs_pair.second.size() == input_dimensions.size() + 1, LOG); + + type* outputs_data = outputs_pair.first; + + TensorMap> outputs(outputs_data, + outputs_pair.second[0], + outputs_pair.second[1], + outputs_pair.second[2], + outputs_pair.second[3]); + + assert_true(outputs(0, 0, 0, 0) == type(255) + && outputs(1, 0, 0, 0) == type(254), LOG); + + //cout << "outputs:" << endl << "Image 1 (0,0,0): " << round(outputs(0, 0, 0, 0)) << endl << "Image 2 (0,0,0): " << round(outputs(1, 0, 0, 0)) << endl; + + // 2 images 3 channels + + bmp_image_1 = read_bmp_image("../examples/mnist/data/test/4x4_0.bmp"); + bmp_image_2 = read_bmp_image("../examples/mnist/data/test/4x4_1.bmp"); + + input_height = bmp_image_1.dimension(0); // 4 + input_width = 
bmp_image_1.dimension(1); // 4 + input_channels = bmp_image_1.dimension(2); // 3 + + input_dimensions = { input_height, input_width, input_channels }; + + pool_height = 2; + pool_width = 2; - for(int m = 0; m < pool_width; m++) - { - column = m*column_stride + k; + pool_dimensions = { pool_height, pool_width }; - for(int n = 0; n < pool_height; n++) - { - row = n*row_stride + l; + pooling_layer_2.set(input_dimensions, pool_dimensions); - tmp_result += inputs(row,column,c,i); - } - } + PoolingLayerForwardPropagation pooling_layer_forward_propagation_2(images_number, &pooling_layer_2); - outputs(l,k,c,i) = tmp_result/(pool_width*pool_height); - } + inputs.resize(images_number, + input_height, + input_width, + input_channels); + + // Copy bmp_image data into inputs + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) + { + for (int c = 0; c < input_channels; ++c) + { + inputs(0, h, w, c) = type(bmp_image_1(h, w, c)); } } } -} -*/ + // Copy bmp_image_2 data into inputs + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) + { + for (int c = 0; c < input_channels; ++c) + { + inputs(1, h, w, c) = type(bmp_image_2(h, w, c)); + } + } + } + + pooling_layer_2.forward_propagate_max_pooling(inputs, + &pooling_layer_forward_propagation_2, + is_training); + + outputs_pair_2 = pooling_layer_forward_propagation_2.get_outputs_pair(); + + assert_true(outputs_pair_2.second.size() == input_dimensions.size() + 1, LOG); + + type* outputs_data_2 = outputs_pair_2.first; + + TensorMap> outputs_2(outputs_data_2, + outputs_pair_2.second[0], + outputs_pair_2.second[1], + outputs_pair_2.second[2], + outputs_pair_2.second[3]); + //Image 1: + assert_true(outputs_2(0, 0, 0, 0) == type(255) + && outputs_2(0, 0, 0, 1) == type(255) + && outputs_2(0, 0, 0, 2) == type(255) + && outputs_2(0, 0, 1, 0) == type(255) + && outputs_2(0, 0, 1, 1) == type(255) + && outputs_2(0, 0, 1, 2) == type(255) + && outputs_2(0, 0, 2, 0) == type(0) + && 
outputs_2(0, 0, 2, 1) == type(0) + && outputs_2(0, 0, 2, 2) == type(0) + // Image 2: + && outputs_2(1, 0, 0, 0) == type(0) + && outputs_2(1, 0, 0, 1) == type(0) + && outputs_2(1, 0, 0, 2) == type(0) + && outputs_2(1, 0, 1, 0) == type(255) + && outputs_2(1, 0, 1, 1) == type(255) + && outputs_2(1, 0, 1, 2) == type(255) + && outputs_2(1, 0, 2, 0) == type(255) + && outputs_2(1, 0, 2, 1) == type(255) + && round(outputs_2(1, 0, 2, 2)) == type(255), LOG); -void PoolingLayerTest::test_forward_propagate_max_pooling() -{ - cout << "test_forward_propagate_max_pooling" << endl; } @@ -216,71 +227,70 @@ void PoolingLayerTest::test_forward_propagate_average_pooling() { cout << "test_forward_propagate_average_pooling" << endl; - const Index batch_samples_number = 1; + // 2 images 1 channel + + bool is_training = true; - const Index input_channels = 3; - const Index input_height = 5; - const Index input_width = 5; + const Index images_number = 2; - const Index pool_height = 2; - const Index pool_width = 2; + Tensor bmp_image_1; + Tensor bmp_image_2; - const Index targets_number = 1; + PoolingLayer pooling_layer; + PoolingLayer pooling_layer_2; dimensions input_dimensions; dimensions pool_dimensions; - dimensions output_dimensions; - pair outputs_pair; + bmp_image_1 = read_bmp_image("../examples/mnist/data/images/one/1_0.bmp"); + bmp_image_2 = read_bmp_image("../examples/mnist/data/images/one/1_1.bmp"); - ImageDataSet image_data_set(batch_samples_number, - input_channels, - input_height, - input_width, - targets_number); + Index input_height = bmp_image_1.dimension(0); // 28 + Index input_width = bmp_image_1.dimension(1); // 28 + Index input_channels = bmp_image_1.dimension(2); // 1 - image_data_set.set_data_constant(type(1)); + Index pool_height = 27; + Index pool_width = 27; - input_dimensions = {input_channels, - input_height, - input_width}; + pair outputs_pair; + pair outputs_pair_2; - pool_dimensions = {pool_height, - pool_width}; + input_dimensions = { input_height, 
input_width, input_channels }; - bool is_training = true; + pool_dimensions = { pool_height, pool_width }; - PoolingLayer pooling_layer(input_dimensions, pool_dimensions); + pooling_layer.set(input_dimensions, pool_dimensions); - PoolingLayerForwardPropagation pooling_layer_forward_propagation(batch_samples_number, &pooling_layer); + PoolingLayerForwardPropagation pooling_layer_forward_propagation(images_number, &pooling_layer); - Tensor inputs(batch_samples_number, - input_channels, + Tensor inputs(images_number, input_height, - input_width); - - inputs.setValues({{ - {{0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0} - }, + input_width, + input_channels); + + // Copy bmp_image data into inputs + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) { - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0} - }, + for (int c = 0; c < input_channels; ++c) + { + inputs(0, h, w, c) = type(bmp_image_1(h, w, c)); + } + } + } + + // Copy bmp_image_2 data into inputs + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) { - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0}, - {0.0, 1.0, 2.0, 2.0, 2.0} + for (int c = 0; c < input_channels; ++c) + { + inputs(1, h, w, c) = type(bmp_image_2(h, w, c)); + } } - }}); + } pooling_layer.forward_propagate_average_pooling(inputs, &pooling_layer_forward_propagation, @@ -288,207 +298,108 @@ void PoolingLayerTest::test_forward_propagate_average_pooling() outputs_pair = pooling_layer_forward_propagation.get_outputs_pair(); - assert_true(outputs_pair.second.size() == input_dimensions.size(), LOG); + assert_true(outputs_pair.second.size() == input_dimensions.size() + 1, LOG); - for(Index i = 0; i < static_cast(output_dimensions.size()); i++) 
- { -// assert_true(outputs_pair.second.dimensions(i) <= input_dimensions(i), LOG); - } -/* - type* outputs_data = pooling_layer_forward.outputs_data(0); + type* outputs_data = outputs_pair.first; TensorMap> outputs(outputs_data, - output_dimensions[0], - output_dimensions(1), - output_dimensions(2), - output_dimensions(3)); + outputs_pair.second[0], + outputs_pair.second[1], + outputs_pair.second[2], + outputs_pair.second[3]); - Tensor batch = outputs.chip(0,0); + assert_true(round(outputs(0, 0, 0, 0)) == type(14) + && round(outputs(1, 0, 0, 0)) == type(19), LOG); - cout << "1 single channel: " << endl << batch.chip(0,0) << endl; + //cout << "outputs:" << endl << "Image 1 (0,0,0): " << round(outputs(0, 0, 0, 0)) << endl << "Image 2 (0,0,0): " << round(outputs(1, 0, 0, 0)) << endl; - cout << "outputs: " << endl << outputs << endl; -*/ -} + // 2 images 3 channels -/* -void PoolingLayerTest::test_calculate_max_pooling_outputs() -{ - cout << "test_calculate_max_pooling_outputs\n"; - -// Tensor inputs; -// Tensor outputs; - - // Test - -// inputs.resize(({6,6,6,6})); - -// pooling_layer.set_pool_size(1,1); -// pooling_layer.set_row_stride(1); -// pooling_layer.set_column_stride(1); - -// outputs = pooling_layer.calculate_max_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 6 && -// outputs.dimension(1) == 6 && -// outputs.dimension(2) == 6 && -// outputs.dimension(3) == 6, LOG); - - // Test - -// inputs.resize(({10,3,20,20})); - -// pooling_layer.set_pool_size(2,2); -// pooling_layer.set_row_stride(1); -// pooling_layer.set_column_stride(1); - -// outputs = pooling_layer.calculate_max_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 10 && -// outputs.dimension(1) == 3 && -// outputs.dimension(2) == 19 && -// outputs.dimension(3) == 19, LOG); - - // Test - -// inputs.resize(({1,1,4,4})); -// inputs(0,0,0,0) = type(1); -// inputs(0,0,0,1) = 2.0; -// inputs(0,0,0,2) = 3.0; -// inputs(0,0,0,3) = 4.0; -// inputs(0,0,1,0) = 16.0; -// 
inputs(0,0,1,1) = 9.0; -// inputs(0,0,1,2) = 4.0; -// inputs(0,0,1,3) = type(1); -// inputs(0,0,2,0) = type(1); -// inputs(0,0,2,1) = 8.0; -// inputs(0,0,2,2) = 27.0; -// inputs(0,0,2,3) = 64.0; -// inputs(0,0,3,0) = 256.0; -// inputs(0,0,3,1) = 81.0; -// inputs(0,0,3,2) = 16.0; -// inputs(0,0,3,3) = type(1); - -// pooling_layer.set_pool_size(2, 2); -// pooling_layer.set_row_stride(1); -// pooling_layer.set_column_stride(1); - -// outputs = pooling_layer.calculate_max_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 1 && -// outputs.dimension(1) == 1 && -// outputs.dimension(2) == 3 && -// outputs.dimension(3) == 3 && -// outputs(0,0,0,0) == 16.0 && -// outputs(0,0,0,1) == 9.0 && -// outputs(0,0,0,2) == 4.0 && -// outputs(0,0,1,0) == 16.0 && -// outputs(0,0,1,1) == 27.0 && -// outputs(0,0,1,2) == 64.0 && -// outputs(0,0,2,0) == 256.0 && -// outputs(0,0,2,1) == 81.0 && -// outputs(0,0,2,2) == 64.0, LOG); - - // Test - -// inputs.resize(({1,1,4,4})); -// inputs(0,0,0,0) = type(1); -// inputs(0,0,0,1) = 2.0; -// inputs(0,0,0,2) = 3.0; -// inputs(0,0,0,3) = 4.0; -// inputs(0,0,1,0) = -16.0; -// inputs(0,0,1,1) = -9.0; -// inputs(0,0,1,2) = -4.0; -// inputs(0,0,1,3) = -1.0; -// inputs(0,0,2,0) = type(1); -// inputs(0,0,2,1) = 8.0; -// inputs(0,0,2,2) = 27.0; -// inputs(0,0,2,3) = 64.0; -// inputs(0,0,3,0) = -256.0; -// inputs(0,0,3,1) = -81.0; -// inputs(0,0,3,2) = -16.0; -// inputs(0,0,3,3) = -1.0; - -// pooling_layer.set_pool_size(3, 3); -// pooling_layer.set_row_stride(1); -// pooling_layer.set_column_stride(1); - -// outputs = pooling_layer.calculate_max_pooling_outputs(inputs); - -// assert_true(outputs.dimension(0) == 1 && -// outputs.dimension(1) == 1 && -// outputs.dimension(2) == 2 && -// outputs.dimension(3) == 2 && -// outputs(0,0,0,0) == 27.0 && -// outputs(0,0,0,1) == 64.0 && -// outputs(0,0,1,0) == 27.0 && -// outputs(0,0,1,1) == 64.0, LOG); - - cout << "test_calculate_max_pooling_outputs\n"; - - //input_dimensions - const Index 
input_images = 1; - const Index channels = 1; - - const Index input_heigth = 4; - const Index input_width = 4; - - //pooling dimensions - const Index pool_height = 2; - const Index pool_width = 2; - - //stride - const Index row_stride = 1; - const Index column_stride = 1; - - //output dimensions - - const Index output_height = (input_heigth - pool_height)/row_stride + 1; - const Index output_width = (input_width - pool_width)/column_stride +1; - - Tensor inputs(input_heigth, input_width, channels, input_images); - Tensor outputs(output_height, output_width, channels, input_images); - - inputs.setRandom(); - - //pooling average - - Index column = 0; - Index row = 0; - - for(int i = 0; i < input_images; i++) - { - for(int c = 0; c < channels; c++) - { - for(int k = 0; k < output_width; k++) - { - for(int l = 0; l < output_height; l++) - { - float tmp_result = 0; + bmp_image_1 = read_bmp_image("../examples/mnist/data/test/4x4_0.bmp"); + bmp_image_2 = read_bmp_image("../examples/mnist/data/test/4x4_1.bmp"); + + input_height = bmp_image_1.dimension(0); // 4 + input_width = bmp_image_1.dimension(1); // 4 + input_channels = bmp_image_1.dimension(2); // 3 + + input_dimensions = { input_height, input_width, input_channels }; - float final_result = 0; + pool_height = 2; + pool_width = 2; - for(int m = 0; m < pool_width; m++) - { - column = m*column_stride + k; + pool_dimensions = { pool_height, pool_width }; - for(int n = 0; n < pool_height; n++) - { - row = n*row_stride + l; + pooling_layer_2.set(input_dimensions, pool_dimensions); - tmp_result = inputs(row,column,c,i); + PoolingLayerForwardPropagation pooling_layer_forward_propagation_2(images_number, &pooling_layer_2); - if(tmp_result > final_result) final_result = tmp_result; - } - } + inputs.resize(images_number, + input_height, + input_width, + input_channels); - outputs(l,k,c,i) = final_result; - } + // Copy bmp_image data into inputs + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) 
+ { + for (int c = 0; c < input_channels; ++c) + { + inputs(0, h, w, c) = type(bmp_image_1(h, w, c)); } } } + + // Copy bmp_image_2 data into inputs + for (int h = 0; h < input_height; ++h) + { + for (int w = 0; w < input_width; ++w) + { + for (int c = 0; c < input_channels; ++c) + { + inputs(1, h, w, c) = type(bmp_image_2(h, w, c)); + } + } + } + + pooling_layer_2.forward_propagate_average_pooling(inputs, + &pooling_layer_forward_propagation_2, + is_training); + + outputs_pair_2 = pooling_layer_forward_propagation_2.get_outputs_pair(); + + assert_true(outputs_pair_2.second.size() == input_dimensions.size() + 1, LOG); + + type* outputs_data_2 = outputs_pair_2.first; + + TensorMap> outputs_2(outputs_data_2, + outputs_pair_2.second[0], + outputs_pair_2.second[1], + outputs_pair_2.second[2], + outputs_pair_2.second[3]); + //Image 1: + assert_true(outputs_2(0, 0, 0, 0) == type(255) + && outputs_2(0, 0, 0, 1) == type(255) + && outputs_2(0, 0, 0, 2) == type(255) + && outputs_2(0, 0, 1, 0) == type(127.5) + && outputs_2(0, 0, 1, 1) == type(127.5) + && outputs_2(0, 0, 1, 2) == type(127.5) + && outputs_2(0, 0, 2, 0) == type(0) + && outputs_2(0, 0, 2, 1) == type(0) + && outputs_2(0, 0, 2, 2) == type(0) + // Image 2: + && outputs_2(1, 0, 0, 0) == type(0) + && outputs_2(1, 0, 0, 1) == type(0) + && outputs_2(1, 0, 0, 2) == type(0) + && outputs_2(1, 0, 1, 0) == type(127.5) + && outputs_2(1, 0, 1, 1) == type(127.5) + && outputs_2(1, 0, 1, 2) == type(127.5) + && outputs_2(1, 0, 2, 0) == type(255) + && outputs_2(1, 0, 2, 1) == type(255) + && round(outputs_2(1, 0, 2, 2)) == type(255), LOG); + } -*/ + void PoolingLayerTest::run_test_case() { From 8106eaf5fa4003bd69c0618b84a84902365e65b1 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Tue, 30 Jul 2024 12:31:36 -0500 Subject: [PATCH 22/38] Minor --- opennn/strings_utilities.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/opennn/strings_utilities.cpp b/opennn/strings_utilities.cpp index 
2030da35f..e764f174c 100644 --- a/opennn/strings_utilities.cpp +++ b/opennn/strings_utilities.cpp @@ -2681,8 +2681,8 @@ void delete_short_long_words(Tensor,1>& documents_words, for(Index j = 0; j < documents_words(i).size(); j++) { - if(documents_words(i)(j).length() >= minimum_length - && documents_words(i)(j).length() <= maximum_length) + if(static_cast(documents_words(i)(j).length()) >= minimum_length + && static_cast(documents_words(i)(j).length()) <= maximum_length) { push_back_string(new_document_words, documents_words(i)(j)); } From 864bfbc984049ce7f302874ebd3f7eb35b664bb0 Mon Sep 17 00:00:00 2001 From: Lenny Date: Tue, 30 Jul 2024 20:10:41 -0500 Subject: [PATCH 23/38] merge --- opennn/strings_utilities.cpp | 252 +++++++++++++++++++++++------------ opennn/strings_utilities.h | 16 +++ opennn/text_data_set.cpp | 161 +++++----------------- opennn/text_data_set.h | 4 +- 4 files changed, 218 insertions(+), 215 deletions(-) diff --git a/opennn/strings_utilities.cpp b/opennn/strings_utilities.cpp index e764f174c..1c4cdac7f 100644 --- a/opennn/strings_utilities.cpp +++ b/opennn/strings_utilities.cpp @@ -563,15 +563,15 @@ bool starts_with(const string& word, const string& starting) /// @param word Word to check. /// @param ending Substring to comparison given word. -bool ends_with(const string& word, const string& ending) -{ - if(ending.length() > word.length()) - { - return false; - } +//bool ends_with(const string& word, const string& ending) +//{ +// if(ending.length() > word.length()) +// { +// return false; +// } - return(word.substr(word.length() - ending.length()) == ending); -} +// return(word.substr(word.length() - ending.length()) == ending); +//} /// Returns true if a word ending with a given substring Tensor, and false otherwise. @@ -2593,77 +2593,6 @@ void delete_words(Tensor, 1>& tokens, const Tensor& } - - -/* -/// Returns the language selected. - -Language get_language() -{ - return lang; -} - - -/// Returns the language selected in string format. 
- -string get_language_string() -{ - if(lang == ENG) - { - return "ENG"; - } - else if(lang == SPA) - { - return "SPA"; - } - else - { - return string(); - } -} - - -/// Returns the stop words. - -Tensor get_stop_words() -{ - return stop_words; -} - - -/// Sets a stop words. -/// @param new_stop_words String Tensor with the new stop words. - -void set_stop_words(const Tensor& new_stop_words) -{ - stop_words = new_stop_words; -} - - -void set_separator(const string& new_separator) -{ - if(new_separator == "Semicolon") - { - separator = ";"; - } - else if(new_separator == "Tab") - { - separator = "\t"; - } - else - { - ostringstream buffer; - - buffer << "OpenNN Exception: TextAnalytics class.\n" - << "void set_separator(const string&) method.\n" - << "Unknown separator: " << new_separator << ".\n"; - - throw runtime_error(buffer.str()); - } -} -*/ - - /// Delete short words from the documents /// @param minimum_length Minimum length of the words that new documents must have(including herself) @@ -2677,18 +2606,20 @@ void delete_short_long_words(Tensor,1>& documents_words, for(Index i = 0; i < documents_number; i++) { - Tensor new_document_words; - for(Index j = 0; j < documents_words(i).size(); j++) { +<<<<<<< HEAD if(static_cast(documents_words(i)(j).length()) >= minimum_length && static_cast(documents_words(i)(j).length()) <= maximum_length) +======= + const Index length = documents_words(i)(j).length(); + + if(length >= minimum_length || length <= maximum_length) +>>>>>>> dev { - push_back_string(new_document_words, documents_words(i)(j)); + documents_words(i)(j).clear(); } } - - documents_words(i) = new_document_words; } } @@ -3748,6 +3679,159 @@ void print_tokens(const Tensor,1>& tokens) } } +bool is_vowel(char ch) +{ + return ch == 'a' || ch == 'e' || ch == 'i' || ch == 'o' || ch == 'u'; +} + +bool ends_with(const string& word, const string& suffix) +{ + return word.length() >= suffix.length() && word.compare(word.length() - suffix.length(), suffix.length(), 
suffix) == 0; +} + +int measure(const string& word) +{ + int count = 0; + bool vowel_seen = false; + for (char ch : word) + { + if(is_vowel(ch)) + { + vowel_seen = true; + } else if(vowel_seen) + { + count++; + vowel_seen = false; + } + } + + return count; +} + + +bool contains_vowel(const string& word) +{ + for (char ch : word) + { + if(is_vowel(ch)) + return true; + } + + return false; +} + + +bool is_double_consonant(const string& word) +{ + if(word.length() < 2) return false; + + char last = word[word.length() - 1]; + + char second_last = word[word.length() - 2]; + + return last == second_last && !is_vowel(last); +} + + +bool is_consonant_vowel_consonant(const string& word) +{ + if(word.length() < 3) return false; + char last = word[word.length() - 1]; + char second_last = word[word.length() - 2]; + char third_last = word[word.length() - 3]; + return !is_vowel(last) && is_vowel(second_last) && !is_vowel(third_last) && last != 'w' && last != 'x' && last != 'y'; +} + + +// Porter Stemmer algorithm + +string stem(const string& word) +{ + string result = word; + + if(result.length() <= 2) return result; + + // Convert to lowercase + transform(result.begin(), result.end(), result.begin(), ::tolower); + + // Step 1a + if(ends_with(result, "sses")) + { + result = result.substr(0, result.length() - 2); + } + else if(ends_with(result, "ies")) + { + result = result.substr(0, result.length() - 2); + } + else if(ends_with(result, "ss")) + { + // Do nothing + } + else if(ends_with(result, "s")) + { + result = result.substr(0, result.length() - 1); + } + + // Step 1b + + if(ends_with(result, "eed")) + { + if(measure(result.substr(0, result.length() - 3)) > 0) + { + result = result.substr(0, result.length() - 1); + } + } + else if((ends_with(result, "ed") || ends_with(result, "ing")) + && contains_vowel(result.substr(0, result.length() - 2))) + { + result = result.substr(0, result.length() - (ends_with(result, "ed") ? 
2 : 3)); + if(ends_with(result, "at") || ends_with(result, "bl") || ends_with(result, "iz")) + { + result += "e"; + } + else if(is_double_consonant(result)) + { + result = result.substr(0, result.length() - 1); + } + else if(measure(result) == 1 && is_consonant_vowel_consonant(result)) + { + result += "e"; + } + } + + // Step 1c + + if(ends_with(result, "y") && contains_vowel(result.substr(0, result.length() - 1))) + { + result[result.length() - 1] = 'i'; + } + + // Additional steps can be added here following the Porter Stemmer algorithm + + return result; +} + + +void stem(Tensor& words) +{ + + for(Index i = 0; i < words.size(); i++) + { + words(i) = stem(words(i)); + } +} + + +void stem(Tensor, 1>& words) +{ +#pragma omp parallel for + + for(Index i = 0; i < words.size(); i++) + { + stem(words(i)); + } +} + } // OpenNN: Open Neural Networks Library. diff --git a/opennn/strings_utilities.h b/opennn/strings_utilities.h index 2fda406e6..e923fa058 100644 --- a/opennn/strings_utilities.h +++ b/opennn/strings_utilities.h @@ -265,6 +265,22 @@ namespace opennn // Tensor top_words_correlations(const Tensor, 1>&, const double&, const Tensor&); + bool is_vowel(char); + + bool ends_with(const string&, const string&); + + int measure(const string&); + + bool contains_vowel(const string&); + + bool is_double_consonant(const string&); + + bool is_consonant_vowel_consonant(const string&); + + string stem(const string&); + void stem(Tensor&); + void stem(Tensor, 1>&); + void print_tokens(const Tensor,1>&); } diff --git a/opennn/text_data_set.cpp b/opennn/text_data_set.cpp index 9513754c6..9be0dcd00 100644 --- a/opennn/text_data_set.cpp +++ b/opennn/text_data_set.cpp @@ -1267,7 +1267,7 @@ Tensor TextDataSet::sentence_to_data(const string& sentence) const /// Reduces inflected (or sometimes derived) words to their word stem, base or root form (english language). 
/// @param tokens -Tensor,1> apply_english_stemmer(const Tensor,1>& tokens) +Tensor,1> stem(const Tensor,1>& tokens) { const Index documents_number = tokens.size(); @@ -1276,15 +1276,12 @@ Tensor,1> apply_english_stemmer(const Tensor,1 // Set vowels and suffixes Tensor vowels(6); - vowels.setValues({"a","e","i","o","u","y"}); Tensor double_consonants(9); - double_consonants.setValues({"bb", "dd", "ff", "gg", "mm", "nn", "pp", "rr", "tt"}); Tensor li_ending(10); - li_ending.setValues({"c", "d", "e", "g", "h", "k", "m", "n", "r", "t"}); const Index step0_suffixes_size = 3; @@ -1309,8 +1306,30 @@ Tensor,1> apply_english_stemmer(const Tensor,1 Tensor step2_suffixes(step2_suffixes_size); - step2_suffixes.setValues({"ization", "ational", "fulness", "ousness", "iveness", "tional", "biliti", "lessli", "entli", "ation", "alism", - "aliti", "ousli", "iviti", "fulli", "enci", "anci", "abli", "izer", "ator", "alli", "bli", "ogi", "li"}); + step2_suffixes.setValues({"ization", + "ational", + "fulness", + "ousness", + "iveness", + "tional", + "biliti", + "lessli", + "entli", + "ation", + "alism", + "aliti", + "ousli", + "iviti", + "fulli", + "enci", + "anci", + "abli", + "izer", + "ator", + "alli", + "bli", + "ogi", + "li"}); const Index step3_suffixes_size = 9; @@ -1325,7 +1344,7 @@ Tensor,1> apply_english_stemmer(const Tensor,1 step4_suffixes.setValues({"ement", "ance", "ence", "able", "ible", "ment", "ant", "ent", "ism", "ate", "iti", "ous", "ive", "ize", "ion", "al", "er", "ic"}); - Tensor special_words(40,2); + Tensor special_words(40,2); special_words(0,0) = "skis"; special_words(0,1) = "ski"; special_words(1,0) = "skies"; special_words(1,1) = "sky"; @@ -1383,18 +1402,18 @@ Tensor,1> apply_english_stemmer(const Tensor,1 trim(current_word); - if( contains(special_words.chip(0,1),current_word)) + if(contains(special_words.chip(0,1),current_word)) { auto it = find(special_words.data(), special_words.data() + special_words.size(), current_word); - Index word_index = it - 
special_words.data(); + const Index word_index = it - special_words.data(); - current_document(j) = special_words(word_index,1); + current_document(j) = special_words(word_index, 1); break; } - if(starts_with(current_word,"'")) + if(starts_with(current_word, "'")) { current_word = current_word.substr(1); } @@ -2025,12 +2044,12 @@ void TextDataSet::read_txt() //delete_emails(documents_words); - //tokens = apply_stemmer(documents_words); deleted recover from git - //delete_numbers(documents_words); //delete_blanks(documents_words); + documents_words = stem(documents_words); + print_tokens(documents_words); /* Tensor, 1> targets; @@ -2141,122 +2160,6 @@ void TextDataSet::read_txt() */ } - -void TextDataSet::load_documents() -{ - if(data_source_path.empty()) - { - ostringstream buffer; - - buffer << "OpenNN Exception: TextDataSet class.\n" - << "void load_documents() method.\n" - << "Data file name is empty.\n"; - - throw runtime_error(buffer.str()); - } - - ifstream file(data_source_path.c_str()); - - if(!file.is_open()) - { - ostringstream buffer; - - buffer << "OpenNN Exception: TextDataSet class.\n" - << "void load_documents() method.\n" - << "Cannot open documents file: " << data_source_path << "\n"; - - throw runtime_error(buffer.str()); - } - - Index lines_count = 0; - - string line; - - while(file.good()) - { - getline(file, line); - trim(line); - erase(line, '"'); - - if(line.empty()) continue; - - lines_count++; - - if(file.peek() == EOF) break; - } - - file.seekg (0, ios::beg); - - cout << lines_count << endl; - - Tensor documents(lines_count); - Tensor targets(lines_count); - - Index tokens_number = 0; - - string delimiter = ""; - const string separator = get_separator_string(); - - Tensor tokens; - - while(file.good()) - { - getline(file, line); - - if(line.empty()) continue; - - if(line[0] == '"') - { - replace(line, "\"\"", "\""); - - line = "\"" + line; - - delimiter = "\"\""; - } - - if(line.find("\"" + separator) != string::npos) - replace(line, "\"" 
+ separator, "\"\"" + separator); - - const Tensor tokens = get_tokens(line, delimiter + separator); - - tokens_number = tokens.size(); - - if(tokens_number == 1) - { - if(tokens(0).find(delimiter, 0) == 0) - documents(lines_count) += tokens(0).substr(delimiter.length(), tokens(0).size()); - else - documents(lines_count) += " " + tokens(0); - } - else if(tokens_number == 2) - { - if(tokens(0).empty() && tokens(1).empty()) continue; - - documents(lines_count) += " " + tokens(0); - - targets(lines_count) += tokens(1); - - delimiter = ""; - - lines_count++; - } - else if(tokens_number > 2) - { - ostringstream buffer; - - buffer << "OpenNN Exception: TextAnalytics class.\n" - << "void load_documents() method.\n" - << "Found more than one separator in line: " << line << "\n"; - - throw runtime_error(buffer.str()); - } - - if(file.peek() == EOF) break; - } - - file.close(); -} - } diff --git a/opennn/text_data_set.h b/opennn/text_data_set.h index b56738a9e..f4354230f 100644 --- a/opennn/text_data_set.h +++ b/opennn/text_data_set.h @@ -52,9 +52,9 @@ class TextDataSet : public DataSet void read_txt(); - Tensor, 1> apply_english_stemmer(const Tensor, 1>&) const; + Tensor, 1> stem(const Tensor, 1>&) const; + - void load_documents(); private: From b70466b48661df5f0553ace7555d785c8a7f81ce Mon Sep 17 00:00:00 2001 From: Lenny Date: Tue, 30 Jul 2024 20:17:21 -0500 Subject: [PATCH 24/38] Merging --- opennn/strings_utilities.cpp | 5 ----- 1 file changed, 5 deletions(-) diff --git a/opennn/strings_utilities.cpp b/opennn/strings_utilities.cpp index 1c4cdac7f..421968654 100644 --- a/opennn/strings_utilities.cpp +++ b/opennn/strings_utilities.cpp @@ -2608,14 +2608,9 @@ void delete_short_long_words(Tensor,1>& documents_words, { for(Index j = 0; j < documents_words(i).size(); j++) { -<<<<<<< HEAD - if(static_cast(documents_words(i)(j).length()) >= minimum_length - && static_cast(documents_words(i)(j).length()) <= maximum_length) -======= const Index length = 
documents_words(i)(j).length(); if(length >= minimum_length || length <= maximum_length) ->>>>>>> dev { documents_words(i)(j).clear(); } From 241f4c830aacfb81fd3bd077d27a7f8b2c4b752f Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Thu, 1 Aug 2024 09:27:38 -0500 Subject: [PATCH 25/38] Cleanup --- opennn/text_data_set.cpp | 3 ++- opennn/text_data_set.h | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/opennn/text_data_set.cpp b/opennn/text_data_set.cpp index ad0b7f90b..23bb27069 100644 --- a/opennn/text_data_set.cpp +++ b/opennn/text_data_set.cpp @@ -2055,13 +2055,13 @@ void TextDataSet::read_txt() delete_emails(documents_words); delete_numbers(documents_words); + delete_blanks(documents_words); stem(documents_words); // print_tokens(documents_words); - cout << "Calculating wordbag..." << endl; const Tensor tokens = tokens_list(documents_words); @@ -2151,6 +2151,7 @@ void TextDataSet::read_txt() // for(Index i = 0; i < get_input_raw_variables_number(); i++) // set_raw_variable_type(i, RawVariableType::Numeric); + } } diff --git a/opennn/text_data_set.h b/opennn/text_data_set.h index 35bd6b693..a89ed6eca 100644 --- a/opennn/text_data_set.h +++ b/opennn/text_data_set.h @@ -55,7 +55,6 @@ class TextDataSet : public DataSet - private: // Separator text_separator = Separator::Tab; From e2fcae766101b660c5864eeac40fb018739e9923 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Wed, 14 Aug 2024 09:39:06 -0500 Subject: [PATCH 26/38] no changes to this file needed --- opennn/strings_utilities.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/strings_utilities.h b/opennn/strings_utilities.h index 10392fac1..7955b8f59 100644 --- a/opennn/strings_utilities.h +++ b/opennn/strings_utilities.h @@ -282,7 +282,7 @@ namespace opennn - void print_tokens(const Tensor,1>&); + void print_tokens(const Tensor,1>&); } #endif // OPENNNSTRINGS_H From 1551a137af181526741d930c9b1814e5512bb60a Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich 
Date: Wed, 14 Aug 2024 11:29:36 -0500 Subject: [PATCH 27/38] header path is corrected --- tests/minkowski_error_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/minkowski_error_test.cpp b/tests/minkowski_error_test.cpp index 5933c9686..010155d10 100644 --- a/tests/minkowski_error_test.cpp +++ b/tests/minkowski_error_test.cpp @@ -7,7 +7,7 @@ // artelnics@artelnics.com #include "minkowski_error_test.h" -#include "tensors.h" +#include "../opennn/tensors.h" namespace opennn { From fc11c49ed65d255f18b5f238ff3f0d4fe7eed314 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Thu, 15 Aug 2024 13:06:07 -0500 Subject: [PATCH 28/38] Restore compilability --- .../Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h | 4 ++-- examples/forecasting/main.cpp | 2 +- opennn/learning_rate_algorithm.cpp | 1 + tests/statistics_test.cpp | 2 +- 4 files changed, 5 insertions(+), 4 deletions(-) diff --git a/eigen/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h b/eigen/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h index 23a2b5467..78eaab517 100644 --- a/eigen/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h +++ b/eigen/unsupported/Eigen/CXX11/src/ThreadPool/NonBlockingThreadPool.h @@ -352,7 +352,7 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface { } victim += inc; if (victim >= size) { - victim -= size; + victim -= static_cast(size); } } return Task(); @@ -443,7 +443,7 @@ class ThreadPoolTempl : public Eigen::ThreadPoolInterface { } victim += inc; if (victim >= size) { - victim -= size; + victim -= static_cast(size); } } return -1; diff --git a/examples/forecasting/main.cpp b/examples/forecasting/main.cpp index 91798b400..cfb940459 100644 --- a/examples/forecasting/main.cpp +++ b/examples/forecasting/main.cpp @@ -31,7 +31,7 @@ int main() // Data set - TimeSeriesDataSet time_series_data_set("../data/Pendulum.csv", ',', false); + TimeSeriesDataSet time_series_data_set("../data/Pendulum.csv", ',', false, 
false); time_series_data_set.set_lags_number(5); // Not working time_series_data_set.print(); diff --git a/opennn/learning_rate_algorithm.cpp b/opennn/learning_rate_algorithm.cpp index b95a9700a..4522209f2 100644 --- a/opennn/learning_rate_algorithm.cpp +++ b/opennn/learning_rate_algorithm.cpp @@ -8,6 +8,7 @@ #include "learning_rate_algorithm.h" #include "back_propagation.h" +#include "tensors.h" namespace opennn { diff --git a/tests/statistics_test.cpp b/tests/statistics_test.cpp index 5e94d0214..8218ad1cc 100644 --- a/tests/statistics_test.cpp +++ b/tests/statistics_test.cpp @@ -7,7 +7,7 @@ // artelnics@artelnics.com #include "statistics_test.h" -#include "strings_utilities.h" +#include "../opennn/strings_utilities.h" #include "../opennn/statistics.h" #include "../opennn/histogram.h" From b50d21b723ea7966bbac5b6f8b5a7cb4f0b8b0eb Mon Sep 17 00:00:00 2001 From: Lenny Date: Sat, 17 Aug 2024 20:26:49 -0500 Subject: [PATCH 29/38] Compilable in Debug mode --- opennn/learning_rate_algorithm.cpp | 2 +- opennn/neurons_selection.cpp | 2 +- opennn/statistics.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opennn/learning_rate_algorithm.cpp b/opennn/learning_rate_algorithm.cpp index 695d390f4..6c9b14d99 100644 --- a/opennn/learning_rate_algorithm.cpp +++ b/opennn/learning_rate_algorithm.cpp @@ -354,7 +354,7 @@ LearningRateAlgorithm::Triplet LearningRateAlgorithm::calculate_bracketing_tripl if(is_zero(optimization_data.training_direction)) throw runtime_error("Training direction is zero.\n"); - if(optimization_data.initial_learning_rate < type(NUMERIC_LIMITS_MIN)) + if(optimization_data.initial_learning_rate < type(NUMERIC_LIMITS_MIN)) { throw runtime_error("Initial learning rate is zero.\n"); } diff --git a/opennn/neurons_selection.cpp b/opennn/neurons_selection.cpp index eefd38c7e..304b7f343 100644 --- a/opennn/neurons_selection.cpp +++ b/opennn/neurons_selection.cpp @@ -180,7 +180,7 @@ void NeuronsSelection::set_maximum_neurons_number(const Index& 
new_maximum_neuro throw runtime_error("maximum_neurons(" + to_string(new_maximum_neurons) + ") must be greater than 0.\n"); if(new_maximum_neurons < minimum_neurons) - throw runtime_error("maximum_neurons(" + to_string(new_maximum_neurons) + ") must be equal or greater than minimum_neurons(" << to_string(minimum_neurons) + ").\n"); + throw runtime_error("maximum_neurons(" + to_string(new_maximum_neurons) + ") must be equal or greater than minimum_neurons(" + to_string(minimum_neurons) + ").\n"); #endif diff --git a/opennn/statistics.cpp b/opennn/statistics.cpp index e9221ecf1..08eef52cb 100644 --- a/opennn/statistics.cpp +++ b/opennn/statistics.cpp @@ -2165,7 +2165,7 @@ Tensor mean(const Tensor& matrix, const Tensor& row_ for(Index i = 0; i < row_indices_size; i++) { if(row_indices(i) >= rows_number) - throw runtime_error("Row index " + i + " must be less than rows number.\n"); + throw runtime_error("Row index " + to_string(i) + " must be less than rows number.\n"); } if(row_indices_size == 0) From 247491b50ded911963e5f43f51427d7b8f4b5be8 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Mon, 16 Sep 2024 11:51:49 -0500 Subject: [PATCH 30/38] Minor change --- opennn/loss_index.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/loss_index.cpp b/opennn/loss_index.cpp index d0fe36fde..cc3a27c48 100644 --- a/opennn/loss_index.cpp +++ b/opennn/loss_index.cpp @@ -925,7 +925,7 @@ Tensor LossIndex::calculate_numerical_inputs_derivatives() Index inputs_number = 1; - for(Index i = 0; i < inputs_dimensions.size(); i++) + for(size_t i = 0; i < inputs_dimensions.size(); i++) inputs_number *= inputs_dimensions[i]; inputs_number = samples_number * inputs_number; From 7a4d2d685260a6a6e25541c2b68e8f518e456ed0 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Wed, 9 Oct 2024 16:41:15 -0500 Subject: [PATCH 31/38] Eliminate few compile-time warnings --- opennn/auto_associative_neural_network.cpp | 4 ++-- opennn/embedding_layer.cpp | 2 +- 
opennn/loss_index.cpp | 6 +++--- opennn/perceptron_layer_3d.cpp | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/opennn/auto_associative_neural_network.cpp b/opennn/auto_associative_neural_network.cpp index b098dae7d..47aeaea66 100644 --- a/opennn/auto_associative_neural_network.cpp +++ b/opennn/auto_associative_neural_network.cpp @@ -546,7 +546,7 @@ void AutoAssociativeNeuralNetwork::to_XML(tinyxml2::XMLPrinter& file_stream) con buffer.str(""); - for(Index i = 0; i < layers.size(); i++) + for(Index i = 0; i < static_cast(layers.size()); i++) { buffer << layers[i]->get_type_string(); if(i != layers.size()-1) buffer << " "; @@ -558,7 +558,7 @@ void AutoAssociativeNeuralNetwork::to_XML(tinyxml2::XMLPrinter& file_stream) con // Layers information - for(Index i = 0; i < layers.size(); i++) + for(Index i = 0; i < static_cast(layers.size()); i++) { layers[i]->to_XML(file_stream); } diff --git a/opennn/embedding_layer.cpp b/opennn/embedding_layer.cpp index 19234cb9b..a844ad845 100644 --- a/opennn/embedding_layer.cpp +++ b/opennn/embedding_layer.cpp @@ -324,7 +324,7 @@ void EmbeddingLayer::add_deltas(const vector>& deltas_pa TensorMap> deltas = tensor_map_3(deltas_pair[0]); - for(Index i = 1; i < deltas_pair.size(); i++) + for(Index i = 1; i < static_cast(deltas_pair.size()); i++) { const TensorMap> other_deltas = tensor_map_3(deltas_pair[i]); diff --git a/opennn/loss_index.cpp b/opennn/loss_index.cpp index 9ce806784..e1f7271a4 100644 --- a/opennn/loss_index.cpp +++ b/opennn/loss_index.cpp @@ -788,7 +788,7 @@ vector>> BackPropagation::get_layer_delta_pairs(c continue; } - for (Index j = 0; j < layer_output_indices[i].size(); j++) + for (Index j = 0; j < static_cast(layer_output_indices[i].size()); j++) { const Index output_index = layer_output_indices[i][j]; const Index input_index = neural_network_ptr->find_input_index(layer_input_indices[output_index], i); @@ -1079,7 +1079,7 @@ void BackPropagationLM::set_layer_output_indices(const vector>& la 
for(Index i = 0; i < layers_number; i++) { for(Index j = 0; j < layers_number; j++) - for(Index k = 0; k < layer_inputs_indices[j].size(); k++) + for(Index k = 0; k < static_cast(layer_inputs_indices[j].size()); k++) if(layer_inputs_indices[j][k] == i) layer_count++; @@ -1088,7 +1088,7 @@ void BackPropagationLM::set_layer_output_indices(const vector>& la for(Index j = 0; j < layers_number; j++) { - for(Index k = 0; k < layer_inputs_indices[j].size(); k++) + for(Index k = 0; k < static_cast(layer_inputs_indices[j].size()); k++) { if(layer_inputs_indices[j][k] == i) { diff --git a/opennn/perceptron_layer_3d.cpp b/opennn/perceptron_layer_3d.cpp index 3798631a8..fcec9cff3 100644 --- a/opennn/perceptron_layer_3d.cpp +++ b/opennn/perceptron_layer_3d.cpp @@ -481,7 +481,7 @@ void PerceptronLayer3D::add_deltas(const vector>& deltas { TensorMap> deltas = tensor_map_3(deltas_pair[0]); - for(Index i = 1; i < deltas_pair.size(); i++) + for(Index i = 1; i < static_cast(deltas_pair.size()); i++) { const TensorMap> other_deltas = tensor_map_3(deltas_pair[i]); From baad85bf111c2377131e61bd8622f8304d7269e7 Mon Sep 17 00:00:00 2001 From: Lenny Date: Thu, 10 Oct 2024 06:07:55 -0500 Subject: [PATCH 32/38] Fixing compile error --- tests/minkowski_error_test.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/minkowski_error_test.cpp b/tests/minkowski_error_test.cpp index e8bc202f0..a815275ef 100644 --- a/tests/minkowski_error_test.cpp +++ b/tests/minkowski_error_test.cpp @@ -139,7 +139,7 @@ void MinkowskiErrorTest::test_back_propagate() // Loss index - Tensor trainable_layers = neural_network.get_trainable_layers(); + auto trainable_layers = neural_network.get_trainable_layers(); back_propagation.set(samples_number, &minkowski_error); From dacc63f432dd668899440fc47bbc624c25456404 Mon Sep 17 00:00:00 2001 From: Lenny Shleymovich Date: Wed, 16 Oct 2024 13:57:05 -0500 Subject: [PATCH 33/38] Warnings removed in a few places --- opennn/embedding_layer.cpp | 2 +-
opennn/loss_index.cpp | 2 +- opennn/perceptron_layer_3d.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opennn/embedding_layer.cpp b/opennn/embedding_layer.cpp index 098c2d39e..43d527f7e 100644 --- a/opennn/embedding_layer.cpp +++ b/opennn/embedding_layer.cpp @@ -326,7 +326,7 @@ void EmbeddingLayer::add_deltas(const vector>& delta_pai TensorMap> deltas = tensor_map_3(delta_pairs[0]); - for(Index i = 1; i < delta_pairs.size(); i++) + for(Index i = 1; i < static_cast(delta_pairs.size()); i++) { const TensorMap> other_deltas = tensor_map_3(delta_pairs[i]); diff --git a/opennn/loss_index.cpp b/opennn/loss_index.cpp index 6adffa1bf..1b6e52e6b 100644 --- a/opennn/loss_index.cpp +++ b/opennn/loss_index.cpp @@ -678,7 +678,7 @@ vector>> BackPropagation::get_layer_delta_pairs() continue; } - for (Index j = 0; j < layer_output_indices[i].size(); j++) + for (Index j = 0; j < static_cast(layer_output_indices[i].size()); j++) { const Index output_index = layer_output_indices[i][j]; const Index input_index = neural_network_ptr->find_input_index(layer_input_indices[output_index], i); diff --git a/opennn/perceptron_layer_3d.cpp b/opennn/perceptron_layer_3d.cpp index cea6a8459..f5680eb7c 100644 --- a/opennn/perceptron_layer_3d.cpp +++ b/opennn/perceptron_layer_3d.cpp @@ -481,7 +481,7 @@ void PerceptronLayer3D::add_deltas(const vector>& delta_ { TensorMap> deltas = tensor_map_3(delta_pairs[0]); - for(Index i = 1; i < delta_pairs.size(); i++) + for(Index i = 1; i < static_cast(delta_pairs.size()); i++) { const TensorMap> other_deltas = tensor_map_3(delta_pairs[i]); From ea87a8c34cc08c90333a43f0ca76a13922972a0f Mon Sep 17 00:00:00 2001 From: Lenny Date: Sat, 19 Oct 2024 20:01:30 -0500 Subject: [PATCH 34/38] Fix for warnings --- opennn/embedding_layer.cpp | 2 +- opennn/normalization_layer_3d.cpp | 2 +- opennn/perceptron_layer_3d.cpp | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opennn/embedding_layer.cpp b/opennn/embedding_layer.cpp index 
d177b90f9..593c90c9e 100644 --- a/opennn/embedding_layer.cpp +++ b/opennn/embedding_layer.cpp @@ -325,7 +325,7 @@ void EmbeddingLayer::add_deltas(const vector>& delta_pai { TensorMap> deltas = tensor_map_3(delta_pairs[0]); - for(Index i = 1; i < delta_pairs.size(); i++) + for(Index i = 1; i < static_cast(delta_pairs.size()); i++) deltas.device(*thread_pool_device) += tensor_map_3(delta_pairs[i]); } diff --git a/opennn/normalization_layer_3d.cpp b/opennn/normalization_layer_3d.cpp index 323511d3a..5161500fe 100644 --- a/opennn/normalization_layer_3d.cpp +++ b/opennn/normalization_layer_3d.cpp @@ -322,7 +322,7 @@ void NormalizationLayer3D::add_deltas(const vector>& del { TensorMap> deltas= tensor_map_3(delta_pairs[0]); - for(Index i = 1; i < delta_pairs.size(); i++) + for(Index i = 1; i < static_cast(delta_pairs.size()); i++) deltas.device(*thread_pool_device) += tensor_map_3(delta_pairs[i]); } diff --git a/opennn/perceptron_layer_3d.cpp b/opennn/perceptron_layer_3d.cpp index 9ae9b064c..89ab80925 100644 --- a/opennn/perceptron_layer_3d.cpp +++ b/opennn/perceptron_layer_3d.cpp @@ -456,7 +456,7 @@ void PerceptronLayer3D::add_deltas(const vector>& delta_ { TensorMap> deltas = tensor_map_3(delta_pairs[0]); - for(Index i = 1; i < delta_pairs.size(); i++) + for(Index i = 1; i < static_cast(delta_pairs.size()); i++) deltas.device(*thread_pool_device) += tensor_map_3(delta_pairs[i]); } From 8fb7d6a10fd38c49910777c939fc2f598f57bdf8 Mon Sep 17 00:00:00 2001 From: Lenny Date: Sat, 19 Oct 2024 20:01:55 -0500 Subject: [PATCH 35/38] Fix for error --- opennn/batch.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opennn/batch.cpp b/opennn/batch.cpp index 7bcbc5361..86f327602 100644 --- a/opennn/batch.cpp +++ b/opennn/batch.cpp @@ -27,7 +27,7 @@ void Batch::fill(const Tensor& samples_indices, { ImageDataSet* image_data_set = static_cast(data_set); // @TODO - Tensor& augmented_data = perform_augmentation(data); + const Tensor& augmented_data = 
perform_augmentation(data); fill_tensor_data(augmented_data, samples_indices, inputs_indices, input_data); } From 92da65f06aa593d99a98e6a60a47e4d4fb393e0e Mon Sep 17 00:00:00 2001 From: Lenny Date: Thu, 24 Oct 2024 19:02:25 -0500 Subject: [PATCH 36/38] Synchronizing --- opennn/auto_associative_neural_network.cpp | 21 ++++------- opennn/embedding_layer.cpp | 41 +++++++--------------- opennn/normalization_layer_3d.cpp | 28 ++------------- tests/genetic_algorithm_test.cpp | 6 ++-- 4 files changed, 24 insertions(+), 72 deletions(-) diff --git a/opennn/auto_associative_neural_network.cpp b/opennn/auto_associative_neural_network.cpp index ec9315439..7c971edcf 100644 --- a/opennn/auto_associative_neural_network.cpp +++ b/opennn/auto_associative_neural_network.cpp @@ -73,9 +73,7 @@ Tensor AutoAssociativeNeuralNetwork::get_multivariate_distances_box_plo Tensor minimum_distances(multivariate_distances_box_plot.size()); for(Index i = 0; i < multivariate_distances_box_plot.size(); i++) - { minimum_distances(i) = multivariate_distances_box_plot(i).minimum; - } return minimum_distances; } @@ -86,9 +84,7 @@ Tensor AutoAssociativeNeuralNetwork::get_multivariate_distances_box_plo Tensor first_quartile_distances(multivariate_distances_box_plot.size()); for(Index i = 0; i < multivariate_distances_box_plot.size(); i++) - { first_quartile_distances(i) = multivariate_distances_box_plot(i).first_quartile; - } return first_quartile_distances; } @@ -99,21 +95,18 @@ Tensor AutoAssociativeNeuralNetwork::get_multivariate_distances_box_plo Tensor median_distances(multivariate_distances_box_plot.size()); for(Index i = 0; i < multivariate_distances_box_plot.size(); i++) - { median_distances(i) = multivariate_distances_box_plot(i).median; - } return median_distances; } + Tensor AutoAssociativeNeuralNetwork::get_multivariate_distances_box_plot_third_quartile() const { Tensor third_quartile_distances(multivariate_distances_box_plot.size()); for(Index i = 0; i < 
multivariate_distances_box_plot.size(); i++) - { third_quartile_distances(i) = multivariate_distances_box_plot(i).third_quartile; - } return third_quartile_distances; } @@ -124,9 +117,7 @@ Tensor AutoAssociativeNeuralNetwork::get_multivariate_distances_box_plo Tensor maximum_distances(multivariate_distances_box_plot.size()); for(Index i = 0; i < multivariate_distances_box_plot.size(); i++) - { maximum_distances(i) = multivariate_distances_box_plot(i).maximum; - } return maximum_distances; } @@ -543,10 +534,12 @@ void AutoAssociativeNeuralNetwork::to_XML(tinyxml2::XMLPrinter& file_stream) con buffer.str(""); - for(Index i = 0; i < static_cast(layers.size()); i++) + for(Index i = 0; i < Index(layers.size()); i++) { buffer << layers[i]->get_type_string(); - if(i != layers.size()-1) buffer << " "; + + if(i != layers.size()-1) + buffer << " "; } file_stream.PushText(buffer.str().c_str()); @@ -555,10 +548,8 @@ void AutoAssociativeNeuralNetwork::to_XML(tinyxml2::XMLPrinter& file_stream) con // Layers information - for(Index i = 0; i < static_cast(layers.size()); i++) - { + for(Index i = 0; i < Index(layers.size()); i++) layers[i]->to_XML(file_stream); - } // Layers (end tag) diff --git a/opennn/embedding_layer.cpp b/opennn/embedding_layer.cpp index db8998786..18a1691bc 100644 --- a/opennn/embedding_layer.cpp +++ b/opennn/embedding_layer.cpp @@ -70,12 +70,6 @@ dimensions EmbeddingLayer::get_output_dimensions() const } -Tensor EmbeddingLayer::get_embedding_weights() const -{ - return embedding_weights; -} - - Index EmbeddingLayer::get_parameters_number() const { return embedding_weights.size(); @@ -224,16 +218,11 @@ void EmbeddingLayer::dropout(Tensor& outputs) const { const type scaling_factor = type(1) / (type(1) - dropout_rate); - type random; - + #pragma omp parallel for for(Index i = 0; i < outputs.size(); i++) - { - random = calculate_random_uniform(type(0), type(1)); - - outputs(i) = (random < dropout_rate) + outputs(i) = (calculate_random_uniform(type(0), type(1)) < 
dropout_rate) ? 0 : outputs(i) * scaling_factor; - } } @@ -241,15 +230,11 @@ void EmbeddingLayer::lookup_embedding(const Tensor& inputs, Tensor>& in const Tensor& positional_encoding = embedding_layer_forward_propagation->positional_encoding; for(Index batch_element = 0; batch_element < outputs.dimension(0); batch_element++) - { outputs.chip(batch_element, 0).device(*thread_pool_device) += positional_encoding; - } } - if(dropout_rate > 0 && is_training) dropout(outputs); + if(dropout_rate > 0 && is_training) + dropout(outputs); } @@ -311,12 +295,13 @@ void EmbeddingLayer::back_propagate(const vector>& input { if(positional_encoding) sample_deltas.device(*thread_pool_device) - = deltas.chip(i, 0) * sample_deltas.constant(sqrt(depth)); + = deltas.chip(i, 0) * sample_deltas.constant(sqrt(depth)); else sample_deltas.device(*thread_pool_device) = deltas.chip(i, 0); for(Index j = 0; j < inputs_number; j++) - embedding_weights_derivatives.chip(Index(inputs(i, j)), 0).device(*thread_pool_device) += sample_deltas.chip(j, 0); + embedding_weights_derivatives.chip(Index(inputs(i, j)), 0).device(*thread_pool_device) + += sample_deltas.chip(j, 0); } } @@ -324,13 +309,13 @@ void EmbeddingLayer::back_propagate(const vector>& input void EmbeddingLayer::add_deltas(const vector>& delta_pairs) const { TensorMap> deltas = tensor_map_3(delta_pairs[0]); - - for(Index i = 1; i < static_cast(delta_pairs.size()); i++) + + for(Index i = 1; i < Index(delta_pairs.size()); i++) deltas.device(*thread_pool_device) += tensor_map_3(delta_pairs[i]); } -void EmbeddingLayer::insert_gradient(unique_ptr back_propagation, +void EmbeddingLayer::insert_gradient(unique_ptr& back_propagation, const Index& index, Tensor& gradient) const { @@ -496,7 +481,8 @@ void EmbeddingLayerForwardPropagation::set(const Index& new_batch_samples_number outputs_data = outputs.data(); - if(embedding_layer->get_positional_encoding()) build_positional_encoding_matrix(); + if(embedding_layer->get_positional_encoding()) + 
build_positional_encoding_matrix(); } @@ -514,7 +500,6 @@ void EmbeddingLayerForwardPropagation::build_positional_encoding_matrix() const type half_depth = type(depth) / 2; #pragma omp parallel for - for(Index i = 0; i < inputs_number; i++) for(Index j = 0; j < Index(depth); j++) positional_encoding(i, j) = (j < Index(half_depth)) diff --git a/opennn/normalization_layer_3d.cpp b/opennn/normalization_layer_3d.cpp index 9b9323eda..845d76ef5 100644 --- a/opennn/normalization_layer_3d.cpp +++ b/opennn/normalization_layer_3d.cpp @@ -50,18 +50,6 @@ dimensions NormalizationLayer3D::get_output_dimensions() const } -const Tensor& NormalizationLayer3D::get_gammas() const -{ - return gammas; -} - - -const Tensor& NormalizationLayer3D::get_betas() const -{ - return betas; -} - - Index NormalizationLayer3D::get_gammas_number() const { return gammas.size(); @@ -144,18 +132,6 @@ void NormalizationLayer3D::set_inputs_depth(const Index& new_inputs_depth) } -void NormalizationLayer3D::set_gammas(const Tensor& new_gammas) -{ - gammas = new_gammas; -} - - -void NormalizationLayer3D::set_betas(const Tensor& new_betas) -{ - betas = new_betas; -} - - void NormalizationLayer3D::set_parameters(const Tensor& new_parameters, const Index& index) { memcpy(gammas.data(), new_parameters.data() + index, gammas.size()*sizeof(type)); @@ -322,12 +298,12 @@ void NormalizationLayer3D::add_deltas(const vector>& del { TensorMap> deltas= tensor_map_3(delta_pairs[0]); - for(Index i = 1; i < static_cast(delta_pairs.size()); i++) + for(Index i = 1; i < Index(delta_pairs.size()); i++) deltas.device(*thread_pool_device) += tensor_map_3(delta_pairs[i]); } -void NormalizationLayer3D::insert_gradient(unique_ptr back_propagation, +void NormalizationLayer3D::insert_gradient(unique_ptr& back_propagation, const Index& index, Tensor& gradient) const { diff --git a/tests/genetic_algorithm_test.cpp b/tests/genetic_algorithm_test.cpp index be5dd3c04..136830ef9 100644 --- a/tests/genetic_algorithm_test.cpp +++ 
b/tests/genetic_algorithm_test.cpp @@ -182,7 +182,7 @@ void GeneticAlgorithmTest::test_perform_selection() genetic_algorithm.set_fitness(fitness); selection_errors.resize(4); - selection_errors.setValues({type(0.4),type(0.3),type(0.2),type(0.1)}); + selection_errors.setValues({type(0.4), type(0.3), type(0.2), type(0.1)}); genetic_algorithm.initialize_population(); @@ -215,7 +215,7 @@ void GeneticAlgorithmTest::test_perform_selection() genetic_algorithm.set_fitness(fitness); selection_errors.resize(4); - selection_errors.setValues({type(0.4),type(0.3),type(0.2),type(0.1)}); + selection_errors.setValues({type(0.4), type(0.3), type(0.2), type(0.1)}); genetic_algorithm.initialize_population(); @@ -295,7 +295,7 @@ void GeneticAlgorithmTest::test_perform_crossover() genetic_algorithm.set_population(population); - selection_errors.setValues({type(0.4),type(0.3),type(0.2),type(0.1)}); + selection_errors.setValues({type(0.4), type(0.3), type(0.2), type(0.1)}); genetic_algorithm.set_selection_errors(selection_errors); From 3be769e021a5320815f3fd9378048834678aaad9 Mon Sep 17 00:00:00 2001 From: Lenny Date: Thu, 24 Oct 2024 20:48:57 -0500 Subject: [PATCH 37/38] 1 warning and 1 error --- opennn/perceptron_layer_3d.cpp | 2 +- tests/genetic_algorithm_test.cpp | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/opennn/perceptron_layer_3d.cpp b/opennn/perceptron_layer_3d.cpp index 7f07e0391..006ca50c8 100644 --- a/opennn/perceptron_layer_3d.cpp +++ b/opennn/perceptron_layer_3d.cpp @@ -461,7 +461,7 @@ void PerceptronLayer3D::add_deltas(const vector>& delta_ } -void PerceptronLayer3D::insert_gradient(unique_ptr back_propagation, +void PerceptronLayer3D::insert_gradient(unique_ptr& back_propagation, const Index& index, Tensor& gradient) const { diff --git a/tests/genetic_algorithm_test.cpp b/tests/genetic_algorithm_test.cpp index 136830ef9..47fb8555b 100644 --- a/tests/genetic_algorithm_test.cpp +++ b/tests/genetic_algorithm_test.cpp @@ -201,9 +201,9 @@ void 
GeneticAlgorithmTest::test_perform_selection() assert_true(selection(2) == 0 || selection(2) == 1,LOG); assert_true(selection(3) == 0 || selection(3) == 1,LOG); - assert_true( count(selection.data(), selection.data() + selection.size(), 1) == 2,LOG); + assert_true( count(selection.data(), selection.data() + selection.size(), 1U) == 2U,LOG); - assert_true( count(selection.data() + 1, selection.data() + selection.size(), 1) >= 1,LOG); + assert_true( count(selection.data() + 1, selection.data() + selection.size(), 1U) >= 1U,LOG); // 4 individuals with elitism size = 1 From f45d51a59c9cab9b7b0911bafa158ccd9e18eb5f Mon Sep 17 00:00:00 2001 From: Lenny Date: Thu, 24 Oct 2024 21:06:55 -0500 Subject: [PATCH 38/38] Typos corrected --- opennn/time_series_data_set.cpp | 6 ++++-- tests/genetic_algorithm_test.cpp | 4 ++-- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/opennn/time_series_data_set.cpp b/opennn/time_series_data_set.cpp index 2d10509b5..ab25080c2 100644 --- a/opennn/time_series_data_set.cpp +++ b/opennn/time_series_data_set.cpp @@ -52,14 +52,16 @@ Tensor TimeSeriesDataSet::get_time_series_raw_variables const string& TimeSeriesDataSet::get_time_raw_variable() const { //return time_column; @todo - return ""; + static const string empty(""); + return empty; } const string& TimeSeriesDataSet::get_group_by_column() const { //return group_by_column; @todo - return string(); + static const string empty; + return empty; } diff --git a/tests/genetic_algorithm_test.cpp b/tests/genetic_algorithm_test.cpp index 47fb8555b..136830ef9 100644 --- a/tests/genetic_algorithm_test.cpp +++ b/tests/genetic_algorithm_test.cpp @@ -201,9 +201,9 @@ void GeneticAlgorithmTest::test_perform_selection() assert_true(selection(2) == 0 || selection(2) == 1,LOG); assert_true(selection(3) == 0 || selection(3) == 1,LOG); - assert_true( count(selection.data(), selection.data() + selection.size(), 1U) == 2U,LOG); + assert_true( count(selection.data(), selection.data() + selection.size(), 
1) == 2,LOG); - assert_true( count(selection.data() + 1, selection.data() + selection.size(), 1U) >= 1U,LOG); + assert_true( count(selection.data() + 1, selection.data() + selection.size(), 1) >= 1,LOG); // 4 individuals with elitism size = 1