diff --git a/benchmark/cajita/Cajita_ParticleDynamicPartitionerPerformance.cpp b/benchmark/cajita/Cajita_ParticleDynamicPartitionerPerformance.cpp
index e28ec18cd..5c5aaf9de 100644
--- a/benchmark/cajita/Cajita_ParticleDynamicPartitionerPerformance.cpp
+++ b/benchmark/cajita/Cajita_ParticleDynamicPartitionerPerformance.cpp
@@ -146,8 +146,8 @@ void performanceTest( std::ostream& stream, MPI_Comm comm,
     for ( int t = 0; t < num_run; ++t )
     {
         // ensure every optimization process starts from the same status
-        partitioner.initializeRecPartition(
-            ave_partition[0], ave_partition[1], ave_partition[2] );
+        partitioner.initializePartitionByAverage( comm,
+                                                  global_num_cell );
 
         // compute local workload
         local_workload_timer.start( p );
diff --git a/benchmark/cajita/Cajita_SparseMapDynamicPartitionerPerformance.cpp b/benchmark/cajita/Cajita_SparseMapDynamicPartitionerPerformance.cpp
index 671993f6b..19b5f0533 100644
--- a/benchmark/cajita/Cajita_SparseMapDynamicPartitionerPerformance.cpp
+++ b/benchmark/cajita/Cajita_SparseMapDynamicPartitionerPerformance.cpp
@@ -164,8 +164,8 @@ void performanceTest( std::ostream& stream, MPI_Comm comm,
     for ( int t = 0; t < num_run; ++t )
    {
         // ensure every optimization process starts from the same status
-        partitioner.initializeRecPartition(
-            ave_partition[0], ave_partition[1], ave_partition[2] );
+        partitioner.initializePartitionByAverage( comm,
+                                                  global_num_cell );
 
         // compute local workload
         local_workload_timer.start( frac );
diff --git a/cajita/src/Cajita_DynamicPartitioner.hpp b/cajita/src/Cajita_DynamicPartitioner.hpp
index f13534b9a..af21ebde4 100644
--- a/cajita/src/Cajita_DynamicPartitioner.hpp
+++ b/cajita/src/Cajita_DynamicPartitioner.hpp
@@ -101,6 +101,7 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
         // compute the ranks_per_dim from MPI communicator
         allocate( global_cells_per_dim );
         ranksPerDimension( comm );
+        initializePartitionByAverage( comm, global_cells_per_dim );
     }
 
     /*!
@@ -126,6 +127,7 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
         int comm_size;
         MPI_Comm_size( comm, &comm_size );
         MPI_Dims_create( comm_size, num_space_dim, _ranks_per_dim.data() );
+        initializePartitionByAverage( comm, global_cells_per_dim );
     }
 
     /*!
@@ -267,18 +269,49 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
     }
 
     /*!
-      \brief Initialize the tile partition; partition in each dimension
+      \brief Initialize the tile partition by average size
+      \param comm The communicator to use for initializing partitioning
+      \param global_cells_per_dim 3D array, global cells in each dimension
+    */
+    void initializePartitionByAverage(
+        MPI_Comm comm,
+        const std::array<int, num_space_dim>& global_cells_per_dim )
+    {
+        std::array<int, 3> global_num_tile = {
+            global_cells_per_dim[0] / (int)cell_num_per_tile_dim,
+            global_cells_per_dim[1] / (int)cell_num_per_tile_dim,
+            global_cells_per_dim[2] / (int)cell_num_per_tile_dim };
+
+        auto ranks_per_dim = ranksPerDimension( comm, global_cells_per_dim );
+        std::array<std::vector<int>, 3> rec_partitions;
+        for ( int d = 0; d < 3; ++d )
+        {
+            int ele = global_num_tile[d] / ranks_per_dim[d];
+            int part = 0;
+            for ( int i = 0; i < ranks_per_dim[d]; ++i )
+            {
+                rec_partitions[d].push_back( part );
+                part += ele;
+            }
+            rec_partitions[d].push_back( global_num_tile[d] );
+        }
+
+        setRecPartition( rec_partitions[0], rec_partitions[1],
+                         rec_partitions[2] );
+    }
+
+    /*!
+      \brief Set the tile partition; partition in each dimension
       has the form [0, p_1, ..., p_n, total_tile_num], so the partition
       would be [0, p_1), [p_1, p_2) ... [p_n, total_tile_num]
      \param rec_partition_i partition array in dimension i
      \param rec_partition_j partition array in dimension j
      \param rec_partition_k partition array in dimension k
    */
-    void initializeRecPartition( std::vector<int>& rec_partition_i,
-                                 std::vector<int>& rec_partition_j,
-                                 std::vector<int>& rec_partition_k )
+    void setRecPartition( std::vector<int>& rec_partition_i,
+                          std::vector<int>& rec_partition_j,
+                          std::vector<int>& rec_partition_k )
     {
-
         int max_size = 0;
         for ( std::size_t d = 0; d < num_space_dim; ++d )
             max_size =
diff --git a/cajita/unit_test/tstGlobalGrid.hpp b/cajita/unit_test/tstGlobalGrid.hpp
index 6d40623db..1e0e243ff 100644
--- a/cajita/unit_test/tstGlobalGrid.hpp
+++ b/cajita/unit_test/tstGlobalGrid.hpp
@@ -446,8 +446,6 @@ void sparseGridTest3d()
         }
         rec_partitions[d].push_back( global_num_tile[d] );
     }
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
 
     // Create spares global grid
     auto global_grid = createGlobalGrid( MPI_COMM_WORLD, global_mesh,
@@ -562,7 +560,7 @@ void sparseGridTest3d()
         for ( int id = 1; id < ranks_per_dim[d]; id++ )
             part[d][id] += 1;
 
-    partitioner.initializeRecPartition( part[0], part[1], part[2] );
+    partitioner.setRecPartition( part[0], part[1], part[2] );
 
     std::array<int, 3> new_owned_num_cell;
     std::array<int, 3> new_global_cell_offset;
diff --git a/cajita/unit_test/tstParticleDynamicPartitioner.hpp b/cajita/unit_test/tstParticleDynamicPartitioner.hpp
index 539f358bd..a06e96162 100644
--- a/cajita/unit_test/tstParticleDynamicPartitioner.hpp
+++ b/cajita/unit_test/tstParticleDynamicPartitioner.hpp
@@ -177,23 +177,6 @@ void random_distribution_automatic_rank( int occupy_num_per_rank )
         MPI_Barrier( MPI_COMM_WORLD );
     }
 
-    // init partitions (average partition)
-    std::array<std::vector<int>, 3> rec_partitions;
-    for ( int d = 0; d < 3; ++d )
-    {
-        int ele = size_tile_per_dim / ranks_per_dim[d];
-        int part = 0;
-        for ( int i = 0; i < ranks_per_dim[d]; ++i )
-        {
-            rec_partitions[d].push_back( part );
-            part += ele;
-        }
-        rec_partitions[d].push_back( size_tile_per_dim );
-    }
-
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
-
     // basic settings for domain size and position
     double cell_size = 0.1;
     std::array<double, 3> global_low_corner = { 1.2, 3.3, -2.8 };
diff --git a/cajita/unit_test/tstSparseLocalGrid.hpp b/cajita/unit_test/tstSparseLocalGrid.hpp
index e6a10f674..dfb4b3209 100644
--- a/cajita/unit_test/tstSparseLocalGrid.hpp
+++ b/cajita/unit_test/tstSparseLocalGrid.hpp
@@ -33,9 +33,6 @@ void sparseLocalGridTest( EntityType t2 )
     double cell_size = 0.23;
     std::array<int, 3> global_num_cell = { 16, 32, 64 };
     int cell_num_per_tile_dim = 4;
-    std::array<int, 3> global_num_tile = { 16 / cell_num_per_tile_dim,
-                                           32 / cell_num_per_tile_dim,
-                                           64 / cell_num_per_tile_dim };
     std::array<double, 3> global_low_corner = { 1.2, 3.3, -2.8 };
     std::array<double, 3> global_high_corner = {
         global_low_corner[0] + cell_size * global_num_cell[0],
@@ -48,22 +45,6 @@
     std::array<bool, 3> periodic = { false, false, false };
     DynamicPartitioner partitioner( MPI_COMM_WORLD, global_num_cell, 10 );
-    auto ranks_per_dim =
-        partitioner.ranksPerDimension( MPI_COMM_WORLD, global_num_cell );
-    std::array<std::vector<int>, 3> rec_partitions;
-    for ( int d = 0; d < 3; ++d )
-    {
-        int ele = global_num_tile[d] / ranks_per_dim[d];
-        int part = 0;
-        for ( int i = 0; i < ranks_per_dim[d]; ++i )
-        {
-            rec_partitions[d].push_back( part );
-            part += ele;
-        }
-        rec_partitions[d].push_back( global_num_tile[d] );
-    }
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
 
     // Create global grid
     auto global_grid_ptr = Cajita::createGlobalGrid(
diff --git a/cajita/unit_test/tstSparseMapDynamicPartitioner.hpp b/cajita/unit_test/tstSparseMapDynamicPartitioner.hpp
index 0374ab9e5..fb30ddb09 100644
--- a/cajita/unit_test/tstSparseMapDynamicPartitioner.hpp
+++ b/cajita/unit_test/tstSparseMapDynamicPartitioner.hpp
@@ -74,8 +74,6 @@ void uniform_distribution_automatic_rank()
         }
         rec_partitions[d].push_back( size_tile_per_dim );
     }
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
 
     // test getCurrentPartition function
     {
@@ -324,9 +322,6 @@ void random_distribution_automatic_rank( int occupy_num_per_rank )
         rec_partitions[d].push_back( size_tile_per_dim );
     }
 
-    partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                        rec_partitions[2] );
-
     // basic settings for domain size and position
     double cell_size = 0.1;
    int pre_alloc_size = size_per_dim * size_per_dim;
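
For context, below is a minimal usage sketch of the reworked interface, written against the calls visible in this patch. The Kokkos::HostSpace memory space, the tile-size template argument of 4, the direct Cajita_DynamicPartitioner.hpp include, and the partitionExample wrapper are illustrative assumptions; the constructor arguments and the initializePartitionByAverage / setRecPartition / ranksPerDimension calls mirror the tests and benchmarks above.

// Sketch only: assumes MPI_Init and Kokkos::initialize have already been
// called, and that the template arguments below are acceptable for
// Cajita::DynamicPartitioner (memory space, cells per tile dimension).
#include <Cajita_DynamicPartitioner.hpp>

#include <Kokkos_Core.hpp>

#include <mpi.h>

#include <array>
#include <vector>

void partitionExample( MPI_Comm comm )
{
    std::array<int, 3> global_num_cell = { 16, 32, 64 };

    // The constructors now call initializePartitionByAverage() themselves,
    // so a uniform rectilinear partition exists as soon as the object is
    // built.
    Cajita::DynamicPartitioner<Kokkos::HostSpace, 4> partitioner(
        comm, global_num_cell, 10 );

    // Reset to the average partition explicitly, e.g. before repeating an
    // optimization run (as the benchmarks above now do on every iteration).
    partitioner.initializePartitionByAverage( comm, global_num_cell );

    // Or impose a hand-built rectilinear partition with the renamed setter
    // (formerly initializeRecPartition).
    auto ranks_per_dim =
        partitioner.ranksPerDimension( comm, global_num_cell );
    std::array<std::vector<int>, 3> rec_partitions;
    for ( int d = 0; d < 3; ++d )
    {
        int tiles_per_dim = global_num_cell[d] / 4; // 4 cells per tile side
        int step = tiles_per_dim / ranks_per_dim[d];
        for ( int i = 0; i < ranks_per_dim[d]; ++i )
            rec_partitions[d].push_back( i * step );
        rec_partitions[d].push_back( tiles_per_dim );
    }
    partitioner.setRecPartition( rec_partitions[0], rec_partitions[1],
                                 rec_partitions[2] );
}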