diff --git a/core/src/CMakeLists.txt b/core/src/CMakeLists.txt
index 7d69dc852..571bdba75 100644
--- a/core/src/CMakeLists.txt
+++ b/core/src/CMakeLists.txt
@@ -32,6 +32,12 @@ if(MPI_FOUND)
     )
 endif()

+if(Cabana_ENABLE_CAJITA)
+  list(APPEND HEADERS_PUBLIC
+    Cabana_ParticleGridCommunication.hpp
+    )
+endif()
+
 set(HEADERS_IMPL
   impl/Cabana_CartesianGrid.hpp
   impl/Cabana_Index.hpp
diff --git a/core/src/Cabana_Core.hpp b/core/src/Cabana_Core.hpp
index 3f72e795d..67737f3a3 100644
--- a/core/src/Cabana_Core.hpp
+++ b/core/src/Cabana_Core.hpp
@@ -33,6 +33,10 @@
 #include <Cabana_Halo.hpp>
 #endif

+#ifdef Cabana_ENABLE_CAJITA
+#include <Cabana_ParticleGridCommunication.hpp>
+#endif
+
 #ifdef Cabana_ENABLE_ARBORX
 #include <Cabana_Experimental_NeighborList.hpp>
 #endif
diff --git a/core/src/Cabana_Halo.hpp b/core/src/Cabana_Halo.hpp
index a97055625..407b82201 100644
--- a/core/src/Cabana_Halo.hpp
+++ b/core/src/Cabana_Halo.hpp
@@ -222,9 +222,11 @@ struct is_halo : public is_halo_impl<typename std::remove_cv<T>::type>::type
   elements are expected to appear first (i.e. in the first halo.numLocal()
   elements) and the ghosted elements are expected to appear second (i.e. in
   the next halo.numGhost() elements).
+
+  \param modify_buffer A functor to modify particle properties in the send
+  buffer before communication.
 */
-template <class Halo_t, class AoSoA_t>
-void gather( const Halo_t &halo, AoSoA_t &aosoa,
+template <class Halo_t, class AoSoA_t, class Functor_t>
+void gather( const Halo_t &halo, AoSoA_t &aosoa, Functor_t modify_buffer,
              typename std::enable_if<( is_halo<Halo_t>::value &&
                                        is_aosoa<AoSoA_t>::value ),
                                      int>::type * = 0 )
@@ -246,6 +248,7 @@ void gather( const Halo_t &halo, AoSoA_t &aosoa,
     auto gather_send_buffer_func = KOKKOS_LAMBDA( const std::size_t i )
     {
         send_buffer( i ) = aosoa.getTuple( steering( i ) );
+        modify_buffer( i );
     };
     Kokkos::RangePolicy<typename Halo_t::execution_space>
         gather_send_buffer_policy( 0, halo.totalNumExport() );
@@ -320,6 +323,15 @@ void gather( const Halo_t &halo, AoSoA_t &aosoa,
     MPI_Barrier( halo.comm() );
 }

+template <class Halo_t, class AoSoA_t>
+void gather( const Halo_t &halo, AoSoA_t &aosoa,
+             typename std::enable_if<( is_halo<Halo_t>::value &&
+                                       is_aosoa<AoSoA_t>::value ),
+                                     int>::type * = 0 )
+{
+    // Gather with a no-op buffer modification functor.
+    gather( halo, aosoa, KOKKOS_LAMBDA( const int ) {} );
+}
+
 //---------------------------------------------------------------------------//
 /*!
   \brief Synchronously gather data from the local decomposition to the ghosts
diff --git a/core/src/Cabana_ParticleGridCommunication.hpp b/core/src/Cabana_ParticleGridCommunication.hpp
new file mode 100644
index 000000000..faca39766
--- /dev/null
+++ b/core/src/Cabana_ParticleGridCommunication.hpp
@@ -0,0 +1,511 @@
+/****************************************************************************
+ * Copyright (c) 2018-2020 by the Cabana authors                            *
+ * All rights reserved.                                                     *
+ *                                                                          *
+ * This file is part of the Cabana library. Cabana is distributed under a  *
+ * BSD 3-clause license. For the licensing terms see the LICENSE file in   *
+ * the top-level directory.                                                 *
+ *                                                                          *
+ * SPDX-License-Identifier: BSD-3-Clause                                    *
+ ****************************************************************************/
+
+#ifndef CABANA_PERIODICCOMM_HPP
+#define CABANA_PERIODICCOMM_HPP
+
+#include <Cabana_Distributor.hpp>
+#include <Cabana_Halo.hpp>
+
+#include <Cajita_GlobalGrid.hpp>
+#include <Cajita_GlobalMesh.hpp>
+#include <Cajita_LocalGrid.hpp>
+#include <Cajita_LocalMesh.hpp>
+
+#include <Kokkos_Core.hpp>
+
+#include <algorithm>
+#include <vector>
+
+namespace Cabana
+{
+
+//---------------------------------------------------------------------------//
+/*!
+  \brief Wrap particles through periodic bounds according to the Cajita grid
+  global bounds.
+
+  \tparam LocalGridType Cajita LocalGrid type.
+
+  \tparam PositionSliceType Particle position slice or view type.
+
+  \param local_grid The local grid containing periodicity and system bound
+  information.
+
+  \param positions The particle position container, either Slice or View.
+*/
+// TODO: inline
+template <class LocalGridType, class PositionSliceType>
+void periodicWrap( const LocalGridType &local_grid,
+                   PositionSliceType &positions )
+{
+    using execution_space = typename PositionSliceType::execution_space;
+
+    const auto &global_grid = local_grid.globalGrid();
+    const auto &global_mesh = global_grid.globalMesh();
+    const Kokkos::Array<bool, 3> periodic = {
+        global_grid.isPeriodic( Cajita::Dim::I ),
+        global_grid.isPeriodic( Cajita::Dim::J ),
+        global_grid.isPeriodic( Cajita::Dim::K )};
+    const Kokkos::Array<double, 3> global_low = {
+        global_mesh.lowCorner( Cajita::Dim::I ),
+        global_mesh.lowCorner( Cajita::Dim::J ),
+        global_mesh.lowCorner( Cajita::Dim::K )};
+    const Kokkos::Array<double, 3> global_high = {
+        global_mesh.highCorner( Cajita::Dim::I ),
+        global_mesh.highCorner( Cajita::Dim::J ),
+        global_mesh.highCorner( Cajita::Dim::K )};
+    const Kokkos::Array<double, 3> global_span = {
+        global_mesh.extent( Cajita::Dim::I ),
+        global_mesh.extent( Cajita::Dim::J ),
+        global_mesh.extent( Cajita::Dim::K )};
+    Kokkos::parallel_for(
+        "periodic_wrap",
+        Kokkos::RangePolicy<execution_space>( 0, positions.size() ),
+        KOKKOS_LAMBDA( const int p ) {
+            for ( int d = 0; d < 3; ++d )
+            {
+                if ( periodic[d] )
+                {
+                    if ( positions( p, d ) > global_high[d] )
+                        positions( p, d ) -= global_span[d];
+                    else if ( positions( p, d ) < global_low[d] )
+                        positions( p, d ) += global_span[d];
+                }
+            }
+        } );
+}
+
+namespace Impl
+{
+
+//---------------------------------------------------------------------------//
+// Locate the particles in the local grid and get their destination rank.
+// Particles are assumed to only migrate to a location in the 26 neighbor
+// halo or stay on this rank. If a particle crosses a global periodic
+// boundary, wrap its coordinates back into the domain.
+template <class LocalGridType, class NeighborRankView,
+          class DestinationRankView, class PositionSliceType>
+void getMigrateDestinations( const LocalGridType &local_grid,
+                             const NeighborRankView &neighbor_ranks,
+                             DestinationRankView &destinations,
+                             const PositionSliceType &positions )
+{
+    using execution_space = typename PositionSliceType::execution_space;
+
+    auto local_mesh = Cajita::createLocalMesh<Kokkos::HostSpace>( local_grid );
+
+    // TODO: add optional "force_comm"
+    // TODO: add optional halo_width
+    const Kokkos::Array<double, 3> local_low = {
+        local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::I ),
+        local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::J ),
+        local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::K )};
+    const Kokkos::Array<double, 3> local_high = {
+        local_mesh.highCorner( Cajita::Own(), Cajita::Dim::I ),
+        local_mesh.highCorner( Cajita::Own(), Cajita::Dim::J ),
+        local_mesh.highCorner( Cajita::Own(), Cajita::Dim::K )};
+
+    Kokkos::parallel_for(
+        "get_migrate_destinations",
+        Kokkos::RangePolicy<execution_space>( 0, positions.size() ),
+        KOKKOS_LAMBDA( const int p ) {
+            // Compute the logical index of the neighbor we are sending to.
+            int nid[3] = {1, 1, 1};
+            for ( int d = 0; d < 3; ++d )
+            {
+                if ( positions( p, d ) < local_low[d] )
+                    nid[d] = 0;
+                else if ( positions( p, d ) > local_high[d] )
+                    nid[d] = 2;
+            }
+
+            // Compute the destination MPI rank.
+            destinations( p ) = neighbor_ranks(
+                nid[Cajita::Dim::I] +
+                3 * ( nid[Cajita::Dim::J] + 3 * nid[Cajita::Dim::K] ) );
+        } );
+}
+
+//---------------------------------------------------------------------------//
+// Locate particles within the local grid and determine if any from this
+// rank need to be ghosted to one (or more) of the 26 neighbor ranks,
+// keeping track of the destination rank, the index in the container, and
+// the periodic shift needed (not yet applied).
+template <class LocalGridType, class NeighborRankView,
+          class DestinationRankView, class ShiftRankView,
+          class PositionSliceType>
+void getHaloIds( const LocalGridType &local_grid,
+                 const NeighborRankView &neighbor_ranks,
+                 DestinationRankView &destinations, DestinationRankView &ids,
+                 ShiftRankView &shifts, PositionSliceType &positions )
+{
+    using execution_space = typename PositionSliceType::execution_space;
+    using memory_space = typename PositionSliceType::memory_space;
+
+    // Locate the particles in the local grid and determine if they need to
+    // be ghosted on neighboring ranks.
+    // TODO: can't use width (int) this way
+    // auto halo_width = local_grid.haloCellWidth();
+    double halo_width = 0.0;
+    auto local_mesh = Cajita::createLocalMesh<Kokkos::HostSpace>( local_grid );
+    const Kokkos::Array<double, 3> local_low = {
+        local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::I ) + halo_width,
+        local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::J ) + halo_width,
+        local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::K ) + halo_width};
+    const Kokkos::Array<double, 3> local_high = {
+        local_mesh.highCorner( Cajita::Own(), Cajita::Dim::I ) - halo_width,
+        local_mesh.highCorner( Cajita::Own(), Cajita::Dim::J ) - halo_width,
+        local_mesh.highCorner( Cajita::Own(), Cajita::Dim::K ) - halo_width};
+
+    const auto &global_grid = local_grid.globalGrid();
+    const Kokkos::Array<bool, 3> periodic = {
+        global_grid.isPeriodic( Cajita::Dim::I ),
+        global_grid.isPeriodic( Cajita::Dim::J ),
+        global_grid.isPeriodic( Cajita::Dim::K )};
+    const Kokkos::Array<bool, 3> low_edge = {
+        global_grid.dimBlockId( Cajita::Dim::I ) == 0,
+        global_grid.dimBlockId( Cajita::Dim::J ) == 0,
+        global_grid.dimBlockId( Cajita::Dim::K ) == 0};
+    const Kokkos::Array<bool, 3> high_edge = {
+        global_grid.dimNumBlock( Cajita::Dim::I ) - 1 ==
+            global_grid.dimBlockId( Cajita::Dim::I ),
+        global_grid.dimNumBlock( Cajita::Dim::J ) - 1 ==
+            global_grid.dimBlockId( Cajita::Dim::J ),
+        global_grid.dimNumBlock( Cajita::Dim::K ) - 1 ==
+            global_grid.dimBlockId( Cajita::Dim::K )};
+    const auto &global_mesh = global_grid.globalMesh();
+    const Kokkos::Array<double, 3> global_span = {
+        global_mesh.extent( Cajita::Dim::I ),
+        global_mesh.extent( Cajita::Dim::J ),
+        global_mesh.extent( Cajita::Dim::K )};
+
+    Kokkos::View<std::size_t, memory_space,
+                 Kokkos::MemoryTraits<Kokkos::Atomic>>
+        num_send( "num_send_halo" );
+    auto halo_ids_func = KOKKOS_LAMBDA( const int p )
+    {
+        // Compute the logical index of the neighbor we are sending to.
+        int nr = 0;
+        for ( int k = -1; k < 2; ++k )
+            for ( int j = -1; j < 2; ++j )
+                for ( int i = -1; i < 2; ++i, ++nr )
+                {
+                    // Add a ghost if this particle is near the local
+                    // boundary, potentially for each of the 26 neighbors.
+                    if ( i &&
+                         ( positions( p, Cajita::Dim::I ) <
+                               local_low[Cajita::Dim::I] ||
+                           positions( p, Cajita::Dim::I ) >
+                               local_high[Cajita::Dim::I] ) &&
+                         j &&
+                         ( positions( p, Cajita::Dim::J ) <
+                               local_low[Cajita::Dim::J] ||
+                           positions( p, Cajita::Dim::J ) >
+                               local_high[Cajita::Dim::J] ) &&
+                         k &&
+                         ( positions( p, Cajita::Dim::K ) <
+                               local_low[Cajita::Dim::K] ||
+                           positions( p, Cajita::Dim::K ) >
+                               local_high[Cajita::Dim::K] ) )
+                    {
+                        const std::size_t ns = num_send()++;
+                        // If the size of the destination and other arrays is
+                        // exceeded, keep counting so we can resize and fill
+                        // on a second pass.
+                        if ( ns < destinations.extent( 0 ) )
+                        {
+                            // Compute the destination MPI rank.
+                            destinations( ns ) = neighbor_ranks(
+                                ( i + 1 ) + 3 * ( ( j + 1 ) + 3 * ( k + 1 ) ) );
+                            // Keep the particle ID.
+                            ids( ns ) = p;
+                            // Determine if this ghost particle needs to be
+                            // shifted through the periodic boundary.
+                            for ( int d = 0; d < 3; ++d )
+                            {
+                                // The shift view is uninitialized, so zero
+                                // it before accumulating.
+                                shifts( ns, d ) = 0.0;
+                                if ( periodic[d] )
+                                {
+                                    if ( high_edge[d] )
+                                        shifts( ns, d ) -= global_span[d];
+                                    else if ( low_edge[d] )
+                                        shifts( ns, d ) += global_span[d];
+                                }
+                            }
+                        }
+                    }
+                }
+    };
+
+    auto policy = Kokkos::RangePolicy<execution_space>( 0, positions.size() );
+    Kokkos::parallel_for( "get_halo_ids", policy, halo_ids_func );
+
+    // Copy the atomic send count back to the host.
+    std::size_t num_send_host = 0;
+    Kokkos::deep_copy( num_send_host, num_send );
+
+    // Resize views to actual send sizes.
+    std::size_t first_count = destinations.extent( 0 );
+    if ( num_send_host != first_count )
+    {
+        Kokkos::resize( destinations, num_send_host );
+        Kokkos::resize( ids, num_send_host );
+        Kokkos::resize( shifts, num_send_host, 3 );
+    }
+    // If the original view sizes were exceeded, only counting was done, so
+    // we need to rerun.
+    // NOTE: halo_ids_func captured the original views by value, so they
+    // must be re-captured after the resize for the second pass to fill the
+    // new allocations.
+    if ( num_send_host > first_count )
+    {
+        Kokkos::deep_copy( num_send, 0 );
+        Kokkos::parallel_for( "get_halo_ids_again", policy, halo_ids_func );
+    }
+
+    // Periodic coordinate shifts are applied later, in the send buffers.
+}
+
+//---------------------------------------------------------------------------//
+// Of the 27 potential local grids figure out which are in our topology.
+// Some of the ranks in this list may be invalid. This needs to be updated
+// after computing destination ranks to only contain unique and valid ranks.
+template <class LocalGridType>
+std::vector<int> getTopology( const LocalGridType &local_grid )
+{
+    std::vector<int> topology( 27, -1 );
+    int nr = 0;
+    for ( int k = -1; k < 2; ++k )
+        for ( int j = -1; j < 2; ++j )
+            for ( int i = -1; i < 2; ++i, ++nr )
+                topology[nr] = local_grid.neighborRank( i, j, k );
+    return topology;
+}
+
+//---------------------------------------------------------------------------//
+// Make the topology a list of unique and valid ranks.
+inline std::vector<int> getUniqueTopology( std::vector<int> topology )
+{
+    auto remove_end = std::remove( topology.begin(), topology.end(), -1 );
+    std::sort( topology.begin(), remove_end );
+    auto unique_end = std::unique( topology.begin(), remove_end );
+    topology.resize( std::distance( topology.begin(), unique_end ) );
+    return topology;
+}
+
+} // namespace Impl
+
+//---------------------------------------------------------------------------//
+/*!
+  \brief gridDistributor determines which data should be migrated from one
+  uniquely-owned decomposition to another uniquely-owned decomposition, using
+  the bounds of a Cajita grid and taking periodic boundaries into account.
+
+  \tparam LocalGridType Cajita LocalGrid type.
+
+  \tparam PositionSliceType Position slice or view type.
+
+  \param local_grid The local grid containing periodicity and system bound
+  information.
+
+  \param positions The particle positions.
+
+  \return Distributor for later migration.
+*/
+template <class LocalGridType, class PositionSliceType>
+Distributor<typename PositionSliceType::device_type>
+gridDistributor( const LocalGridType &local_grid,
+                 PositionSliceType &positions )
+{
+    using device_type = typename PositionSliceType::device_type;
+
+    // Get all 27 ranks in the topology (this rank and its neighbors).
+    auto topology = Impl::getTopology( local_grid );
+
+    Kokkos::View<int *, Kokkos::HostSpace,
+                 Kokkos::MemoryTraits<Kokkos::Unmanaged>>
+        neighbor_ranks( topology.data(), topology.size() );
+    auto nr_mirror =
+        Kokkos::create_mirror_view_and_copy( device_type(), neighbor_ranks );
+    Kokkos::View<int *, device_type> destinations(
+        Kokkos::ViewAllocateWithoutInitializing( "destinations" ),
+        positions.size() );
+
+    // Determine destination ranks for all particles.
+    Impl::getMigrateDestinations( local_grid, nr_mirror, destinations,
+                                  positions );
+
+    // Wrap positions across periodic boundaries.
+    periodicWrap( local_grid, positions );
+
+    // Ensure neighbor ranks are unique.
+    auto unique_topology = Impl::getUniqueTopology( topology );
+
+    // Create the Cabana distributor.
+    Distributor<device_type> distributor( local_grid.globalGrid().comm(),
+                                          destinations, unique_topology );
+    return distributor;
+}
+
+//---------------------------------------------------------------------------//
+/*!
+  \brief gridMigrate migrates data from one uniquely-owned decomposition to
+  another uniquely-owned decomposition, using the bounds and periodic
+  boundaries of a Cajita grid to determine which particles should be moved.
+  In-place variant.
+
+  \tparam LocalGridType Cajita LocalGrid type.
+
+  \tparam ParticleContainer AoSoA type.
+
+  \tparam PositionIndex Particle position index within the AoSoA.
+
+  \param local_grid The local grid containing periodicity and system bound
+  information.
+
+  \param particles The particle AoSoA, containing positions.
+
+  \param PositionIndex Particle position index within the AoSoA, passed as
+  an integral constant for dispatch.
+*/
+template <class LocalGridType, class ParticleContainer,
+          std::size_t PositionIndex>
+void gridMigrate( const LocalGridType &local_grid,
+                  ParticleContainer &particles,
+                  std::integral_constant<std::size_t, PositionIndex> )
+{
+    // Get the positions.
+    auto positions = slice<PositionIndex>( particles );
+
+    auto distributor = gridDistributor( local_grid, positions );
+
+    // Redistribute the particles.
+    migrate( distributor, particles );
+}
+
+//---------------------------------------------------------------------------//
+/*!
+  \brief gridMigrate migrates data from one uniquely-owned decomposition to
+  another uniquely-owned decomposition, using the bounds and periodic
+  boundaries of a Cajita grid to determine which particles should be moved.
+  Separate AoSoA variant.
+
+  \tparam LocalGridType Cajita LocalGrid type.
+
+  \tparam ParticleContainer AoSoA type.
+
+  \tparam PositionIndex Particle position index within the AoSoA.
+
+  \param local_grid The local grid containing periodicity and system bound
+  information.
+
+  \param src_particles The source particle AoSoA, containing positions.
+
+  \param PositionIndex Particle position index within the AoSoA, passed as
+  an integral constant for dispatch.
+
+  \param dst_particles The destination particle AoSoA.
+*/
+template <class LocalGridType, class ParticleContainer,
+          std::size_t PositionIndex>
+void gridMigrate( const LocalGridType &local_grid,
+                  ParticleContainer &src_particles,
+                  std::integral_constant<std::size_t, PositionIndex>,
+                  ParticleContainer &dst_particles )
+{
+    // Get the positions.
+    auto positions = slice<PositionIndex>( src_particles );
+
+    auto distributor = gridDistributor( local_grid, positions );
+
+    // Resize as needed.
+    dst_particles.resize( distributor.totalNumImport() );
+
+    // Redistribute the particles.
+    migrate( distributor, src_particles, dst_particles );
+}
+
+//---------------------------------------------------------------------------//
+/*!
+  \brief gridHalo determines which data should be ghosted on another
+  decomposition, using the bounds of a Cajita grid and taking periodic
+  boundaries into account.
+
+  \tparam LocalGridType Cajita LocalGrid type.
+
+  \tparam PositionSliceType Slice/View type.
+
+  \param local_grid The local grid containing periodicity and system bound
+  information.
+
+  \param positions The particle positions.
+
+  \return Halo and periodic shift view for later scatter/gather.
+*/
+template <class LocalGridType, class PositionSliceType>
+std::pair<Halo<typename PositionSliceType::device_type>,
+          Kokkos::View<typename PositionSliceType::value_type **,
+                       typename PositionSliceType::device_type>>
+gridHalo( const LocalGridType &local_grid, PositionSliceType &positions )
+{
+    using device_type = typename PositionSliceType::device_type;
+    using pos_value = typename PositionSliceType::value_type;
+
+    // Get all 27 ranks in the topology (this rank and its neighbors).
+    auto topology = Impl::getTopology( local_grid );
+
+    Kokkos::View<int *, Kokkos::HostSpace,
+                 Kokkos::MemoryTraits<Kokkos::Unmanaged>>
+        neighbor_ranks( topology.data(), topology.size() );
+    auto nr_mirror =
+        Kokkos::create_mirror_view_and_copy( device_type(), neighbor_ranks );
+
+    // TODO: arbitrary initial guess
+    std::size_t initial_size = positions.size() * 0.1;
+    Kokkos::View<int *, device_type> destinations(
+        Kokkos::ViewAllocateWithoutInitializing( "destinations" ),
+        initial_size );
+    Kokkos::View<int *, device_type> ids(
+        Kokkos::ViewAllocateWithoutInitializing( "ids" ), initial_size );
+    Kokkos::View<pos_value **, device_type> shifts(
+        Kokkos::ViewAllocateWithoutInitializing( "shifts" ), initial_size,
+        3 );
+
+    // Determine which particles need to be ghosted to neighbors.
+    Impl::getHaloIds( local_grid, nr_mirror, destinations, ids, shifts,
+                      positions );
+
+    // Ensure neighbor ranks are unique.
+    auto unique_topology = Impl::getUniqueTopology( topology );
+
+    // Create the Cabana Halo (ids are the export indices, destinations the
+    // export ranks).
+    Halo<device_type> halo( local_grid.globalGrid().comm(), positions.size(),
+                            ids, destinations, unique_topology );
+    return std::make_pair( halo, shifts );
+}
+
+template <class HaloType, class ShiftViewType>
+struct PeriodicHalo
+{
+    HaloType halo;
+    ShiftViewType shifts;
+
+    PeriodicHalo( HaloType halo_, ShiftViewType shifts_ )
+        : halo( halo_ )
+        , shifts( shifts_ )
+    {
+    }
+
+    template <class LocalGridType, class PositionSliceType>
+    PeriodicHalo( const LocalGridType &local_grid,
+                  PositionSliceType &positions )
+    {
+        std::tie( halo, shifts ) = gridHalo( local_grid, positions );
+    }
+
+    ~PeriodicHalo() {}
+
+    // TODO: need aosoa and slice options
+    void operator()( const int i, const int d ) const {}
+};
+
+template <class PeriodicHaloType, class PositionSliceType>
+void gridGather( const PeriodicHaloType &periodic_halo,
+                 PositionSliceType &positions )
+{
+    gather( periodic_halo.halo, positions, periodic_halo );
+}
+
+} // end namespace Cabana
+
+#endif // end CABANA_PERIODICCOMM_HPP
diff --git a/core/unit_test/CMakeLists.txt b/core/unit_test/CMakeLists.txt
index 1fc043d05..fe2c9a711 100644
--- a/core/unit_test/CMakeLists.txt
+++ b/core/unit_test/CMakeLists.txt
@@ -59,6 +59,9 @@ macro(Cabana_add_tests)
       target_include_directories(${_target} PRIVATE ${_dir}
         ${CMAKE_CURRENT_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR})
       target_link_libraries(${_target} cabanacore cabana_core_gtest)
+      if(_test STREQUAL ParticleGridCommunication)
+        target_link_libraries(${_target} Cajita)
+      endif()
       if(CABANA_UNIT_TEST_MPI)
         foreach(_np ${CABANA_UNIT_TEST_MPIEXEC_NUMPROCS})
           # NOTE: When moving to CMake 3.10+ make sure to use MPIEXEC_EXECUTABLE instead
@@ -86,4 +89,7 @@ if(Cabana_ENABLE_ARBORX)
 endif()
 if(Cabana_ENABLE_MPI)
   Cabana_add_tests(MPI NAMES CommunicationPlan Distributor Halo)
+  if(Cabana_ENABLE_CAJITA)
+    Cabana_add_tests(MPI NAMES ParticleGridCommunication)
+  endif()
 endif()
diff --git a/core/unit_test/tstParticleGridCommunication.hpp b/core/unit_test/tstParticleGridCommunication.hpp
new file mode 100644
index 000000000..067fa9e19
--- /dev/null
+++ b/core/unit_test/tstParticleGridCommunication.hpp
@@ -0,0 +1,209 @@
+/****************************************************************************
+ * Copyright (c) 2018-2020 by the Cabana authors                            *
+ * All rights reserved.                                                     *
+ *                                                                          *
+ * This file is part of the Cabana library. Cabana is distributed under a  *
+ * BSD 3-clause license. For the licensing terms see the LICENSE file in   *
+ * the top-level directory.                                                 *
* + * * + * SPDX-License-Identifier: BSD-3-Clause * + ****************************************************************************/ + +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include +#include +#include + +namespace Test +{ + +//---------------------------------------------------------------------------// +void testMigrate( int test_type ) +{ + // Create the MPI partitions. + Cajita::UniformDimPartitioner partitioner; + auto ranks_per_dim = partitioner.ranksPerDimension( MPI_COMM_WORLD, {} ); + + // Create the global MPI decomposition mesh. + std::array low_corner = {-3.2, -0.5, 2.2}; + std::array high_corner = {-0.2, 5.9, 4.2}; + auto global_mesh = Cajita::createUniformGlobalMesh( low_corner, high_corner, + ranks_per_dim ); + + // Create the global grid. + std::array is_periodic = {true, true, true}; + auto global_grid = Cajita::createGlobalGrid( MPI_COMM_WORLD, global_mesh, + is_periodic, partitioner ); + + // Create a grid local_grid. + auto local_grid = Cajita::createLocalGrid( global_grid, 1 ); + auto local_mesh = Cajita::createLocalMesh( *local_grid ); + + // Make some data to migrate, one for each rank neighbor. This tests only + // for neighbors within one ghost shell. + int num_data = 27; + const int pos_index = 1; + using DataTypes = Cabana::MemberTypes; + Cabana::AoSoA data_host( "host", num_data ); + auto id = Cabana::slice<0>( data_host ); + auto pos = Cabana::slice( data_host ); + + // Get mesh info. + auto shift_x = ( local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::I ) - + local_mesh.lowCorner( Cajita::Ghost(), Cajita::Dim::I ) ); + auto shift_y = ( local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::J ) - + local_mesh.lowCorner( Cajita::Ghost(), Cajita::Dim::J ) ); + auto shift_z = ( local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::K ) - + local_mesh.lowCorner( Cajita::Ghost(), Cajita::Dim::K ) ); + auto center_x = ( local_mesh.highCorner( Cajita::Own(), Cajita::Dim::I ) - + local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::I ) ) / + 2.0 + + local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::I ); + auto center_y = ( local_mesh.highCorner( Cajita::Own(), Cajita::Dim::J ) - + local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::J ) ) / + 2.0 + + local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::J ); + auto center_z = ( local_mesh.highCorner( Cajita::Own(), Cajita::Dim::K ) - + local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::K ) ) / + 2.0 + + local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::K ); + + // Fill the data. Place one particle within each neighbor rank's domain (IDs + // matching ranks). If a rank is not a neighbor, place that particle within + // this rank's domain. + int nr = 0; + for ( int k = -1; k < 2; ++k ) + for ( int j = -1; j < 2; ++j ) + for ( int i = -1; i < 2; ++i, ++nr ) + { + id( nr ) = nr; + pos( nr, 0 ) = center_x + i * shift_x; + pos( nr, 1 ) = center_y + j * shift_y; + pos( nr, 2 ) = center_z + k * shift_z; + } + + // Add a particle on rank zero to force some resizing for sends. + int my_rank = -1; + MPI_Comm_rank( MPI_COMM_WORLD, &my_rank ); + if ( my_rank == 0 ) + { + num_data++; + data_host.resize( num_data ); + auto pos = Cabana::slice( data_host ); + pos( num_data - 1, 0 ) = center_x + shift_x; + pos( num_data - 1, 1 ) = center_y + shift_y; + pos( num_data - 1, 2 ) = center_z + shift_z; + } + + // Copy to the device. + Cabana::AoSoA data_src( "data_src", num_data ); + Cabana::deep_copy( data_src, data_host ); + + // Do the migration in-place. 
+    if ( test_type == 0 )
+    {
+        Cabana::gridMigrate(
+            *local_grid, data_src,
+            std::integral_constant<std::size_t, pos_index>() );
+
+        data_host.resize( data_src.size() );
+        Cabana::deep_copy( data_host, data_src );
+    }
+    // Do the migration with a separate destination AoSoA.
+    else if ( test_type == 1 )
+    {
+        Cabana::AoSoA<DataTypes, TEST_MEMSPACE> data_dst( "data_dst",
+                                                          num_data );
+        Cabana::gridMigrate(
+            *local_grid, data_src,
+            std::integral_constant<std::size_t, pos_index>(), data_dst );
+
+        data_host.resize( data_dst.size() );
+        Cabana::deep_copy( data_host, data_dst );
+    }
+    // Do the migration with separate slices (need to use gridDistributor
+    // directly since slices cannot be resized).
+    else if ( test_type == 2 )
+    {
+        auto pos_src = Cabana::slice<pos_index>( data_src );
+        auto distributor = Cabana::gridDistributor( *local_grid, pos_src );
+        Cabana::AoSoA<DataTypes, TEST_MEMSPACE> data_dst(
+            "data_dst", distributor.totalNumImport() );
+        auto pos_dst = Cabana::slice<pos_index>( data_dst );
+        Cabana::migrate( distributor, pos_src, pos_dst );
+
+        data_host.resize( data_dst.size() );
+        Cabana::deep_copy( data_host, data_dst );
+    }
+
+    // Check the results.
+    int new_num_data = data_host.size();
+    auto pos_host = Cabana::slice<pos_index>( data_host );
+
+    // Make sure everything was wrapped into the global domain.
+    for ( int i = 0; i < new_num_data; ++i )
+    {
+        EXPECT_LE( pos_host( i, Cajita::Dim::I ),
+                   global_mesh->highCorner( Cajita::Dim::I ) );
+        EXPECT_LE( pos_host( i, Cajita::Dim::J ),
+                   global_mesh->highCorner( Cajita::Dim::J ) );
+        EXPECT_LE( pos_host( i, Cajita::Dim::K ),
+                   global_mesh->highCorner( Cajita::Dim::K ) );
+        EXPECT_GE( pos_host( i, Cajita::Dim::I ),
+                   global_mesh->lowCorner( Cajita::Dim::I ) );
+        EXPECT_GE( pos_host( i, Cajita::Dim::J ),
+                   global_mesh->lowCorner( Cajita::Dim::J ) );
+        EXPECT_GE( pos_host( i, Cajita::Dim::K ),
+                   global_mesh->lowCorner( Cajita::Dim::K ) );
+    }
+    // Make sure everything was migrated into the owned local domain.
+    for ( int i = 0; i < new_num_data; ++i )
+    {
+        EXPECT_LE( pos_host( i, Cajita::Dim::I ),
+                   local_mesh.highCorner( Cajita::Own(), Cajita::Dim::I ) );
+        EXPECT_LE( pos_host( i, Cajita::Dim::J ),
+                   local_mesh.highCorner( Cajita::Own(), Cajita::Dim::J ) );
+        EXPECT_LE( pos_host( i, Cajita::Dim::K ),
+                   local_mesh.highCorner( Cajita::Own(), Cajita::Dim::K ) );
+        EXPECT_GE( pos_host( i, Cajita::Dim::I ),
+                   local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::I ) );
+        EXPECT_GE( pos_host( i, Cajita::Dim::J ),
+                   local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::J ) );
+        EXPECT_GE( pos_host( i, Cajita::Dim::K ),
+                   local_mesh.lowCorner( Cajita::Own(), Cajita::Dim::K ) );
+    }
+    // Check that each particle sits at this rank's center, as constructed.
+    for ( int i = 0; i < new_num_data; ++i )
+    {
+        EXPECT_FLOAT_EQ( pos_host( i, Cajita::Dim::I ), center_x );
+        EXPECT_FLOAT_EQ( pos_host( i, Cajita::Dim::J ), center_y );
+        EXPECT_FLOAT_EQ( pos_host( i, Cajita::Dim::K ), center_z );
+    }
+}
+
+//---------------------------------------------------------------------------//
+// RUN TESTS
+//---------------------------------------------------------------------------//
+TEST( TEST_CATEGORY, periodic_test_migrate_inplace ) { testMigrate( 0 ); }
+
+TEST( TEST_CATEGORY, periodic_test_migrate_separate ) { testMigrate( 1 ); }
+
+TEST( TEST_CATEGORY, periodic_test_migrate_slice ) { testMigrate( 2 ); }
+
+//---------------------------------------------------------------------------//
+
+} // end namespace Test
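Usage note (not part of the patch): a minimal sketch of how the new migration path is meant to be driven, assuming a fully periodic Cajita grid and an AoSoA whose position member is at index 1. The grid bounds, particle count, function name `migrateExample`, and `Kokkos::HostSpace` placement are illustrative assumptions, not values from the patch.

```cpp
#include <Cabana_Core.hpp>
#include <Cajita.hpp>

// Hypothetical driver; only gridMigrate/gridDistributor come from the patch.
void migrateExample( MPI_Comm comm )
{
    // Decompose the domain; one cell per rank mirrors the unit test above.
    Cajita::UniformDimPartitioner partitioner;
    auto ranks_per_dim = partitioner.ranksPerDimension( comm, {} );
    std::array<double, 3> low_corner = {0.0, 0.0, 0.0};
    std::array<double, 3> high_corner = {1.0, 1.0, 1.0};
    auto global_mesh = Cajita::createUniformGlobalMesh(
        low_corner, high_corner, ranks_per_dim );

    // Fully periodic global grid and a local grid with one halo cell.
    std::array<bool, 3> is_periodic = {true, true, true};
    auto global_grid = Cajita::createGlobalGrid( comm, global_mesh,
                                                 is_periodic, partitioner );
    auto local_grid = Cajita::createLocalGrid( global_grid, 1 );

    // Particles with an id and a position; the position is member 1.
    using DataTypes = Cabana::MemberTypes<int, double[3]>;
    Cabana::AoSoA<DataTypes, Kokkos::HostSpace> particles( "particles", 100 );
    // ... fill ids and positions here ...

    // In-place: move each particle to its owning rank, wrapping periodic
    // coordinates along the way.
    Cabana::gridMigrate( *local_grid, particles,
                         std::integral_constant<std::size_t, 1>() );

    // Slice-based alternative: build the distributor explicitly and reuse
    // it for the actual migration.
    auto positions = Cabana::slice<1>( particles );
    auto distributor = Cabana::gridDistributor( *local_grid, positions );
    Cabana::migrate( distributor, particles );
}
```

The separate-destination overload follows the same pattern with a second AoSoA argument, which the patch resizes to `distributor.totalNumImport()` before migrating.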