16#ifndef CABANA_PARTICLEGRIDDISTRIBUTOR_HPP
17#define CABANA_PARTICLEGRIDDISTRIBUTOR_HPP
27#include <Kokkos_Core.hpp>
/*!
  \brief Build the neighbor topology of the 27 nearest 3D neighbors
  (including self). Entries may be invalid (-1), e.g. at non-periodic
  domain boundaries.
  \param local_grid Local grid from which neighbor ranks are queried.
  \return Flat vector of 27 MPI ranks in (i fastest, then j, then k) order.
*/
template <class LocalGridType,
          std::size_t NSD = LocalGridType::num_space_dim>
std::enable_if_t<3 == NSD, std::vector<int>>
getTopology( const LocalGridType& local_grid )
{
    // Start with every slot invalid; neighborRank() may also return -1.
    std::vector<int> topology( 27, -1 );
    int nr = 0;
    for ( int k = -1; k < 2; ++k )
        for ( int j = -1; j < 2; ++j )
            for ( int i = -1; i < 2; ++i, ++nr )
                topology[nr] = local_grid.neighborRank( i, j, k );
    return topology;
}
/*!
  \brief Build the neighbor topology of the 9 nearest 2D neighbors
  (including self). Entries may be invalid (-1), e.g. at non-periodic
  domain boundaries.
  \param local_grid Local grid from which neighbor ranks are queried.
  \return Flat vector of 9 MPI ranks in (i fastest, then j) order.
*/
template <class LocalGridType,
          std::size_t NSD = LocalGridType::num_space_dim>
std::enable_if_t<2 == NSD, std::vector<int>>
getTopology( const LocalGridType& local_grid )
{
    // Start with every slot invalid; neighborRank() may also return -1.
    std::vector<int> topology( 9, -1 );
    int nr = 0;
    for ( int j = -1; j < 2; ++j )
        for ( int i = -1; i < 2; ++i, ++nr )
            topology[nr] = local_grid.neighborRank( i, j );
    return topology;
}
85template <
class LocalGridType,
class PositionSliceType,
class NeighborRankView,
86 class DestinationRankView>
87void getMigrateDestinations(
const LocalGridType& local_grid,
88 const NeighborRankView& neighbor_ranks,
89 DestinationRankView& destinations,
90 PositionSliceType& positions )
92 static constexpr std::size_t num_space_dim = LocalGridType::num_space_dim;
93 using execution_space =
typename PositionSliceType::execution_space;
96 const auto& local_mesh = createLocalMesh<Kokkos::HostSpace>( local_grid );
99 const auto& global_grid = local_grid.globalGrid();
100 const auto& global_mesh = global_grid.globalMesh();
102 Kokkos::Array<double, num_space_dim> local_low{};
103 Kokkos::Array<double, num_space_dim> local_high{};
104 Kokkos::Array<bool, num_space_dim> periodic{};
105 Kokkos::Array<double, num_space_dim> global_low{};
106 Kokkos::Array<double, num_space_dim> global_high{};
107 Kokkos::Array<double, num_space_dim> global_extent{};
109 for ( std::size_t d = 0; d < num_space_dim; ++d )
111 local_low[d] = local_mesh.lowCorner( Own(), d );
112 local_high[d] = local_mesh.highCorner( Own(), d );
113 periodic[d] = global_grid.isPeriodic( d );
114 global_low[d] = global_mesh.lowCorner( d );
115 global_high[d] = global_mesh.highCorner( d );
116 global_extent[d] = global_mesh.extent( d );
119 Kokkos::parallel_for(
120 "Cabana::Grid::ParticleGridMigrate::get_destinations",
121 Kokkos::RangePolicy<execution_space>( 0, positions.size() ),
122 KOKKOS_LAMBDA(
const int p ) {
124 int nid[num_space_dim];
125 for ( std::size_t d = 0; d < num_space_dim; ++d )
128 if ( positions( p, d ) < local_low[d] )
130 else if ( positions( p, d ) > local_high[d] )
135 int neighbor_index = nid[0];
136 for ( std::size_t d = 1; d < num_space_dim; ++d )
139 for ( std::size_t dp = 1; dp <= d; ++dp )
141 neighbor_index += npower * nid[d];
143 destinations( p ) = neighbor_ranks( neighbor_index );
146 for ( std::size_t d = 0; d < num_space_dim; ++d )
150 if ( positions( p, d ) > global_high[d] )
151 positions( p, d ) -= global_extent[d];
152 else if ( positions( p, d ) < global_low[d] )
153 positions( p, d ) += global_extent[d];
174template <
class LocalGr
idType,
class PositionSliceType>
176 const PositionSliceType& positions,
177 const int minimum_halo_width )
179 using grid_type = LocalGridType;
180 static constexpr std::size_t num_space_dim = grid_type::num_space_dim;
181 using mesh_type =
typename grid_type::mesh_type;
182 using scalar_type =
typename mesh_type::scalar_type;
184 static_assert( std::is_same<mesh_type, uniform_type>::value,
185 "Migrate count requires a uniform mesh." );
187 using execution_space =
typename PositionSliceType::execution_space;
192 Kokkos::Array<double, num_space_dim> local_low{};
193 Kokkos::Array<double, num_space_dim> local_high{};
194 for ( std::size_t d = 0; d < num_space_dim; ++d )
196 auto dx = local_grid.globalGrid().globalMesh().cellSize( d );
198 local_mesh.lowCorner(
Ghost(), d ) + minimum_halo_width * dx;
200 local_mesh.highCorner(
Ghost(), d ) - minimum_halo_width * dx;
204 Kokkos::parallel_reduce(
205 "Cabana::Grid::ParticleGridMigrate::count",
206 Kokkos::RangePolicy<execution_space>( 0, positions.size() ),
207 KOKKOS_LAMBDA(
const int p,
int& result ) {
208 for ( std::size_t d = 0; d < num_space_dim; ++d )
209 if ( positions( p, d ) < local_low[d] ||
210 positions( p, d ) > local_high[d] )
218 MPI_Allreduce( MPI_IN_PLACE, &comm_count, 1, MPI_INT, MPI_SUM,
219 local_grid.globalGrid().comm() );
239template <
class LocalGr
idType,
class PositionSliceType>
242 PositionSliceType& positions )
244 using memory_space =
typename PositionSliceType::memory_space;
249 Kokkos::View<int*, Kokkos::HostSpace, Kokkos::MemoryUnmanaged>
250 topology_host( topology.data(), topology.size() );
251 auto topology_mirror =
252 Kokkos::create_mirror_view_and_copy( memory_space(), topology_host );
253 Kokkos::View<int*, memory_space> destinations(
254 Kokkos::ViewAllocateWithoutInitializing(
"destinations" ),
259 Impl::getMigrateDestinations( local_grid, topology_mirror, destinations,
264 local_grid.globalGrid().comm(), destinations, topology );
/*!
  \brief Migrate particles in place to the ranks that own their current
  positions.
  \param local_grid Local grid defining ownership.
  \param positions Particle position slice.
  \param particles Particle container to redistribute in place.
  \param min_halo_width Halo slack used to decide whether any communication
  is needed at all.
  \param force_migrate Skip the migrateCount() check and always migrate.
  \return true if a migration was performed, false if it was skipped.
*/
template <class LocalGridType, class ParticlePositions,
          class ParticleContainer>
bool particleMigrate( const LocalGridType& local_grid,
                      const ParticlePositions& positions,
                      ParticleContainer& particles, const int min_halo_width,
                      const bool force_migrate = false )
{
    if ( !force_migrate )
    {
        // See if any rank actually has particles near the ghosted boundary.
        auto comm_count = migrateCount( local_grid, positions,
                                        min_halo_width );

        // Nothing to do anywhere: skip building the plan entirely.
        if ( 0 == comm_count )
            return false;
    }

    auto distributor = createParticleDistributor( local_grid, positions );

    // Redistribute the particles in place.
    migrate( distributor, particles );
    return true;
}
335template <
class LocalGr
idType,
class ParticlePositions,
class ParticleContainer>
337 const ParticlePositions& positions,
338 const ParticleContainer& src_particles,
339 ParticleContainer& dst_particles,
340 const int min_halo_width,
341 const bool force_migrate =
false )
347 if ( !force_migrate )
350 auto comm_count =
migrateCount( local_grid, positions, min_halo_width );
353 if ( 0 == comm_count )
363 dst_particles.resize( distributor.totalNumImport() );
366 migrate( distributor, src_particles, dst_particles );
//! Deprecated forwarding wrapper kept for source compatibility.
template <class... Args>
[[deprecated( "Cabana::Grid::particleGridMigrate is now "
              "Cabana::Grid::particleMigrate. This function wrapper will be "
              "removed in a future release." )]]
auto particleGridMigrate( Args&&... args )
{
    return particleMigrate( std::forward<Args>( args )... );
}
//! Deprecated forwarding wrapper kept for source compatibility.
template <class... Args>
[[deprecated(
    "Cabana::Grid::createParticleGridDistributor is now "
    "Cabana::Grid::createParticleDistributor. This function wrapper will be "
    "removed in a future release." )]]
auto createParticleGridDistributor( Args&&... args )
{
    return createParticleDistributor( std::forward<Args>( args )... );
}
AoSoA and slice extensions for Kokkos deep copy and mirrors.
Multi-node particle redistribution.
LocalMesh< MemorySpace, MeshType > createLocalMesh(const LocalGrid< MeshType > &local_grid)
Creation function for local mesh.
Definition Cabana_Grid_LocalMesh.hpp:791
Cabana::Distributor< typename PositionSliceType::memory_space > createParticleDistributor(const LocalGridType &local_grid, PositionSliceType &positions)
Determine which data should be migrated from one uniquely-owned decomposition to another uniquely-owned decomposition.
Definition Cabana_Grid_ParticleDistributor.hpp:241
std::enable_if_t< 3==NSD, std::vector< int > > getTopology(const LocalGridType &local_grid)
Build neighbor topology of 27 nearest 3D neighbors. Some of the ranks in this list may be invalid.
Definition Cabana_Grid_ParticleDistributor.hpp:48
int migrateCount(const LocalGridType &local_grid, const PositionSliceType &positions, const int minimum_halo_width)
Check for the number of particles that must be communicated.
Definition Cabana_Grid_ParticleDistributor.hpp:175
bool particleMigrate(const LocalGridType &local_grid, const ParticlePositions &positions, ParticleContainer &particles, const int min_halo_width, const bool force_migrate=false)
Migrate data from one uniquely-owned decomposition to another uniquely-owned decomposition,...
Definition Cabana_Grid_ParticleDistributor.hpp:288
A communication plan for migrating data from one uniquely-owned decomposition to another uniquely-owned decomposition.
Definition Cabana_Distributor.hpp:63
Core: particle data structures and algorithms.
Definition Cabana_AoSoA.hpp:36
void deep_copy(DstAoSoA &dst, const SrcAoSoA &src, typename std::enable_if<(is_aosoa< DstAoSoA >::value &&is_aosoa< SrcAoSoA >::value)>::type *=0)
Deep copy data between compatible AoSoA objects.
Definition Cabana_DeepCopy.hpp:158
void migrate(ExecutionSpace exec_space, const Distributor_t &distributor, const AoSoA_t &src, AoSoA_t &dst, typename std::enable_if<(is_distributor< Distributor_t >::value &&is_aosoa< AoSoA_t >::value), int >::type *=0)
Synchronously migrate data between two different decompositions using the distributor forward communication plan.
Definition Cabana_Distributor.hpp:335
Ghosted decomposition tag.
Definition Cabana_Grid_Types.hpp:197