Cabana 0.8.0-dev
Cabana_Distributor.hpp
/****************************************************************************
 * Copyright (c) 2018-2023 by the Cabana authors                            *
 * All rights reserved.                                                     *
 *                                                                          *
 * This file is part of the Cabana library. Cabana is distributed under a  *
 * BSD 3-clause license. For the licensing terms see the LICENSE file in   *
 * the top-level directory.                                                 *
 *                                                                          *
 * SPDX-License-Identifier: BSD-3-Clause                                    *
 ****************************************************************************/

#ifndef CABANA_DISTRIBUTOR_HPP
#define CABANA_DISTRIBUTOR_HPP

#include <Cabana_AoSoA.hpp>
#include <Cabana_CommunicationPlan.hpp>
#include <Cabana_Slice.hpp>

#include <Kokkos_Core.hpp>
#include <Kokkos_Profiling_ScopedRegion.hpp>

#include <mpi.h>

#include <exception>
#include <vector>
namespace Cabana
{
//---------------------------------------------------------------------------//
/*!
  \brief A communication plan for migrating data from one uniquely-owned
  decomposition to another uniquely-owned decomposition.
*/
template <class MemorySpace>
class Distributor : public CommunicationPlan<MemorySpace>
{
  public:
    /*!
      \brief Topology and export rank constructor. Use this when you already
      know which ranks neighbor each other.
    */
    template <class ViewType>
    Distributor( MPI_Comm comm, const ViewType& element_export_ranks,
                 const std::vector<int>& neighbor_ranks )
        : CommunicationPlan<MemorySpace>( comm )
    {
        auto neighbor_ids = this->createFromExportsAndTopology(
            element_export_ranks, neighbor_ranks );
        this->createExportSteering( neighbor_ids, element_export_ranks );
    }

    /*!
      \brief Export rank constructor. Use this when you don't know who you
      will be receiving from - only who you are sending to.
    */
    template <class ViewType>
    Distributor( MPI_Comm comm, const ViewType& element_export_ranks )
        : CommunicationPlan<MemorySpace>( comm )
    {
        auto neighbor_ids = this->createFromExportsOnly( element_export_ranks );
        this->createExportSteering( neighbor_ids, element_export_ranks );
    }
};
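
/*
 * Example (illustrative sketch, not part of the library source): building a
 * Distributor from a per-element export rank view. The memory space, view
 * name, element count, and neighbor list below are assumptions made for the
 * sake of the sketch.
 *
 * \code
 * using memory_space = Kokkos::HostSpace;
 *
 * // One destination MPI rank per local element.
 * Kokkos::View<int*, memory_space> export_ranks( "export_ranks",
 *                                                num_local_elements );
 * // ... fill export_ranks; an element may also be kept on this rank ...
 *
 * // If the communication topology is already known, pass it explicitly.
 * std::vector<int> neighbor_ranks = { my_rank, left_rank, right_rank };
 * Cabana::Distributor<memory_space> distributor( MPI_COMM_WORLD, export_ranks,
 *                                                neighbor_ranks );
 *
 * // Otherwise let the Distributor discover the topology from the exports.
 * Cabana::Distributor<memory_space> distributor_discovered( MPI_COMM_WORLD,
 *                                                           export_ranks );
 * \endcode
 */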

//---------------------------------------------------------------------------//
template <typename>
struct is_distributor_impl : public std::false_type
{
};

template <typename MemorySpace>
struct is_distributor_impl<Distributor<MemorySpace>> : public std::true_type
{
};

//! Distributor static type checker.
template <class T>
struct is_distributor
    : public is_distributor_impl<typename std::remove_cv<T>::type>::type
{
};
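
/*
 * Example: the type checker can guard templated code at compile time. A
 * minimal sketch:
 *
 * \code
 * static_assert(
 *     Cabana::is_distributor<Cabana::Distributor<Kokkos::HostSpace>>::value,
 *     "accepts any (possibly cv-qualified) Distributor type" );
 * static_assert( !Cabana::is_distributor<int>::value,
 *                "and rejects everything else" );
 * \endcode
 */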

//---------------------------------------------------------------------------//
namespace Impl
{
//---------------------------------------------------------------------------//
// Synchronously move data between a source and destination AoSoA by executing
// the forward communication plan.
template <class ExecutionSpace, class Distributor_t, class AoSoA_t>
void distributeData(
    ExecutionSpace, const Distributor_t& distributor, const AoSoA_t& src,
    AoSoA_t& dst,
    typename std::enable_if<( is_distributor<Distributor_t>::value &&
                              is_aosoa<AoSoA_t>::value ),
                            int>::type* = 0 )
{
    Kokkos::Profiling::ScopedRegion region( "Cabana::migrate" );

    static_assert( is_accessible_from<typename Distributor_t::memory_space,
                                      ExecutionSpace>{},
                   "" );

    // Get the MPI rank we are currently on.
    int my_rank = -1;
    MPI_Comm_rank( distributor.comm(), &my_rank );

    // Get the number of neighbors.
    int num_n = distributor.numNeighbor();

    // Calculate the number of elements that are staying on this rank and
    // therefore can be directly copied. If any of the neighbor ranks are this
    // rank it will be stored in first position (i.e. the first neighbor in
    // the local list is always yourself if you are sending to yourself).
    std::size_t num_stay =
        ( num_n > 0 && distributor.neighborRank( 0 ) == my_rank )
            ? distributor.numExport( 0 )
            : 0;

    // Allocate a send buffer.
    std::size_t num_send = distributor.totalNumExport() - num_stay;
    Kokkos::View<typename AoSoA_t::tuple_type*,
                 typename Distributor_t::memory_space>
        send_buffer( Kokkos::ViewAllocateWithoutInitializing(
                         "distributor_send_buffer" ),
                     num_send );

    // Allocate a receive buffer.
    Kokkos::View<typename AoSoA_t::tuple_type*,
                 typename Distributor_t::memory_space>
        recv_buffer( Kokkos::ViewAllocateWithoutInitializing(
                         "distributor_recv_buffer" ),
                     distributor.totalNumImport() );

    // Get the steering vector for the sends.
    auto steering = distributor.getExportSteering();

    // Gather the exports from the source AoSoA into the tuple-contiguous send
    // buffer or the receive buffer if the data is staying. We know that the
    // steering vector is ordered such that the data staying on this rank
    // comes first.
    auto build_send_buffer_func = KOKKOS_LAMBDA( const std::size_t i )
    {
        auto tpl = src.getTuple( steering( i ) );
        if ( i < num_stay )
            recv_buffer( i ) = tpl;
        else
            send_buffer( i - num_stay ) = tpl;
    };
    Kokkos::RangePolicy<ExecutionSpace> build_send_buffer_policy(
        0, distributor.totalNumExport() );
    Kokkos::parallel_for( "Cabana::Impl::distributeData::build_send_buffer",
                          build_send_buffer_policy, build_send_buffer_func );
    Kokkos::fence();

    // The distributor has its own communication space so choose any tag.
    const int mpi_tag = 1234;

    // Post non-blocking receives.
    std::vector<MPI_Request> requests;
    requests.reserve( num_n );
    std::pair<std::size_t, std::size_t> recv_range = { 0, 0 };
    for ( int n = 0; n < num_n; ++n )
    {
        recv_range.second = recv_range.first + distributor.numImport( n );

        if ( ( distributor.numImport( n ) > 0 ) &&
             ( distributor.neighborRank( n ) != my_rank ) )
        {
            auto recv_subview = Kokkos::subview( recv_buffer, recv_range );

            requests.push_back( MPI_Request() );

            MPI_Irecv( recv_subview.data(),
                       recv_subview.size() *
                           sizeof( typename AoSoA_t::tuple_type ),
                       MPI_BYTE, distributor.neighborRank( n ), mpi_tag,
                       distributor.comm(), &( requests.back() ) );
        }

        recv_range.first = recv_range.second;
    }

    // Do blocking sends.
    std::pair<std::size_t, std::size_t> send_range = { 0, 0 };
    for ( int n = 0; n < num_n; ++n )
    {
        if ( ( distributor.numExport( n ) > 0 ) &&
             ( distributor.neighborRank( n ) != my_rank ) )
        {
            send_range.second = send_range.first + distributor.numExport( n );

            auto send_subview = Kokkos::subview( send_buffer, send_range );

            MPI_Send( send_subview.data(),
                      send_subview.size() *
                          sizeof( typename AoSoA_t::tuple_type ),
                      MPI_BYTE, distributor.neighborRank( n ), mpi_tag,
                      distributor.comm() );

            send_range.first = send_range.second;
        }
    }

    // Wait on non-blocking receives.
    std::vector<MPI_Status> status( requests.size() );
    const int ec =
        MPI_Waitall( requests.size(), requests.data(), status.data() );
    if ( MPI_SUCCESS != ec )
        throw std::logic_error( "Failed MPI Communication" );

    // Extract the receive buffer into the destination AoSoA.
    auto extract_recv_buffer_func = KOKKOS_LAMBDA( const std::size_t i )
    {
        dst.setTuple( i, recv_buffer( i ) );
    };
    Kokkos::RangePolicy<ExecutionSpace> extract_recv_buffer_policy(
        0, distributor.totalNumImport() );
    Kokkos::parallel_for( "Cabana::Impl::distributeData::extract_recv_buffer",
                          extract_recv_buffer_policy,
                          extract_recv_buffer_func );
    Kokkos::fence();

    // Barrier before completing to ensure synchronization.
    MPI_Barrier( distributor.comm() );
}

//---------------------------------------------------------------------------//
} // end namespace Impl

//---------------------------------------------------------------------------//
/*!
  \brief Synchronously migrate data between two different decompositions
  using the distributor forward communication plan.
*/
template <class ExecutionSpace, class Distributor_t, class AoSoA_t>
void migrate( ExecutionSpace exec_space, const Distributor_t& distributor,
              const AoSoA_t& src, AoSoA_t& dst,
              typename std::enable_if<( is_distributor<Distributor_t>::value &&
                                        is_aosoa<AoSoA_t>::value ),
                                      int>::type* = 0 )
{
    // Check that src and dst are the right size.
    if ( src.size() != distributor.exportSize() )
        throw std::runtime_error( "Source is the wrong size for migration!" );
    if ( dst.size() != distributor.totalNumImport() )
        throw std::runtime_error(
            "Destination is the wrong size for migration!" );

    // Move the data.
    Impl::distributeData( exec_space, distributor, src, dst );
}

template <class Distributor_t, class AoSoA_t>
void migrate( const Distributor_t& distributor, const AoSoA_t& src,
              AoSoA_t& dst,
              typename std::enable_if<( is_distributor<Distributor_t>::value &&
                                        is_aosoa<AoSoA_t>::value ),
                                      int>::type* = 0 )
{
    migrate( typename Distributor_t::execution_space{}, distributor, src, dst );
}
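
/*
 * Example (illustrative sketch): migrating between two AoSoA containers. The
 * member types, labels, and counts are assumptions; the destination must be
 * pre-sized to the import count, as checked above.
 *
 * \code
 * using DataTypes = Cabana::MemberTypes<double[3], int>;
 * Cabana::AoSoA<DataTypes, Kokkos::HostSpace> src( "src", num_local );
 * // ... fill src and build a distributor with one export rank per tuple ...
 * Cabana::AoSoA<DataTypes, Kokkos::HostSpace> dst(
 *     "dst", distributor.totalNumImport() );
 * Cabana::migrate( distributor, src, dst );
 * \endcode
 */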

//---------------------------------------------------------------------------//
template <class ExecutionSpace, class Distributor_t, class AoSoA_t>
void migrate( ExecutionSpace exec_space, const Distributor_t& distributor,
              AoSoA_t& aosoa,
              typename std::enable_if<( is_distributor<Distributor_t>::value &&
                                        is_aosoa<AoSoA_t>::value ),
                                      int>::type* = 0 )
{
    // Check that the AoSoA is the right size.
    if ( aosoa.size() != distributor.exportSize() )
        throw std::runtime_error( "AoSoA is the wrong size for migration!" );

    // Determine if the source or destination decomposition has more data on
    // this rank.
    bool dst_is_bigger =
        ( distributor.totalNumImport() > distributor.exportSize() );

    // If the destination decomposition is bigger than the source
    // decomposition resize now so we have enough space to do the operation.
    if ( dst_is_bigger )
        aosoa.resize( distributor.totalNumImport() );

    // Move the data.
    Impl::distributeData( exec_space, distributor, aosoa, aosoa );

    // If the destination decomposition is smaller than the source
    // decomposition resize after we have moved the data.
    if ( !dst_is_bigger )
        aosoa.resize( distributor.totalNumImport() );
}

template <class Distributor_t, class AoSoA_t>
void migrate( const Distributor_t& distributor, AoSoA_t& aosoa,
              typename std::enable_if<( is_distributor<Distributor_t>::value &&
                                        is_aosoa<AoSoA_t>::value ),
                                      int>::type* = 0 )
{
    migrate( typename Distributor_t::execution_space{}, distributor, aosoa );
}
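
/*
 * Example (illustrative sketch): migrating in place. Only one container is
 * needed; it is resized to the import count either before or after the
 * exchange, depending on which decomposition holds more data locally. Names
 * and counts are assumptions.
 *
 * \code
 * Cabana::AoSoA<DataTypes, Kokkos::HostSpace> aosoa( "aosoa", num_local );
 * // ... build distributor so that aosoa.size() == distributor.exportSize() ...
 * Cabana::migrate( distributor, aosoa );
 * // aosoa.size() now equals distributor.totalNumImport().
 * \endcode
 */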

//---------------------------------------------------------------------------//
template <class ExecutionSpace, class Distributor_t, class Slice_t>
void migrate( ExecutionSpace, const Distributor_t& distributor,
              const Slice_t& src, Slice_t& dst,
              typename std::enable_if<( is_distributor<Distributor_t>::value &&
                                        is_slice<Slice_t>::value ),
                                      int>::type* = 0 )
{
    // Check that src and dst are the right size.
    if ( src.size() != distributor.exportSize() )
        throw std::runtime_error( "Source is the wrong size for migration!" );
    if ( dst.size() != distributor.totalNumImport() )
        throw std::runtime_error(
            "Destination is the wrong size for migration!" );

    // Get the number of components in the slices.
    size_t num_comp = 1;
    for ( size_t d = 2; d < src.viewRank(); ++d )
        num_comp *= src.extent( d );

    // Get the raw slice data.
    auto src_data = src.data();
    auto dst_data = dst.data();

    // Get the MPI rank we are currently on.
    int my_rank = -1;
    MPI_Comm_rank( distributor.comm(), &my_rank );

    // Get the number of neighbors.
    int num_n = distributor.numNeighbor();

    // Calculate the number of elements that are staying on this rank and
    // therefore can be directly copied. If any of the neighbor ranks are this
    // rank it will be stored in first position (i.e. the first neighbor in
    // the local list is always yourself if you are sending to yourself).
    std::size_t num_stay =
        ( num_n > 0 && distributor.neighborRank( 0 ) == my_rank )
            ? distributor.numExport( 0 )
            : 0;

    // Allocate a send buffer. Note this one is layout right so the components
    // of each element are consecutive in memory.
    std::size_t num_send = distributor.totalNumExport() - num_stay;
    Kokkos::View<typename Slice_t::value_type**, Kokkos::LayoutRight,
                 typename Distributor_t::memory_space>
        send_buffer( Kokkos::ViewAllocateWithoutInitializing(
                         "distributor_send_buffer" ),
                     num_send, num_comp );

    // Allocate a receive buffer. Note this one is layout right so the
    // components of each element are consecutive in memory.
    Kokkos::View<typename Slice_t::value_type**, Kokkos::LayoutRight,
                 typename Distributor_t::memory_space>
        recv_buffer( Kokkos::ViewAllocateWithoutInitializing(
                         "distributor_recv_buffer" ),
                     distributor.totalNumImport(), num_comp );

    // Get the steering vector for the sends.
    auto steering = distributor.getExportSteering();

    // Gather from the source Slice into the contiguous send buffer or,
    // if it is part of the local copy, put it directly in the destination
    // Slice.
    auto build_send_buffer_func = KOKKOS_LAMBDA( const std::size_t i )
    {
        auto s_src = Slice_t::index_type::s( steering( i ) );
        auto a_src = Slice_t::index_type::a( steering( i ) );
        std::size_t src_offset = s_src * src.stride( 0 ) + a_src;
        if ( i < num_stay )
            for ( std::size_t n = 0; n < num_comp; ++n )
                recv_buffer( i, n ) =
                    src_data[src_offset + n * Slice_t::vector_length];
        else
            for ( std::size_t n = 0; n < num_comp; ++n )
                send_buffer( i - num_stay, n ) =
                    src_data[src_offset + n * Slice_t::vector_length];
    };
    Kokkos::RangePolicy<ExecutionSpace> build_send_buffer_policy(
        0, distributor.totalNumExport() );
    Kokkos::parallel_for( "Cabana::migrate::build_send_buffer",
                          build_send_buffer_policy, build_send_buffer_func );
    Kokkos::fence();

    // The distributor has its own communication space so choose any tag.
    const int mpi_tag = 1234;

    // Post non-blocking receives.
    std::vector<MPI_Request> requests;
    requests.reserve( num_n );
    std::pair<std::size_t, std::size_t> recv_range = { 0, 0 };
    for ( int n = 0; n < num_n; ++n )
    {
        recv_range.second = recv_range.first + distributor.numImport( n );

        if ( ( distributor.numImport( n ) > 0 ) &&
             ( distributor.neighborRank( n ) != my_rank ) )
        {
            auto recv_subview =
                Kokkos::subview( recv_buffer, recv_range, Kokkos::ALL );

            requests.push_back( MPI_Request() );

            MPI_Irecv( recv_subview.data(),
                       recv_subview.size() *
                           sizeof( typename Slice_t::value_type ),
                       MPI_BYTE, distributor.neighborRank( n ), mpi_tag,
                       distributor.comm(), &( requests.back() ) );
        }

        recv_range.first = recv_range.second;
    }

    // Do blocking sends.
    std::pair<std::size_t, std::size_t> send_range = { 0, 0 };
    for ( int n = 0; n < num_n; ++n )
    {
        if ( ( distributor.numExport( n ) > 0 ) &&
             ( distributor.neighborRank( n ) != my_rank ) )
        {
            send_range.second = send_range.first + distributor.numExport( n );

            auto send_subview =
                Kokkos::subview( send_buffer, send_range, Kokkos::ALL );

            MPI_Send( send_subview.data(),
                      send_subview.size() *
                          sizeof( typename Slice_t::value_type ),
                      MPI_BYTE, distributor.neighborRank( n ), mpi_tag,
                      distributor.comm() );

            send_range.first = send_range.second;
        }
    }

    // Wait on non-blocking receives.
    std::vector<MPI_Status> status( requests.size() );
    const int ec =
        MPI_Waitall( requests.size(), requests.data(), status.data() );
    if ( MPI_SUCCESS != ec )
        throw std::logic_error( "Failed MPI Communication" );

    // Extract the data from the receive buffer into the destination Slice.
    auto extract_recv_buffer_func = KOKKOS_LAMBDA( const std::size_t i )
    {
        auto s = Slice_t::index_type::s( i );
        auto a = Slice_t::index_type::a( i );
        std::size_t dst_offset = s * dst.stride( 0 ) + a;
        for ( std::size_t n = 0; n < num_comp; ++n )
            dst_data[dst_offset + n * Slice_t::vector_length] =
                recv_buffer( i, n );
    };
    Kokkos::RangePolicy<ExecutionSpace> extract_recv_buffer_policy(
        0, distributor.totalNumImport() );
    Kokkos::parallel_for( "Cabana::migrate::extract_recv_buffer",
                          extract_recv_buffer_policy,
                          extract_recv_buffer_func );
    Kokkos::fence();

    // Barrier before completing to ensure synchronization.
    MPI_Barrier( distributor.comm() );
}

template <class Distributor_t, class Slice_t>
void migrate( const Distributor_t& distributor, const Slice_t& src,
              Slice_t& dst,
              typename std::enable_if<( is_distributor<Distributor_t>::value &&
                                        is_slice<Slice_t>::value ),
                                      int>::type* = 0 )
{
    migrate( typename Distributor_t::execution_space{}, distributor, src, dst );
}
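
/*
 * Example (illustrative sketch): migrating a single slice. Unlike the
 * in-place AoSoA overload, both slices must already have the correct sizes
 * because a slice cannot be resized through this interface. The member index
 * and container names are assumptions.
 *
 * \code
 * auto src_position = Cabana::slice<0>( src_aosoa );
 * auto dst_position = Cabana::slice<0>( dst_aosoa );
 * // dst_aosoa was sized to distributor.totalNumImport() beforehand.
 * Cabana::migrate( distributor, src_position, dst_position );
 * \endcode
 */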

//---------------------------------------------------------------------------//

} // end namespace Cabana

#endif // end CABANA_DISTRIBUTOR_HPP