Cabana 0.8.0-dev
 
Cabana_Grid_Halo_Mpi.hpp
1/****************************************************************************
2 * Copyright (c) 2018-2025 by the Cabana authors *
3 * All rights reserved. *
4 * *
5 * This file is part of the Cabana library. Cabana is distributed under a *
6 * BSD 3-clause license. For the licensing terms see the LICENSE file in *
7 * the top-level directory. *
8 * *
9 * SPDX-License-Identifier: BSD-3-Clause *
10 ****************************************************************************/
11
16#ifndef CABANA_GRID_HALO_MPI_HPP
17#define CABANA_GRID_HALO_MPI_HPP
18
19#include <Cabana_Grid_Array.hpp>
20#include <Cabana_Grid_IndexSpace.hpp>
21
22#include <Cabana_ParameterPack.hpp>
23
24#include <Kokkos_Core.hpp>
25#include <Kokkos_Profiling_ScopedRegion.hpp>
26
27#include <mpi.h>
28
29#include <algorithm>
30#include <array>
31#include <cmath>
32#include <type_traits>
33#include <vector>
34
35namespace Cabana
36{
37namespace Grid
38{
39
40//---------------------------------------------------------------------------//
45template <class MemorySpace>
46class Halo<MemorySpace, Mpi> : public HaloBase<MemorySpace>
47{
48 public:
50
60 template <class Pattern, class... ArrayTypes>
61 Halo( const Pattern& pattern, const int width, const ArrayTypes&... arrays )
62 : HaloBase<MemorySpace>( pattern, width, arrays... )
63 {
64 }
65
76 template <class ExecutionSpace, class... ArrayTypes>
77 void gather( const ExecutionSpace& exec_space,
78 const ArrayTypes&... arrays ) const
79 {
80 Kokkos::Profiling::ScopedRegion region( "Cabana::Grid::gather" );
81
82 // Get the number of neighbors. Return if we have none.
83 int num_n = this->_neighbor_ranks.size();
84 if ( 0 == num_n )
85 return;
86
87 // Get the MPI communicator.
88 auto comm = this->getComm( arrays... );
89
90 // Allocate requests.
91 std::vector<MPI_Request> requests( 2 * num_n, MPI_REQUEST_NULL );
92
93 // Pick a tag to use for communication. This object has its own
94 // communication space so any tag will do.
95 const int mpi_tag = 1234;
96
97 // Post receives.
98 for ( int n = 0; n < num_n; ++n )
99 {
100 // Only process this neighbor if there is work to do.
101 if ( 0 < this->_ghosted_buffers[n].size() )
102 {
103 MPI_Irecv( this->_ghosted_buffers[n].data(),
104 this->_ghosted_buffers[n].size(), MPI_BYTE,
105 this->_neighbor_ranks[n],
106 mpi_tag + this->_receive_tags[n], comm,
107 &requests[n] );
108 }
109 }
110
111 // Pack send buffers and post sends.
112 for ( int n = 0; n < num_n; ++n )
113 {
114 // Only process this neighbor if there is work to do.
115 if ( 0 < this->_owned_buffers[n].size() )
116 {
117 // Pack the send buffer.
118 this->packBuffer( exec_space, this->_owned_buffers[n],
119 this->_owned_steering[n], arrays.view()... );
120
121 // Post a send.
122 MPI_Isend( this->_owned_buffers[n].data(),
123 this->_owned_buffers[n].size(), MPI_BYTE,
124 this->_neighbor_ranks[n],
125 mpi_tag + this->_send_tags[n], comm,
126 &requests[num_n + n] );
127 }
128 }
129
130 // Unpack receive buffers.
131 bool unpack_complete = false;
132 while ( !unpack_complete )
133 {
134 // Get the next buffer to unpack.
135 int unpack_index = MPI_UNDEFINED;
136 MPI_Waitany( num_n, requests.data(), &unpack_index,
137 MPI_STATUS_IGNORE );
138
139 // If there are no more buffers to unpack we are done.
140 if ( MPI_UNDEFINED == unpack_index )
141 {
142 unpack_complete = true;
143 }
144
145 // Otherwise unpack the next buffer.
146 else
147 {
148 this->unpackBuffer( ScatterReduce::Replace(), exec_space,
149 this->_ghosted_buffers[unpack_index],
150 this->_ghosted_steering[unpack_index],
151 arrays.view()... );
152 }
153 }
154
155 // Wait on send requests.
156 MPI_Waitall( num_n, requests.data() + num_n, MPI_STATUSES_IGNORE );
157 }
158
166 template <class ExecutionSpace, class ReduceOp, class... ArrayTypes>
167 void scatter( const ExecutionSpace& exec_space, const ReduceOp& reduce_op,
168 const ArrayTypes&... arrays ) const
169 {
170 Kokkos::Profiling::ScopedRegion region( "Cabana::Grid::scatter" );
171
172 // Get the number of neighbors. Return if we have none.
173 int num_n = this->_neighbor_ranks.size();
174 if ( 0 == num_n )
175 return;
176
177 // Get the MPI communicator.
178 auto comm = this->getComm( arrays... );
179
180 // Requests.
181 std::vector<MPI_Request> requests( 2 * num_n, MPI_REQUEST_NULL );
182
183 // Pick a tag to use for communication. This object has its own
184 // communication space so any tag will do.
185 const int mpi_tag = 2345;
186
187 // Post receives for all neighbors that are not self sends.
188 for ( int n = 0; n < num_n; ++n )
189 {
190 // Only process this neighbor if there is work to do.
191 if ( 0 < this->_owned_buffers[n].size() )
192 {
193 MPI_Irecv( this->_owned_buffers[n].data(),
194 this->_owned_buffers[n].size(), MPI_BYTE,
195 this->_neighbor_ranks[n],
196 mpi_tag + this->_receive_tags[n], comm,
197 &requests[n] );
198 }
199 }
200
201 // Pack send buffers and post sends.
202 for ( int n = 0; n < num_n; ++n )
203 {
204 // Only process this neighbor if there is work to do.
205 if ( 0 < this->_ghosted_buffers[n].size() )
206 {
207 // Pack the send buffer.
208 this->packBuffer( exec_space, this->_ghosted_buffers[n],
209 this->_ghosted_steering[n],
210 arrays.view()... );
211
212 // Post a send.
213 MPI_Isend( this->_ghosted_buffers[n].data(),
214 this->_ghosted_buffers[n].size(), MPI_BYTE,
215 this->_neighbor_ranks[n],
216 mpi_tag + this->_send_tags[n], comm,
217 &requests[num_n + n] );
218 }
219 }
220
221 // Unpack receive buffers.
222 bool unpack_complete = false;
223 while ( !unpack_complete )
224 {
225 // Get the next buffer to unpack.
226 int unpack_index = MPI_UNDEFINED;
227 MPI_Waitany( num_n, requests.data(), &unpack_index,
228 MPI_STATUS_IGNORE );
229
230 // If there are no more buffers to unpack we are done.
231 if ( MPI_UNDEFINED == unpack_index )
232 {
233 unpack_complete = true;
234 }
235
236 // Otherwise unpack the next buffer and apply the reduce operation.
237 else
238 {
239 this->unpackBuffer(
240 reduce_op, exec_space, this->_owned_buffers[unpack_index],
241 this->_owned_steering[unpack_index], arrays.view()... );
242 }
243
244 // Wait on send requests.
245 MPI_Waitall( num_n, requests.data() + num_n, MPI_STATUSES_IGNORE );
246 }
247 }
248};
249
250} // end namespace Grid
251} // end namespace Cabana
252
253#endif // end CABANA_GRID_HALO_MPI_HPP
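
For orientation, a minimal usage sketch of the interface above (not part of this header). It assumes an existing Cabana::Grid::Array named array and uses the tutorial-style names createHalo, NodeHaloPattern, and ScatterReduce::Sum; check the 0.8.0-dev documentation for the exact overloads.

// Usage sketch only -- not part of this header. The factory and pattern
// names below follow the Cabana tutorials and may differ slightly from
// the overloads in this development version.
#include <Cabana_Grid.hpp>
#include <Kokkos_Core.hpp>

template <class ArrayType>
void exchangeHalo( ArrayType& array, const int halo_width )
{
    Kokkos::DefaultExecutionSpace exec_space;

    // Exchange with every face, edge, and corner neighbor of a 3D grid.
    auto halo = Cabana::Grid::createHalo(
        Cabana::Grid::NodeHaloPattern<3>(), halo_width, array );

    // Fill our ghost values from the ranks that own them.
    halo->gather( exec_space, array );

    // ... compute, accumulating contributions into ghost values ...

    // Push ghost contributions back to their owners, summing overlaps.
    halo->scatter( exec_space, Cabana::Grid::ScatterReduce::Sum(), array );
}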
Grid field arrays.
Logical grid indexing.
Pack variadic template parameters for device capture.
std::vector< int > _neighbor_ranks
The ranks we will send/receive from.
Definition Cabana_Grid_HaloBase.hpp:742
std::vector< Kokkos::View< char *, memory_space > > _ghosted_buffers
For each neighbor, send/receive buffers for data we ghost.
Definition Cabana_Grid_HaloBase.hpp:754
void packBuffer(const ExecutionSpace &exec_space, const Kokkos::View< char *, memory_space > &buffer, const Kokkos::View< int **, memory_space > &steering, ArrayViews... array_views) const
Pack arrays into a buffer.
Definition Cabana_Grid_HaloBase.hpp:579
std::vector< int > _receive_tags
The tag we use for receiving from each neighbor.
Definition Cabana_Grid_HaloBase.hpp:748
HaloBase(const Pattern &pattern, const int width, const ArrayTypes &... arrays)
Constructor.
Definition Cabana_Grid_HaloBase.hpp:220
std::vector< Kokkos::View< int **, memory_space > > _owned_steering
For each neighbor, steering vector for the owned buffer.
Definition Cabana_Grid_HaloBase.hpp:757
MemorySpace memory_space
Memory space.
Definition Cabana_Grid_HaloBase.hpp:207
std::vector< Kokkos::View< char *, memory_space > > _owned_buffers
For each neighbor, send/receive buffers for data we own.
Definition Cabana_Grid_HaloBase.hpp:751
std::vector< Kokkos::View< int **, memory_space > > _ghosted_steering
For each neighbor, steering vector for the ghosted buffer.
Definition Cabana_Grid_HaloBase.hpp:760
void unpackBuffer(const ReduceOp &reduce_op, const ExecutionSpace &exec_space, const Kokkos::View< char *, memory_space > &buffer, const Kokkos::View< int **, memory_space > &steering, ArrayViews... array_views) const
Unpack arrays from a buffer.
Definition Cabana_Grid_HaloBase.hpp:720
MPI_Comm getComm(const Array_t &array) const
Get the communicator.
Definition Cabana_Grid_HaloBase.hpp:285
std::vector< int > _send_tags
The tag we use for sending to each neighbor.
Definition Cabana_Grid_HaloBase.hpp:745
Halo(const Pattern &pattern, const int width, const ArrayTypes &... arrays)
Constructor.
Definition Cabana_Grid_Halo_Mpi.hpp:61
void scatter(const ExecutionSpace &exec_space, const ReduceOp &reduce_op, const ArrayTypes &... arrays) const
Scatter data from our ghosts to their owners using the given type of reduce operation.
Definition Cabana_Grid_Halo_Mpi.hpp:167
void gather(const ExecutionSpace &exec_space, const ArrayTypes &... arrays) const
Gather data into our ghosts from their owners.
Definition Cabana_Grid_Halo_Mpi.hpp:77
Core: particle data structures and algorithms.
Definition Cabana_AoSoA.hpp:36
auto size(SliceType slice, typename std::enable_if< is_slice< SliceType >::value, int >::type *=0)
Check slice size (differs from Kokkos View).
Definition Cabana_Slice.hpp:1019
Definition Cabana_Grid_HaloBase.hpp:184
Vanilla MPI backend tag - default.
Definition Cabana_Tags.hpp:28
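
Both gather() and scatter() drain their receives with the same completion-driven loop: MPI_Waitany returns the index of whichever posted receive finishes next, and returns MPI_UNDEFINED once every request is either complete or null. A standalone sketch of that pattern, independent of Cabana:

// Standalone illustration (not Cabana code) of the receive-draining loop.
#include <mpi.h>
#include <vector>

void drainReceives( std::vector<MPI_Request>& recv_requests )
{
    while ( true )
    {
        int index = MPI_UNDEFINED;
        MPI_Waitany( static_cast<int>( recv_requests.size() ),
                     recv_requests.data(), &index, MPI_STATUS_IGNORE );

        // MPI_UNDEFINED means every request has completed or was null.
        if ( MPI_UNDEFINED == index )
            break;

        // Otherwise unpack the buffer paired with recv_requests[index];
        // MPI has already set that request to MPI_REQUEST_NULL.
    }
}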