Cabana 0.8.0-dev
 
Loading...
Searching...
No Matches
Cabana_Grid_LoadBalancer.hpp
Go to the documentation of this file.
1/****************************************************************************
2 * Copyright (c) 2018-2023 by the Cabana authors *
3 * All rights reserved. *
4 * *
5 * This file is part of the Cabana library. Cabana is distributed under a *
6 * BSD 3-clause license. For the licensing terms see the LICENSE file in *
7 * the top-level directory. *
8 * *
9 * SPDX-License-Identifier: BSD-3-Clause *
10 ****************************************************************************/
11
16#ifndef CABANA_GRID_LOADBALANCER_HPP
17#define CABANA_GRID_LOADBALANCER_HPP
18
20#include <Cabana_Grid_Types.hpp>
21
22#include <Kokkos_Core.hpp>
23#include <Kokkos_Profiling_ScopedRegion.hpp>
24
25#include <ALL.hpp>
26
27#include <mpi.h>
28
29#include <array>
30#include <cmath>
31#include <memory>
32#include <vector>
33
34namespace Cabana
35{
36namespace Grid
37{
38namespace Experimental
39{
40//---------------------------------------------------------------------------//
/*!
  \brief Load Balancer for global grid.
  \tparam MeshType Mesh type: UniformMesh, NonUniformMesh, or SparseMesh.

  Primary template; only specializations (e.g. for UniformMesh) are defined.
*/
template <class MeshType>
class LoadBalancer;
53template <class Scalar, std::size_t NumSpaceDim>
54class LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>
55{
56 public:
59
61 static constexpr std::size_t num_space_dim = NumSpaceDim;
62
68 LoadBalancer( MPI_Comm comm,
69 const std::shared_ptr<GlobalGrid<mesh_type>>& global_grid )
70 : _global_grid( global_grid )
71 , _comm( comm )
72 {
73 // todo(sschulz): We don't need the partitioner except for creating the
74 // global grid again. It would suffice to retrieve the partitioner from
75 // the global grid, but it isn't saved there as well.
76 setupLibrary();
77 }
78
85 LoadBalancer( MPI_Comm comm,
86 const std::shared_ptr<GlobalGrid<mesh_type>>& global_grid,
87 const double min_domain_size )
88 : _global_grid( global_grid )
89 , _comm( comm )
90 {
91 setupLibrary();
92 std::vector<double> vec_min_domain_size( NumSpaceDim, min_domain_size );
93 _liball->setMinDomainSize( vec_min_domain_size );
94 }
95
102 LoadBalancer( MPI_Comm comm,
103 const std::shared_ptr<GlobalGrid<mesh_type>>& global_grid,
104 const std::array<double, NumSpaceDim> min_domain_size )
105 : _global_grid( global_grid )
106 , _comm( comm )
107 {
108 setupLibrary();
109 std::vector<double> vec_min_domain_size( min_domain_size.begin(),
110 min_domain_size.end() );
111 _liball->setMinDomainSize( vec_min_domain_size );
112 }
113
120 std::shared_ptr<GlobalGrid<mesh_type>> createBalancedGlobalGrid(
121 const std::shared_ptr<GlobalMesh<mesh_type>>& global_mesh,
122 const BlockPartitioner<NumSpaceDim>& partitioner,
123 const double local_work )
124 {
125 Kokkos::Profiling::ScopedRegion region(
126 "Cabana::Grid::LoadBalancer::balance" );
127
128 // Create new decomposition
129 _liball->setWork( local_work );
130 _liball->balance();
131 // Calculate new local cell offset and local extent
132 std::vector<ALL::Point<double>> updated_vertices =
133 _liball->getVertices();
134 std::array<double, NumSpaceDim> cell_size;
135 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
136 cell_size[d] = _global_grid->globalMesh().cellSize( d );
137 std::array<int, NumSpaceDim> cell_index_lo, cell_index_hi;
138 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
139 cell_index_lo[d] =
140 std::rint( updated_vertices.at( 0 )[d] / cell_size[d] );
141 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
142 cell_index_hi[d] =
143 std::rint( updated_vertices.at( 1 )[d] / cell_size[d] );
144 std::array<int, NumSpaceDim> num_cell;
145 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
146 num_cell[d] = cell_index_hi[d] - cell_index_lo[d];
147 // Create new global grid
148 // todo(sschulz): Can GlobalGrid be constructed with an already
149 // cartesian communicator? MPI_Cart_Create is called with the given
150 // comm.
151 std::array<bool, NumSpaceDim> periodic;
152 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
153 periodic[d] = _global_grid->isPeriodic( d );
154 std::shared_ptr<GlobalGrid<mesh_type>> global_grid =
155 createGlobalGrid( _comm, global_mesh, periodic, partitioner );
156 global_grid->setNumCellAndOffset( num_cell, cell_index_lo );
157 _global_grid = global_grid;
158
159 return _global_grid;
160 }
161
164 const std::array<double, NumSpaceDim * 2> getInternalVertices() const
165 {
166 std::array<double, NumSpaceDim * 2> internal_vertices;
167 std::vector<ALL::Point<double>> lb_vertices = _liball->getVertices();
168 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
169 internal_vertices[d] = lb_vertices.at( 0 )[d];
170 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
171 internal_vertices[d + NumSpaceDim] = lb_vertices.at( 1 )[d];
172 return internal_vertices;
173 // todo(sschulz): Is this ok to pass arrays?
174 }
175
178 const std::array<double, NumSpaceDim * 2> getVertices() const
179 {
180 std::array<double, NumSpaceDim> cell_size;
181 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
182 cell_size[d] = _global_grid->globalMesh().cellSize( d );
183 std::array<double, NumSpaceDim * 2> vertices;
184 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
185 vertices[d] = _global_grid->globalOffset( d ) * cell_size[d];
186 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
187 vertices[d + NumSpaceDim] =
188 vertices[d] + _global_grid->ownedNumCell( d ) * cell_size[d];
189 return vertices;
190 // todo(sschulz): Is this ok to pass arrays?
191 }
192
195 double getImbalance() const
196 {
197 const double local_work = _liball->getWork();
198 double max_work, min_work;
199 MPI_Allreduce( &local_work, &max_work, 1, MPI_DOUBLE, MPI_MAX, _comm );
200 MPI_Allreduce( &local_work, &min_work, 1, MPI_DOUBLE, MPI_MIN, _comm );
201 return ( max_work - min_work ) / ( max_work + min_work );
202 }
203
204 // todo(sschulz): Methods to access single values from the vertices, as in
205 // the other classes.
206
207 // todo(sschulz): Should also be relatively straight forward to extend to
208 // support non uniform mesh, since only the calculation of the vertices and
209 // inverse need to be changed. And those would be an exclusive scan of the
210 // edge coordinates. Likewise the calculation of cells is rounding to the
211 // nearest sum.
212
213 private:
215 void setupLibrary()
216 {
217 // todo(sschulz): Investigate, why usage of num_space_dim instead of
218 // NumSpaceDim causes a linker error.
219 _liball = std::make_shared<ALL::ALL<double, double>>( ALL::TENSOR,
220 NumSpaceDim, 0 );
221 std::vector<int> loc( NumSpaceDim, 0 );
222 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
223 loc[d] = _global_grid->dimBlockId( d );
224 std::vector<int> size( NumSpaceDim, 0 );
225 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
226 size[d] = _global_grid->dimNumBlock( d );
227 _liball->setProcGridParams( loc, size );
228 _liball->setCommunicator( _comm );
229 int rank;
230 MPI_Comm_rank( _global_grid->comm(), &rank );
231 _liball->setProcTag( rank );
232 _liball->setup();
233 // Set initial vertices
234 std::array<double, NumSpaceDim> cell_size;
235 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
236 cell_size[d] = _global_grid->globalMesh().cellSize( d );
237 std::vector<ALL::Point<double>> lb_vertices(
238 2, ALL::Point<double>( NumSpaceDim ) );
239 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
240 lb_vertices.at( 0 )[d] =
241 _global_grid->globalOffset( d ) * cell_size[d];
242 for ( std::size_t d = 0; d < NumSpaceDim; ++d )
243 lb_vertices.at( 1 )[d] =
244 lb_vertices.at( 0 )[d] +
245 _global_grid->ownedNumCell( d ) * cell_size[d];
246 _liball->setVertices( lb_vertices );
247 }
248
249 std::shared_ptr<ALL::ALL<double, double>> _liball;
250 std::shared_ptr<GlobalGrid<mesh_type>> _global_grid;
251 MPI_Comm _comm;
252};
253
254//---------------------------------------------------------------------------//
255// Creation function.
256//---------------------------------------------------------------------------//
262template <class Scalar, std::size_t NumSpaceDim>
263std::shared_ptr<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>
265 MPI_Comm comm,
266 const std::shared_ptr<GlobalGrid<UniformMesh<Scalar, NumSpaceDim>>>&
267 global_grid )
268{
269 return std::make_shared<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>(
270 comm, global_grid );
271}
272
278template <class Scalar, std::size_t NumSpaceDim>
279std::shared_ptr<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>
281 MPI_Comm comm,
282 const std::shared_ptr<GlobalGrid<UniformMesh<Scalar, NumSpaceDim>>>&
283 global_grid,
284 const double min_domain_size )
285{
286 return std::make_shared<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>(
287 comm, global_grid, min_domain_size );
288}
289
296template <class Scalar, std::size_t NumSpaceDim>
297std::shared_ptr<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>
299 MPI_Comm comm,
300 const std::shared_ptr<GlobalGrid<UniformMesh<Scalar, NumSpaceDim>>>&
301 global_grid,
302 const std::array<double, NumSpaceDim> min_domain_size )
303{
304 return std::make_shared<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>(
305 comm, global_grid, min_domain_size );
306}
307
308} // end namespace Experimental
309} // namespace Grid
310} // namespace Cabana
311
312//---------------------------------------------------------------------------//
313
314#endif // end CABANA_GRID_LOADBALANCER_HPP
std::shared_ptr< GlobalGrid< MeshType > > createGlobalGrid(MPI_Comm comm, const std::shared_ptr< GlobalMesh< MeshType > > &global_mesh, const std::array< bool, MeshType::num_space_dim > &periodic, const BlockPartitioner< MeshType::num_space_dim > &partitioner)
Create a global grid.
Definition Cabana_Grid_GlobalGrid.hpp:199
std::shared_ptr< LoadBalancer< UniformMesh< Scalar, NumSpaceDim > > > createLoadBalancer(MPI_Comm comm, const std::shared_ptr< GlobalGrid< UniformMesh< Scalar, NumSpaceDim > > > &global_grid)
Create a load balancer.
Definition Cabana_Grid_LoadBalancer.hpp:264
Grid type tags.
Block partitioner base class.
Definition Cabana_Grid_Partitioner.hpp:37
const std::array< double, NumSpaceDim *2 > getVertices() const
Return array of low and high corner of current domain. Represents the actual domain layout.
Definition Cabana_Grid_LoadBalancer.hpp:178
LoadBalancer(MPI_Comm comm, const std::shared_ptr< GlobalGrid< mesh_type > > &global_grid)
Constructor if domains may be arbitrarily small.
Definition Cabana_Grid_LoadBalancer.hpp:68
static constexpr std::size_t num_space_dim
Spatial dimension.
Definition Cabana_Grid_LoadBalancer.hpp:61
UniformMesh< Scalar, NumSpaceDim > mesh_type
Mesh type.
Definition Cabana_Grid_LoadBalancer.hpp:58
const std::array< double, NumSpaceDim *2 > getInternalVertices() const
Return array of low and high corner of current internal domain. This is not aligned to the mesh!
Definition Cabana_Grid_LoadBalancer.hpp:164
double getImbalance() const
Return current load imbalance (wmax-wmin)/(wmax+wmin). Must be called by all ranks.
Definition Cabana_Grid_LoadBalancer.hpp:195
LoadBalancer(MPI_Comm comm, const std::shared_ptr< GlobalGrid< mesh_type > > &global_grid, const double min_domain_size)
Constructor if domains have a minimum size.
Definition Cabana_Grid_LoadBalancer.hpp:85
std::shared_ptr< GlobalGrid< mesh_type > > createBalancedGlobalGrid(const std::shared_ptr< GlobalMesh< mesh_type > > &global_mesh, const BlockPartitioner< NumSpaceDim > &partitioner, const double local_work)
Create a new, balanced global grid and return that.
Definition Cabana_Grid_LoadBalancer.hpp:120
LoadBalancer(MPI_Comm comm, const std::shared_ptr< GlobalGrid< mesh_type > > &global_grid, const std::array< double, NumSpaceDim > min_domain_size)
Constructor if domains have a minimum size.
Definition Cabana_Grid_LoadBalancer.hpp:102
Load Balancer for global grid.
Definition Cabana_Grid_LoadBalancer.hpp:46
Global logical grid.
Definition Cabana_Grid_GlobalGrid.hpp:39
Global mesh partial specialization for uniform mesh.
Definition Cabana_Grid_GlobalMesh.hpp:49
Core: particle data structures and algorithms.
Definition Cabana_AoSoA.hpp:36
auto size(SliceType slice, typename std::enable_if< is_slice< SliceType >::value, int >::type *=0)
Check slice size (differs from Kokkos View).
Definition Cabana_Slice.hpp:1012
Uniform mesh tag.
Definition Cabana_Grid_Types.hpp:227