#ifndef CABANA_GRID_LOADBALANCER_HPP
#define CABANA_GRID_LOADBALANCER_HPP

#include <Cabana_Grid_GlobalGrid.hpp>
#include <Cabana_Grid_GlobalMesh.hpp>
#include <Cabana_Grid_Partitioner.hpp>
#include <Cabana_Grid_Types.hpp>

#include <Kokkos_Core.hpp>
#include <Kokkos_Profiling_ScopedRegion.hpp>

#include <ALL.hpp>

#include <mpi.h>

#include <array>
#include <cmath>
#include <memory>
#include <vector>

//! Load Balancer for global grid.
template <class MeshType>
class LoadBalancer;

//! Load Balancer partial specialization for the uniform mesh.
template <class Scalar, std::size_t NumSpaceDim>
class LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>
{
  public:
    //! Mesh type.
    using mesh_type = UniformMesh<Scalar, NumSpaceDim>;

    //! Constructor if domains may become arbitrarily small.
    LoadBalancer( MPI_Comm comm,
                  const std::shared_ptr<GlobalGrid<mesh_type>>& global_grid )
        : _global_grid( global_grid )
        , _comm( comm )
    {
        setupLibALL();
    }

    //! Constructor with the same minimum domain size in every dimension.
    LoadBalancer( MPI_Comm comm,
                  const std::shared_ptr<GlobalGrid<mesh_type>>& global_grid,
                  const double min_domain_size )
        : _global_grid( global_grid )
        , _comm( comm )
    {
        setupLibALL();
        std::vector<double> vec_min_domain_size( NumSpaceDim,
                                                 min_domain_size );
        _liball->setMinDomainSize( vec_min_domain_size );
    }

    //! Constructor with a per-dimension minimum domain size.
    LoadBalancer( MPI_Comm comm,
                  const std::shared_ptr<GlobalGrid<mesh_type>>& global_grid,
                  const std::array<double, NumSpaceDim> min_domain_size )
        : _global_grid( global_grid )
        , _comm( comm )
    {
        setupLibALL();
        std::vector<double> vec_min_domain_size( min_domain_size.begin(),
                                                 min_domain_size.end() );
        _liball->setMinDomainSize( vec_min_domain_size );
    }

    //! Compute a new, balanced decomposition from the reported local work and
    //! return a global grid rebuilt on it (also stored as the new grid).
    std::shared_ptr<GlobalGrid<mesh_type>> createBalancedGlobalGrid(
        const std::shared_ptr<GlobalMesh<mesh_type>>& global_mesh,
        const BlockPartitioner<NumSpaceDim>& partitioner,
        const double local_work )
    {
        Kokkos::Profiling::ScopedRegion region(
            "Cabana::Grid::LoadBalancer::balance" );

        // Create the new decomposition from the local work.
        _liball->setWork( local_work );
        _liball->balance();

        // Snap the updated ALL vertices to the uniform cell grid to get the
        // new local cell offset and extent.
        std::vector<ALL::Point<double>> updated_vertices =
            _liball->getVertices();
        std::array<double, NumSpaceDim> cell_size;
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            cell_size[d] = _global_grid->globalMesh().cellSize( d );
        std::array<int, NumSpaceDim> cell_index_lo, cell_index_hi;
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            cell_index_lo[d] =
                std::rint( updated_vertices.at( 0 )[d] / cell_size[d] );
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            cell_index_hi[d] =
                std::rint( updated_vertices.at( 1 )[d] / cell_size[d] );
        std::array<int, NumSpaceDim> num_cell;
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            num_cell[d] = cell_index_hi[d] - cell_index_lo[d];

        // Create a new global grid with the updated cell count and offset.
        std::array<bool, NumSpaceDim> periodic;
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            periodic[d] = _global_grid->isPeriodic( d );
        std::shared_ptr<GlobalGrid<mesh_type>> global_grid =
            createGlobalGrid( _comm, global_mesh, periodic, partitioner );
        global_grid->setNumCellAndOffset( num_cell, cell_index_lo );
        _global_grid = global_grid;
        return _global_grid;
    }

    //! Return the low (first NumSpaceDim entries) and high (last NumSpaceDim
    //! entries) corners of the domain as currently held by the ALL solver.
    std::array<double, NumSpaceDim * 2> getInternalVertices() const
    {
        std::array<double, NumSpaceDim * 2> internal_vertices;
        std::vector<ALL::Point<double>> lb_vertices = _liball->getVertices();
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            internal_vertices[d] = lb_vertices.at( 0 )[d];
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            internal_vertices[d + NumSpaceDim] = lb_vertices.at( 1 )[d];
        return internal_vertices;
    }

    //! Return the low and high corners of the current domain as described by
    //! the stored global grid, i.e. snapped to whole cells.
    std::array<double, NumSpaceDim * 2> getVertices() const
    {
        std::array<double, NumSpaceDim> cell_size;
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            cell_size[d] = _global_grid->globalMesh().cellSize( d );
        std::array<double, NumSpaceDim * 2> vertices;
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            vertices[d] = _global_grid->globalOffset( d ) * cell_size[d];
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            vertices[d + NumSpaceDim] =
                vertices[d] + _global_grid->ownedNumCell( d ) * cell_size[d];
        return vertices;
    }
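
    // Note on the two accessors above: getInternalVertices() reports the
    // domain corners straight from the ALL solver, which may lie anywhere in
    // space, while getVertices() reconstructs the corners from the stored
    // global grid after the decomposition has been snapped to whole cells.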

    //! Return the global work imbalance: 0 when every rank reports equal
    //! work, approaching 1 as the spread between ranks grows.
    double getImbalance() const
    {
        const double local_work = _liball->getWork();
        double max_work, min_work;
        MPI_Allreduce( &local_work, &max_work, 1, MPI_DOUBLE, MPI_MAX, _comm );
        MPI_Allreduce( &local_work, &min_work, 1, MPI_DOUBLE, MPI_MIN, _comm );
        return ( max_work - min_work ) / ( max_work + min_work );
    }
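
    // Worked example of the metric above: if the least-loaded rank reports
    // work 2.0 and the most-loaded rank reports 6.0, the result is
    // ( 6.0 - 2.0 ) / ( 6.0 + 2.0 ) = 0.5; equal work on all ranks gives 0.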

  private:
    //! Configure the ALL solver from the current global grid.
    void setupLibALL()
    {
        // ALL constructor arguments: balancing method, spatial dimension,
        // gamma parameter.
        _liball = std::make_shared<ALL::ALL<double, double>>( ALL::TENSOR,
                                                              NumSpaceDim, 0 );
        // Describe the Cartesian process grid to ALL.
        std::vector<int> loc( NumSpaceDim, 0 );
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            loc[d] = _global_grid->dimBlockId( d );
        std::vector<int> size( NumSpaceDim, 0 );
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            size[d] = _global_grid->dimNumBlock( d );
        _liball->setProcGridParams( loc, size );
        _liball->setCommunicator( _comm );
        int rank;
        MPI_Comm_rank( _global_grid->comm(), &rank );
        _liball->setProcTag( rank );

        // Initial vertices: the corners of the currently owned block.
        std::array<double, NumSpaceDim> cell_size;
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            cell_size[d] = _global_grid->globalMesh().cellSize( d );
        std::vector<ALL::Point<double>> lb_vertices(
            2, ALL::Point<double>( NumSpaceDim ) );
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            lb_vertices.at( 0 )[d] =
                _global_grid->globalOffset( d ) * cell_size[d];
        for ( std::size_t d = 0; d < NumSpaceDim; ++d )
            lb_vertices.at( 1 )[d] =
                lb_vertices.at( 0 )[d] +
                _global_grid->ownedNumCell( d ) * cell_size[d];
        _liball->setVertices( lb_vertices );
        _liball->setup();
    }

    std::shared_ptr<ALL::ALL<double, double>> _liball;
    std::shared_ptr<GlobalGrid<mesh_type>> _global_grid;
    MPI_Comm _comm;
};

//! Create a load balancer.
template <class Scalar, std::size_t NumSpaceDim>
std::shared_ptr<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>
createLoadBalancer(
    MPI_Comm comm,
    const std::shared_ptr<GlobalGrid<UniformMesh<Scalar, NumSpaceDim>>>&
        global_grid )
{
    return std::make_shared<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>(
        comm, global_grid );
}

//! Create a load balancer with a uniform minimum domain size.
template <class Scalar, std::size_t NumSpaceDim>
std::shared_ptr<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>
createLoadBalancer(
    MPI_Comm comm,
    const std::shared_ptr<GlobalGrid<UniformMesh<Scalar, NumSpaceDim>>>&
        global_grid,
    const double min_domain_size )
{
    return std::make_shared<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>(
        comm, global_grid, min_domain_size );
}

//! Create a load balancer with a per-dimension minimum domain size.
template <class Scalar, std::size_t NumSpaceDim>
std::shared_ptr<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>
createLoadBalancer(
    MPI_Comm comm,
    const std::shared_ptr<GlobalGrid<UniformMesh<Scalar, NumSpaceDim>>>&
        global_grid,
    const std::array<double, NumSpaceDim> min_domain_size )
{
    return std::make_shared<LoadBalancer<UniformMesh<Scalar, NumSpaceDim>>>(
        comm, global_grid, min_domain_size );
}
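
// A minimal usage sketch. It assumes a GlobalMesh and BlockPartitioner were
// already used to build global_grid via createGlobalGrid(), that the per-step
// work estimate comes from the application, and that the member names below
// follow the fragments above; any signature not fully shown there (notably
// createBalancedGlobalGrid) should be treated as an assumption rather than a
// definitive interface.
//
//   auto lb = createLoadBalancer( comm, global_grid );
//   for ( int step = 0; step < num_steps; ++step )
//   {
//       // Application-specific cost of this rank's domain (hypothetical).
//       double local_work = estimateLocalWork();
//       global_grid = lb->createBalancedGlobalGrid( global_mesh, partitioner,
//                                                   local_work );
//       std::cout << "imbalance: " << lb->getImbalance() << "\n";
//   }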