Move initializeRecPartition into constructor
squarefk committed Aug 4, 2022
1 parent 16cea3e commit 88ed450
Showing 7 changed files with 43 additions and 53 deletions.
@@ -146,8 +146,8 @@ void performanceTest( std::ostream& stream, MPI_Comm comm,
for ( int t = 0; t < num_run; ++t )
{
// ensure every optimization process starts from the same status
- partitioner.initializeRecPartition(
-     ave_partition[0], ave_partition[1], ave_partition[2] );
+ partitioner.initializePartitionByAverage( comm,
+                                           global_num_cell );

// compute local workload
local_workload_timer.start( p );
@@ -164,8 +164,8 @@ void performanceTest( std::ostream& stream, MPI_Comm comm,
for ( int t = 0; t < num_run; ++t )
{
// ensure every optimization process starts from the same status
- partitioner.initializeRecPartition(
-     ave_partition[0], ave_partition[1], ave_partition[2] );
+ partitioner.initializePartitionByAverage( comm,
+                                           global_num_cell );

// compute local workload
local_workload_timer.start( frac );
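
In both benchmarks the effect is the same: instead of rebuilding the average partition arrays by hand, the partitioner is asked to reset itself. A minimal sketch of the new reset inside a timing loop, assuming `partitioner`, `comm`, `num_run`, and `global_num_cell` are set up as in the benchmarks above:

    for ( int t = 0; t < num_run; ++t )
    {
        // Reset to the uniform average tile partition so that every
        // optimization run starts from the same initial layout.
        partitioner.initializePartitionByAverage( comm, global_num_cell );

        // ... measure the local workload and run one optimization pass ...
    }
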
43 changes: 38 additions & 5 deletions cajita/src/Cajita_DynamicPartitioner.hpp
@@ -101,6 +101,7 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
// compute the ranks_per_dim from MPI communicator
allocate( global_cells_per_dim );
ranksPerDimension( comm );
+ initializePartitionByAverage( comm, global_cells_per_dim );
}

/*!
@@ -126,6 +127,7 @@ class DynamicPartitioner : public BlockPartitioner<NumSpaceDim>
int comm_size;
MPI_Comm_size( comm, &comm_size );
MPI_Dims_create( comm_size, num_space_dim, _ranks_per_dim.data() );
+ initializePartitionByAverage( comm, global_cells_per_dim );
}

/*!
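
Since both constructors now end with this call, a freshly constructed DynamicPartitioner already carries a valid average partition. A minimal construction sketch, assuming the Cajita namespace and a Kokkos device type for the first template argument, and keeping the third constructor argument (10) as it appears in the unit tests below (its meaning is not spelled out in this diff):

    // Sketch: 16 x 32 x 64 global cells, 4 cells per tile edge (the template
    // value used by the sparse-grid unit tests in this commit).
    std::array<int, 3> global_num_cell = { 16, 32, 64 };
    Cajita::DynamicPartitioner<Kokkos::DefaultExecutionSpace::device_type, 4>
        partitioner( MPI_COMM_WORLD, global_num_cell, 10 );
    // No explicit setRecPartition() call is needed: the constructor has
    // already installed the average tile partition.
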
@@ -267,18 +269,49 @@
}

/*!
- \brief Initialize the tile partition; partition in each dimension
+ \brief Initialize the tile partition by average size
+ \param comm The communicator to use for initializing partitioning
+ \param global_cells_per_dim 3D array, global cells in each dimension
+ */
+ void initializePartitionByAverage(
+     MPI_Comm comm,
+     const std::array<int, num_space_dim>& global_cells_per_dim )
+ {
+     std::array<int, 3> global_num_tile = {
+         global_cells_per_dim[0] / (int)cell_num_per_tile_dim,
+         global_cells_per_dim[1] / (int)cell_num_per_tile_dim,
+         global_cells_per_dim[2] / (int)cell_num_per_tile_dim };
+
+     auto ranks_per_dim = ranksPerDimension( comm, global_cells_per_dim );
+     std::array<std::vector<int>, 3> rec_partitions;
+     for ( int d = 0; d < 3; ++d )
+     {
+         int ele = global_num_tile[d] / ranks_per_dim[d];
+         int part = 0;
+         for ( int i = 0; i < ranks_per_dim[d]; ++i )
+         {
+             rec_partitions[d].push_back( part );
+             part += ele;
+         }
+         rec_partitions[d].push_back( global_num_tile[d] );
+     }
+
+     setRecPartition( rec_partitions[0], rec_partitions[1],
+                      rec_partitions[2] );
+ }
+
+ /*!
+ \brief Set the tile partition; partition in each dimension
has the form [0, p_1, ..., p_n, total_tile_num], so the partition
would be [0, p_1), [p_1, p_2) ... [p_n, total_tile_num]
\param rec_partition_i partition array in dimension i
\param rec_partition_j partition array in dimension j
\param rec_partition_k partition array in dimension k
*/
- void initializeRecPartition( std::vector<int>& rec_partition_i,
-                              std::vector<int>& rec_partition_j,
-                              std::vector<int>& rec_partition_k )
+ void setRecPartition( std::vector<int>& rec_partition_i,
+                       std::vector<int>& rec_partition_j,
+                       std::vector<int>& rec_partition_k )
{

int max_size = 0;
for ( std::size_t d = 0; d < num_space_dim; ++d )
max_size =
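
For concreteness, the following standalone sketch (not part of the commit) reproduces the arithmetic of initializePartitionByAverage with hard-coded inputs: 16 x 32 x 64 cells, 4 cells per tile edge, and an assumed 1 x 2 x 2 rank layout give 4 x 8 x 16 tiles and per-dimension partitions [0, 4], [0, 4, 8], and [0, 8, 16].

    #include <array>
    #include <cstdio>
    #include <vector>

    // Standalone sketch mirroring the averaging loop above; no MPI, the rank
    // layout that ranksPerDimension() would return is hard-coded as 1 x 2 x 2.
    int main()
    {
        const int cell_num_per_tile_dim = 4;
        std::array<int, 3> global_cells_per_dim = { 16, 32, 64 };
        std::array<int, 3> ranks_per_dim = { 1, 2, 2 };

        std::array<std::vector<int>, 3> rec_partitions;
        for ( int d = 0; d < 3; ++d )
        {
            int global_num_tile = global_cells_per_dim[d] / cell_num_per_tile_dim;
            int ele = global_num_tile / ranks_per_dim[d];
            int part = 0;
            for ( int i = 0; i < ranks_per_dim[d]; ++i )
            {
                rec_partitions[d].push_back( part );
                part += ele;
            }
            rec_partitions[d].push_back( global_num_tile );

            std::printf( "dim %d:", d );
            for ( int p : rec_partitions[d] )
                std::printf( " %d", p ); // prints 0 4 | 0 4 8 | 0 8 16
            std::printf( "\n" );
        }
        return 0;
    }

When the tile count is not divisible by the rank count, the remainder lands entirely in the last interval; for example, 10 tiles over 3 ranks give [0, 3, 6, 10], so the last rank owns 4 tiles.
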
4 changes: 1 addition & 3 deletions cajita/unit_test/tstGlobalGrid.hpp
@@ -446,8 +446,6 @@ void sparseGridTest3d()
}
rec_partitions[d].push_back( global_num_tile[d] );
}
- partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                     rec_partitions[2] );

// Create spares global grid
auto global_grid = createGlobalGrid( MPI_COMM_WORLD, global_mesh,
@@ -562,7 +560,7 @@ void sparseGridTest3d()
for ( int id = 1; id < ranks_per_dim[d]; id++ )
part[d][id] += 1;

- partitioner.initializeRecPartition( part[0], part[1], part[2] );
+ partitioner.setRecPartition( part[0], part[1], part[2] );

std::array<int, 3> new_owned_num_cell;
std::array<int, 3> new_global_cell_offset;
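
The test above perturbs the current partition by one tile per interior boundary and re-applies it; more generally, any monotone array of the documented form [0, p_1, ..., p_n, total_tile_num], with one entry per rank boundary in that dimension, can be installed directly. A minimal sketch, assuming the partitioner and the 1 x 2 x 2 rank layout over 4 x 8 x 16 tiles from the sketch above:

    // Deliberately uneven split along dimension j: the two ranks own
    // tiles [0, 3) and [3, 8) instead of the average [0, 4) and [4, 8).
    std::vector<int> part_i = { 0, 4 };     // 1 rank over 4 tiles
    std::vector<int> part_j = { 0, 3, 8 };  // 2 ranks over 8 tiles, uneven
    std::vector<int> part_k = { 0, 8, 16 }; // 2 ranks over 16 tiles, even
    partitioner.setRecPartition( part_i, part_j, part_k );
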
17 changes: 0 additions & 17 deletions cajita/unit_test/tstParticleDynamicPartitioner.hpp
@@ -177,23 +177,6 @@ void random_distribution_automatic_rank( int occupy_num_per_rank )
MPI_Barrier( MPI_COMM_WORLD );
}

- // init partitions (average partition)
- std::array<std::vector<int>, 3> rec_partitions;
- for ( int d = 0; d < 3; ++d )
- {
-     int ele = size_tile_per_dim / ranks_per_dim[d];
-     int part = 0;
-     for ( int i = 0; i < ranks_per_dim[d]; ++i )
-     {
-         rec_partitions[d].push_back( part );
-         part += ele;
-     }
-     rec_partitions[d].push_back( size_tile_per_dim );
- }
-
- partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                     rec_partitions[2] );

// basic settings for domain size and position
double cell_size = 0.1;
std::array<double, 3> global_low_corner = { 1.2, 3.3, -2.8 };
19 changes: 0 additions & 19 deletions cajita/unit_test/tstSparseLocalGrid.hpp
@@ -33,9 +33,6 @@ void sparseLocalGridTest( EntityType t2 )
double cell_size = 0.23;
std::array<int, 3> global_num_cell = { 16, 32, 64 };
int cell_num_per_tile_dim = 4;
- std::array<int, 3> global_num_tile = { 16 / cell_num_per_tile_dim,
-                                        32 / cell_num_per_tile_dim,
-                                        64 / cell_num_per_tile_dim };
std::array<double, 3> global_low_corner = { 1.2, 3.3, -2.8 };
std::array<double, 3> global_high_corner = {
global_low_corner[0] + cell_size * global_num_cell[0],
@@ -48,22 +45,6 @@ void sparseLocalGridTest( EntityType t2 )
std::array<bool, 3> periodic = { false, false, false };
DynamicPartitioner<TEST_DEVICE, 4> partitioner( MPI_COMM_WORLD,
global_num_cell, 10 );
- auto ranks_per_dim =
-     partitioner.ranksPerDimension( MPI_COMM_WORLD, global_num_cell );
- std::array<std::vector<int>, 3> rec_partitions;
- for ( int d = 0; d < 3; ++d )
- {
-     int ele = global_num_tile[d] / ranks_per_dim[d];
-     int part = 0;
-     for ( int i = 0; i < ranks_per_dim[d]; ++i )
-     {
-         rec_partitions[d].push_back( part );
-         part += ele;
-     }
-     rec_partitions[d].push_back( global_num_tile[d] );
- }
- partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                     rec_partitions[2] );

// Create global grid
auto global_grid_ptr = Cajita::createGlobalGrid(
5 changes: 0 additions & 5 deletions cajita/unit_test/tstSparseMapDynamicPartitioner.hpp
@@ -74,8 +74,6 @@ void uniform_distribution_automatic_rank()
}
rec_partitions[d].push_back( size_tile_per_dim );
}
- partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                     rec_partitions[2] );

// test getCurrentPartition function
{
@@ -324,9 +322,6 @@ void random_distribution_automatic_rank( int occupy_num_per_rank )
rec_partitions[d].push_back( size_tile_per_dim );
}

- partitioner.initializeRecPartition( rec_partitions[0], rec_partitions[1],
-                                     rec_partitions[2] );

// basic settings for domain size and position
double cell_size = 0.1;
int pre_alloc_size = size_per_dim * size_per_dim;
