Skip to content

Commit

Permalink
change omp single to master
Browse files Browse the repository at this point in the history
MPI calls have to be made from the *same* thread (the `master` thread) rather than from an arbitrary `single` thread when MPI is initialized with `MPI_THREAD_FUNNELED`, according to the MPI specification.
  • Loading branch information
terhorstd authored Feb 2, 2024
1 parent d5aa3fe commit d556da2
Showing 1 changed file with 13 additions and 10 deletions.
23 changes: 13 additions & 10 deletions nestkernel/event_delivery_manager.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -804,13 +804,14 @@ EventDeliveryManager::gather_target_data( const size_t tid )
// otherwise
gather_completed_checker_[ tid ].set_true();

#pragma omp single
#pragma omp master
{
if ( kernel().mpi_manager.adaptive_target_buffers() and buffer_size_target_data_has_changed_ )
{
resize_send_recv_buffers_target_data();
}
} // of omp single; implicit barrier
} // of omp master; (no barrier)
#pragma omp barrier

kernel().connection_manager.restore_source_table_entry_point( tid );

Expand All @@ -828,7 +829,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
#pragma omp barrier
kernel().connection_manager.clean_source_table( tid );

#pragma omp single
#pragma omp master
{
#ifdef TIMER_DETAILED
sw_communicate_target_data_.start();
Expand All @@ -837,7 +838,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
#ifdef TIMER_DETAILED
sw_communicate_target_data_.stop();
#endif
} // of omp single (implicit barrier)
} // of omp master (no barriers!)


const bool distribute_completed = distribute_target_data_buffers_( tid );
Expand All @@ -846,7 +847,7 @@ EventDeliveryManager::gather_target_data( const size_t tid )
// resize mpi buffers, if necessary and allowed
if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() )
{
#pragma omp single
#pragma omp master
{
buffer_size_target_data_has_changed_ = kernel().mpi_manager.increase_buffer_size_target_data();
}
Expand Down Expand Up @@ -874,13 +875,14 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
// assume this is the last gather round and change to false otherwise
gather_completed_checker_[ tid ].set_true();

#pragma omp single
#pragma omp master
{
if ( kernel().mpi_manager.adaptive_target_buffers() and buffer_size_target_data_has_changed_ )
{
resize_send_recv_buffers_target_data();
}
} // of omp single; implicit barrier
} // of omp master; no barrier
#pragma omp barrier

TargetSendBufferPosition send_buffer_position(
assigned_ranks, kernel().mpi_manager.get_send_recv_count_target_data_per_rank() );
Expand All @@ -897,7 +899,7 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )

#pragma omp barrier

#pragma omp single
#pragma omp master
{
#ifdef TIMER_DETAILED
sw_communicate_target_data_.start();
Expand All @@ -906,7 +908,8 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
#ifdef TIMER_DETAILED
sw_communicate_target_data_.stop();
#endif
} // of omp single (implicit barrier)
} // of omp master (no barrier)
#pragma omp barrier

// Up to here, gather_completed_checker_ just has local info: has this thread been able to write
// all data it is responsible for to buffers. Now combine with information on whether other ranks
Expand All @@ -917,7 +920,7 @@ EventDeliveryManager::gather_target_data_compressed( const size_t tid )
// resize mpi buffers, if necessary and allowed
if ( gather_completed_checker_.any_false() and kernel().mpi_manager.adaptive_target_buffers() )
{
#pragma omp single
#pragma omp master
{
buffer_size_target_data_has_changed_ = kernel().mpi_manager.increase_buffer_size_target_data();
}
Expand Down

0 comments on commit d556da2

Please sign in to comment.