// Thin convenience wrapper around a raw MPI communicator, combining direct
// MPI calls with the Arcane/Arccore message-passing layer.
// NOTE(review): this extract is garbled (original file line numbers fused
// into the text) and the struct's closing brace and some members (rank,
// size, m_message_passing_mng) are outside this view.
199struct mpi_communicator
// Underlying MPI communicator handle; defaults to the null communicator
// until a real one is attached via the explicit constructor.
201 MPI_Comm comm = MPI_COMM_NULL;
// Default-constructed wrapper holds MPI_COMM_NULL and no cached rank/size.
206 mpi_communicator() =
default;
// Wraps an existing communicator and caches this process's rank and the
// communicator size (members declared outside this view).
// NOTE(review): the member-initializer list (original lines 209-210) is
// missing from this extract — presumably it initializes `comm` from the
// argument; confirm against the full source.
208 explicit mpi_communicator(MPI_Comm comm)
211 MPI_Comm_rank(comm, &rank);
212 MPI_Comm_size(comm, &size);
// Implicit conversion to the raw MPI_Comm handle so the wrapper can be
// passed directly to MPI functions.
// NOTE(review): the body (original lines 217-219) is missing from this
// extract — presumably `return comm;`; confirm against the full source.
216 operator MPI_Comm()
const
// Gathers a per-rank count `n` from every rank and converts the result into
// a cumulative offset table: v has size+1 entries, v[0] == 0, and
// v[i+1] - v[i] is rank i's contribution.
// NOTE(review): the function signature (original lines 223-225) and the
// return statement are missing from this extract — the helper's name,
// parameter `n`, and return value cannot be confirmed from here.
222 template <
typename T>
226 std::vector<T> v(size + 1);
// v[0] stays zero (value-initialized); each rank's count lands at v[rank+1].
228 MPI_Allgather(&n, 1, mpi_datatype<T>(), &v[1], 1, mpi_datatype<T>(), comm);
// In-place inclusive scan turns the counts into cumulative offsets.
229 std::partial_sum(v.begin(), v.end(), v.begin());
233 std::complex<long double> reduceSum(
const std::complex<long double>& lval)
const
235 return _reduceSumForComplex(lval);
237 std::complex<double> reduceSum(
const std::complex<double>& lval)
const
239 return _reduceSumForComplex(lval);
241 std::complex<float> reduceSum(
const std::complex<float>& lval)
const
243 return _reduceSumForComplex(lval);
246 template <
typename T> T reduceSum(
const T& lval)
const
248 return mpAllReduce(m_message_passing_mng.get(), MessagePassing::eReduceType::ReduceSum, lval);
251 void waitAll(ArrayView<MessagePassing::Request> requests)
const
253 mpWaitAll(m_message_passing_mng.get(), requests);
255 void wait(MessagePassing::Request request)
const
257 ArrayView<MessagePassing::Request> requests(1, &request);
258 mpWaitAll(m_message_passing_mng.get(), requests);
270 template <
class Condition,
class Message>
271 void check(
const Condition& cond,
const Message& message)
273 int lc =
static_cast<int>(cond);
274 int gc = _reduce(MPI_PROD, lc);
277 std::vector<int> c(size);
278 MPI_Gather(&lc, 1, MPI_INT, &c[0], size, MPI_INT, 0, comm);
280 std::cerr <<
"Failed assumption: " << message << std::endl;
281 std::cerr <<
"Offending processes:";
282 for (
int i = 0; i < size; ++i)
284 std::cerr <<
" " << i;
285 std::cerr << std::endl;
288 ARCCORE_FATAL(
"CheckError in MessagePassingUtils: {0}", message);
// Non-blocking typed receive of `count` elements of T from rank `source`
// with tag `tag`, written into `buf`; returns the pending request.
// NOTE(review): the template header / return type (original line ~292) and
// the lines building `schar` and `msg_info` (original lines 294-298) are
// missing from this extract — presumably they mirror doReceive() below but
// with eBlockingType::NonBlocking; confirm against the full source.
293 doIReceive(T* buf,
int count,
int source,
int tag)
const
299 return mpReceive(m_message_passing_mng.
get(), schar, msg_info);
302 template <
typename T>
void
303 doReceive(T* buf,
int count,
int source,
int tag)
const
305 using namespace Arcane::MessagePassing;
306 Span<T> s(buf, count);
307 Span<unsigned char> schar(
reinterpret_cast<unsigned char*
>(s.data()), s.sizeBytes());
308 PointToPointMessageInfo msg_info(MessageRank{ source }, MessageTag{ tag }, eBlockingType::Blocking);
309 mpReceive(m_message_passing_mng.get(), schar, msg_info);
312 template <
typename T> MessagePassing::Request
313 doISend(
const T* buf,
int count,
int dest,
int tag)
const
315 using namespace Arcane::MessagePassing;
316 Span<const T> s(buf, count);
317 Span<const unsigned char> schar(
reinterpret_cast<const unsigned char*
>(s.data()), s.sizeBytes());
318 PointToPointMessageInfo msg_info(MessageRank{ dest }, MessageTag{ tag }, eBlockingType::NonBlocking);
319 return mpSend(m_message_passing_mng.get(), schar, msg_info);
322 template <
typename T>
void
323 doSend(
const T* buf,
int count,
int dest,
int tag)
const
325 using namespace Arcane::MessagePassing;
326 Span<const T> s(buf, count);
327 Span<const unsigned char> schar(
reinterpret_cast<const unsigned char*
>(s.data()), s.sizeBytes());
328 PointToPointMessageInfo msg_info(MessageRank{ dest }, MessageTag{ tag }, eBlockingType::Blocking);
329 mpSend(m_message_passing_mng.get(), schar, msg_info);
// Generic MPI all-reduce of `lval` under operation `op` across all ranks.
// `elems` counts the scalar components of T via the project's
// static_rows/static_cols traits (1 for plain scalars, R*C for fixed-size
// matrix types), so matrix types reduce element-wise in one call.
// NOTE(review): the declaration of `gval` (original lines 337-338) and the
// `return gval;` (original lines 340-341) are missing from this extract —
// confirm against the full source.
334 template <
typename T> T _reduce(MPI_Op op,
const T& lval)
const
336 const int elems = math::static_rows<T>::value * math::static_cols<T>::value;
// NOTE(review): the C-style cast drops const on lval for the send buffer;
// MPI-3 declares sendbuf as `const void*`, so the cast may be removable —
// confirm the minimum supported MPI version.
339 MPI_Allreduce((
void*)&lval, &gval, elems, mpi_datatype<T>(), op, comm);
343 template <
typename T> std::complex<T>
344 _reduceSumForComplex(
const std::complex<T>& lval)
const
347 FixedArray<T, 2> values = { { lval.real(), lval.imag() } };
348 mpAllReduce(m_message_passing_mng.get(), MessagePassing::eReduceType::ReduceSum, values.
view());
349 return std::complex<T>(values[0], values[1]);