Arcane v4.1.1.0
Developer documentation
MpiAdapter.cc
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2025 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7/*---------------------------------------------------------------------------*/
8/* MpiAdapter.cc (C) 2000-2025 */
9/* */
10/* Parallelism manager using MPI. */
11/*---------------------------------------------------------------------------*/
12/*---------------------------------------------------------------------------*/
13
14#include "arccore/message_passing_mpi/internal/MpiAdapter.h"
15
16#include "arccore/trace/ITraceMng.h"
17
18#include "arccore/collections/Array.h"
19
20#include "arccore/message_passing/Request.h"
21#include "arccore/message_passing/IStat.h"
22#include "arccore/message_passing/internal/SubRequestCompletionInfo.h"
23
24#include "arccore/base/IStackTraceService.h"
25#include "arccore/base/TimeoutException.h"
26#include "arccore/base/String.h"
27#include "arccore/base/NotImplementedException.h"
28#include "arccore/base/PlatformUtils.h"
29#include "arccore/base/FatalErrorException.h"
30#include "arccore/base/TraceInfo.h"
31
32#include "arccore/message_passing_mpi/StandaloneMpiMessagePassingMng.h"
33#include "arccore/message_passing_mpi/internal/MpiLock.h"
34#include "arccore/message_passing_mpi/internal/NoMpiProfiling.h"
35#include "arccore/message_passing_mpi/internal/MpiRequest.h"
36#include "arccore/message_passing_mpi/internal/MpiMachineMemoryWindowBaseInternalCreator.h"
37
38#include <cstdint>
39
40/*---------------------------------------------------------------------------*/
41/*---------------------------------------------------------------------------*/
42
43namespace Arcane::MessagePassing::Mpi
44{
45
46/*---------------------------------------------------------------------------*/
47/*---------------------------------------------------------------------------*/
48
49class MpiAdapter::RequestSet
50: public TraceAccessor
51{
52 public:
53 struct RequestInfo
54 {
55 TraceInfo m_trace;
56 String m_stack_trace;
57 };
58 public:
59 typedef std::map<MPI_Request,RequestInfo>::iterator Iterator;
60 public:
61
62 explicit RequestSet(ITraceMng* tm)
63 : TraceAccessor(tm)
64 {
65 m_trace_mng_ref = makeRef(tm);
66 if (arccoreIsCheck()){
67 m_no_check_request = false;
68 m_request_error_is_fatal = true;
69 }
70 if (Platform::getEnvironmentVariable("ARCCORE_NOREPORT_ERROR_MPIREQUEST")=="TRUE")
71 m_is_report_error_in_request = false;
72 if (Platform::getEnvironmentVariable("ARCCORE_MPIREQUEST_STACKTRACE")=="TRUE")
73 m_use_trace_full_stack = true;
74 if (Platform::getEnvironmentVariable("ARCCORE_TRACE_MPIREQUEST")=="TRUE")
75 m_trace_mpirequest = true;
76 }
77 public:
78 void addRequest(MPI_Request request)
79 {
80 if (m_no_check_request)
81 return;
82 if (m_trace_mpirequest)
83 info() << "MpiAdapter: AddRequest r=" << request;
84 _addRequest(request,TraceInfo());
85 }
86 void addRequest(MPI_Request request,const TraceInfo& ti)
87 {
88 if (m_no_check_request)
89 return;
90 if (m_trace_mpirequest)
91 info() << "MpiAdapter: AddRequest r=" << request;
92 _addRequest(request,ti);
93 }
94 void removeRequest(MPI_Request request)
95 {
96 if (m_no_check_request)
97 return;
98 if (m_trace_mpirequest)
99 info() << "MpiAdapter: RemoveRequest r=" << request;
100 _removeRequest(request);
101 }
102 void removeRequest(Iterator request_iter)
103 {
104 if (m_no_check_request)
105 return;
106 if (request_iter==m_allocated_requests.end()){
107 if (m_trace_mpirequest)
108 info() << "MpiAdapter: RemoveRequestIter null iterator";
109 return;
110 }
111 if (m_trace_mpirequest)
112 info() << "MpiAdapter: RemoveRequestIter r=" << request_iter->first;
113 m_allocated_requests.erase(request_iter);
114 }
116 Iterator findRequest(MPI_Request request)
117 {
118 if (m_no_check_request)
119 return m_allocated_requests.end();
120
121 if (_isEmptyRequest(request))
122 return m_allocated_requests.end();
123 auto ireq = m_allocated_requests.find(request);
124 if (ireq==m_allocated_requests.end()){
125 if (m_is_report_error_in_request || m_request_error_is_fatal){
126 error() << "MpiAdapter::testRequest() request not referenced "
127 << " id=" << request;
128 _checkFatalInRequest();
129 }
130 }
131 return ireq;
132 }
133 private:
134 /*!
135 * \brief Adds \a request to the set of allocated requests.
136 */
137 void _addRequest(MPI_Request request,const TraceInfo& trace_info)
138 {
139 if (request==MPI_REQUEST_NULL){
140 if (m_is_report_error_in_request || m_request_error_is_fatal){
141 error() << "MpiAdapter::_addRequest() trying to add null request";
142 _checkFatalInRequest();
143 }
144 return;
145 }
146 if (_isEmptyRequest(request))
147 return;
148 ++m_total_added_request;
149 //info() << "MPI_ADAPTER:ADD REQUEST " << request;
150 auto i = m_allocated_requests.find(request);
151 if (i!=m_allocated_requests.end()){
152 if (m_is_report_error_in_request || m_request_error_is_fatal){
153 error() << "MpiAdapter::_addRequest() request already referenced "
154 << " id=" << request;
155 _checkFatalInRequest();
156 }
157 return;
158 }
159 RequestInfo rinfo;
160 rinfo.m_trace = trace_info;
161 if (m_use_trace_full_stack)
162 rinfo.m_stack_trace = Platform::getStackTrace();
163 m_allocated_requests.insert(std::make_pair(request,rinfo));
164 }
165
166 /*!
167 * \brief Removes \a request from the set of allocated requests.
168 */
169 void _removeRequest(MPI_Request request)
170 {
171 //info() << "MPI_ADAPTER:REMOVE REQUEST " << request;
172 if (request==MPI_REQUEST_NULL){
173 if (m_is_report_error_in_request || m_request_error_is_fatal){
174 error() << "MpiAdapter::_removeRequest() null request (" << MPI_REQUEST_NULL << ")";
175 _checkFatalInRequest();
176 }
177 return;
178 }
179 if (_isEmptyRequest(request))
180 return;
181 auto i = m_allocated_requests.find(request);
182 if (i==m_allocated_requests.end()){
183 if (m_is_report_error_in_request || m_request_error_is_fatal){
184 error() << "MpiAdapter::_removeRequest() request not referenced "
185 << " id=" << request;
186 _checkFatalInRequest();
187 }
188 }
189 else
190 m_allocated_requests.erase(i);
191 }
192 public:
193 void _checkFatalInRequest()
194 {
195 if (m_request_error_is_fatal)
196 ARCCORE_FATAL("Error in requests management");
197 }
198 Int64 nbRequest() const { return m_allocated_requests.size(); }
199 Int64 totalAddedRequest() const { return m_total_added_request; }
200 void printRequests() const
201 {
202 info() << "PRINT REQUESTS\n";
203 for( auto& x : m_allocated_requests ){
204 info() << "Request id=" << x.first << " trace=" << x.second.m_trace
205 << " stack=" << x.second.m_stack_trace;
206 }
207 }
208 void setEmptyRequests(MPI_Request r1,MPI_Request r2)
209 {
210 m_empty_request1 = r1;
211 m_empty_request2 = r2;
212 }
213 public:
214 bool m_request_error_is_fatal = false;
215 bool m_is_report_error_in_request = true;
216 bool m_trace_mpirequest = false;
217 // True when requests are not checked (see setCheckRequest())
218 bool m_no_check_request = true;
219 private:
220 std::map<MPI_Request,RequestInfo> m_allocated_requests;
221 bool m_use_trace_full_stack = false;
222 MPI_Request m_empty_request1 = MPI_REQUEST_NULL;
223 MPI_Request m_empty_request2 = MPI_REQUEST_NULL;
224 Int64 m_total_added_request = 0;
225 Ref<ITraceMng> m_trace_mng_ref;
226 private:
227 bool _isEmptyRequest(MPI_Request r) const
228 {
229 return (r==m_empty_request1 || r==m_empty_request2);
230 }
231};
232
233#define ARCCORE_ADD_REQUEST(request)\
234 m_request_set->addRequest(request,A_FUNCINFO);
235
236/*---------------------------------------------------------------------------*/
237/*---------------------------------------------------------------------------*/
238
239namespace
240{
241int _checkSize(Int64 i64_size)
242{
243 if (i64_size>INT32_MAX)
244 ARCCORE_FATAL("Can not convert '{0}' to type integer",i64_size);
245 return (int)i64_size;
246}
247}
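// MPI count arguments are plain 'int': _checkSize() narrows the 64-bit sizes
// used by the adapter and fails if a value does not fit into 32 bits.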
248
249/*---------------------------------------------------------------------------*/
250/*---------------------------------------------------------------------------*/
251
252MpiAdapter::
253MpiAdapter(ITraceMng* trace,IStat* stat,MPI_Comm comm,
254 MpiLock* mpi_lock, IMpiProfiling* mpi_op)
255: TraceAccessor(trace)
256, m_stat(stat)
257, m_mpi_lock(mpi_lock)
258, m_mpi_prof(mpi_op)
259, m_communicator(comm)
260, m_comm_rank(0)
261, m_comm_size(0)
262, m_empty_request1(MPI_REQUEST_NULL)
263, m_empty_request2(MPI_REQUEST_NULL)
264{
265 m_request_set = new RequestSet(trace);
266
267 if (Platform::getEnvironmentVariable("ARCCORE_TRACE_MPI")=="TRUE")
268 m_is_trace = true;
269 {
270 String s = Platform::getEnvironmentVariable("ARCCORE_ALLOW_NULL_RANK_FOR_MPI_ANY_SOURCE");
271 if (s == "1" || s == "TRUE")
272 m_is_allow_null_rank_for_any_source = true;
273 if (s == "0" || s == "FALSE")
274 m_is_allow_null_rank_for_any_source = false;
275 }
276
277 ::MPI_Comm_rank(m_communicator,&m_comm_rank);
278 ::MPI_Comm_size(m_communicator,&m_comm_size);
279
280 // By default no MPI profiling is done; use the corresponding setter to change this
281 if (!m_mpi_prof)
282 m_mpi_prof = new NoMpiProfiling();
283
295 MPI_Irecv(m_recv_buffer_for_empty_request, 1, MPI_CHAR, MPI_PROC_NULL,
296 50505, m_communicator, &m_empty_request1); // tag value is irrelevant: the source is MPI_PROC_NULL
297
298 /*
299 * Starting with OpenMPI version 4, sends with small buffers also seem to
300 * always return the same request. It must therefore also be removed from
301 * the requests to check. The matching MPI_Recv is posted as well, so that
302 * the MPI_Isend cannot accidentally be matched by a user MPI_Recv
303 * (for instance an MPI_Recv(MPI_ANY_TAG)).
304 */
305 m_send_buffer_for_empty_request2[0] = 0;
306 MPI_Isend(m_send_buffer_for_empty_request2, 1, MPI_CHAR, m_comm_rank,
307 50505, m_communicator, &m_empty_request2);
308
309 MPI_Recv(m_recv_buffer_for_empty_request2, 1, MPI_CHAR, m_comm_rank,
310 50505, m_communicator, MPI_STATUS_IGNORE);
311
312 m_request_set->setEmptyRequests(m_empty_request1,m_empty_request2);
313}
314
315/*---------------------------------------------------------------------------*/
316/*---------------------------------------------------------------------------*/
317
318MpiAdapter::
319~MpiAdapter()
320{
321 if (m_empty_request1 != MPI_REQUEST_NULL)
322 MPI_Request_free(&m_empty_request1);
323 if (m_empty_request2 != MPI_REQUEST_NULL)
324 MPI_Request_free(&m_empty_request2);
325
326 delete m_request_set;
327 delete m_mpi_prof;
328}
329
330/*---------------------------------------------------------------------------*/
331/*---------------------------------------------------------------------------*/
332
333Request MpiAdapter::
334buildRequest(int ret,MPI_Request mpi_request)
335{
336 return MpiRequest(ret,this,mpi_request);
337}
338
339/*---------------------------------------------------------------------------*/
340/*---------------------------------------------------------------------------*/
341
342void MpiAdapter::
343_checkHasNoRequests()
344{
345 Int64 nb_request = m_request_set->nbRequest();
346 // This check cannot be done in the destructor because it may
347 // potentially throw an exception, and that must not be done
348 // in a destructor.
349 if (nb_request!=0){
350 warning() << " Pending mpi requests size=" << nb_request;
351 m_request_set->printRequests();
352 _checkFatalInRequest();
353 }
354}
355
356/*---------------------------------------------------------------------------*/
357/*---------------------------------------------------------------------------*/
358
359void MpiAdapter::
360destroy()
361{
362 _checkHasNoRequests();
363 delete this;
364}
365
366/*---------------------------------------------------------------------------*/
367/*---------------------------------------------------------------------------*/
368
369void MpiAdapter::
370setRequestErrorAreFatal(bool v)
371{
372 m_request_set->m_request_error_is_fatal = v;
373}
374bool MpiAdapter::
375isRequestErrorAreFatal() const
376{
377 return m_request_set->m_request_error_is_fatal;
378}
379
380void MpiAdapter::
381setPrintRequestError(bool v)
382{
383 m_request_set->m_is_report_error_in_request = v;
384}
385bool MpiAdapter::
386isPrintRequestError() const
387{
388 return m_request_set->m_is_report_error_in_request;
389}
390
391void MpiAdapter::
392setCheckRequest(bool v)
393{
394 m_request_set->m_no_check_request = !v;
395}
396
397bool MpiAdapter::
398isCheckRequest() const
399{
400 return !m_request_set->m_no_check_request;
401}
402
403/*---------------------------------------------------------------------------*/
404/*---------------------------------------------------------------------------*/
405
406int MpiAdapter::
407toMPISize(Int64 count)
408{
409 return _checkSize(count);
410}
411
412/*---------------------------------------------------------------------------*/
413/*---------------------------------------------------------------------------*/
414
415void MpiAdapter::
416_trace(const char* function)
417{
418 if (m_is_trace) {
419 IStackTraceService* stack_service = Platform::getStackTraceService();
420 if (stack_service)
421 info() << "MPI_TRACE: " << function << "\n" << stack_service->stackTrace().toString();
422 else
423 info() << "MPI_TRACE: " << function;
424 }
425}
426
427/*---------------------------------------------------------------------------*/
428/*---------------------------------------------------------------------------*/
429
430void MpiAdapter::
431broadcast(void* buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
432{
433 int _nb_elem = _checkSize(nb_elem);
434 _trace(MpiInfo(eMpiName::Bcast).name().localstr());
435 double begin_time = MPI_Wtime();
436 if (m_is_trace)
437 info() << "MPI_TRACE: MPI broadcast: before"
438 << " buf=" << buf
439 << " nb_elem=" << nb_elem
440 << " root=" << root
441 << " datatype=" << datatype;
442
443 m_mpi_prof->broadcast(buf, _nb_elem, datatype, root, m_communicator);
444 double end_time = MPI_Wtime();
445 double sr_time = (end_time-begin_time);
446 //TODO: determine the message size
447 m_stat->add(MpiInfo(eMpiName::Bcast).name(),sr_time,0);
448}
449
450/*---------------------------------------------------------------------------*/
451/*---------------------------------------------------------------------------*/
452
453Request MpiAdapter::
454nonBlockingBroadcast(void* buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
455{
456 MPI_Request mpi_request = MPI_REQUEST_NULL;
457 int ret = -1;
458 int _nb_elem = _checkSize(nb_elem);
459 _trace(" MPI_Bcast");
460 double begin_time = MPI_Wtime();
461 ret = MPI_Ibcast(buf,_nb_elem,datatype,root,m_communicator,&mpi_request);
462 double end_time = MPI_Wtime();
463 double sr_time = (end_time-begin_time);
464 //TODO: determine the message size
465 m_stat->add("IBroadcast",sr_time,0);
466 ARCCORE_ADD_REQUEST(mpi_request);
467 return buildRequest(ret,mpi_request);
468}
469
470/*---------------------------------------------------------------------------*/
471/*---------------------------------------------------------------------------*/
472
473void MpiAdapter::
474gather(const void* send_buf,void* recv_buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
475{
476 void* _sbuf = const_cast<void*>(send_buf);
477 int _nb_elem = _checkSize(nb_elem);
478 int _root = static_cast<int>(root);
479 _trace(MpiInfo(eMpiName::Gather).name().localstr());
480 double begin_time = MPI_Wtime();
481 m_mpi_prof->gather(_sbuf, _nb_elem, datatype, recv_buf, _nb_elem, datatype, _root, m_communicator);
482 double end_time = MPI_Wtime();
483 double sr_time = (end_time-begin_time);
484 //TODO: determine the message size
485 m_stat->add(MpiInfo(eMpiName::Gather).name(),sr_time,0);
486}
487
488/*---------------------------------------------------------------------------*/
489/*---------------------------------------------------------------------------*/
490
491Request MpiAdapter::
492nonBlockingGather(const void* send_buf,void* recv_buf,
493 Int64 nb_elem,Int32 root,MPI_Datatype datatype)
494{
495 MPI_Request mpi_request = MPI_REQUEST_NULL;
496 int ret = -1;
497 void* _sbuf = const_cast<void*>(send_buf);
498 int _nb_elem = _checkSize(nb_elem);
499 int _root = static_cast<int>(root);
500 _trace("MPI_Igather");
501 double begin_time = MPI_Wtime();
502 ret = MPI_Igather(_sbuf,_nb_elem,datatype,recv_buf,_nb_elem,datatype,_root,
503 m_communicator,&mpi_request);
504 double end_time = MPI_Wtime();
505 double sr_time = (end_time-begin_time);
506 //TODO: determine the message size
507 m_stat->add("IGather",sr_time,0);
508 ARCCORE_ADD_REQUEST(mpi_request);
509 return buildRequest(ret,mpi_request);
510}
511
512/*---------------------------------------------------------------------------*/
513/*---------------------------------------------------------------------------*/
514
515void MpiAdapter::
516allGather(const void* send_buf,void* recv_buf,
517 Int64 nb_elem,MPI_Datatype datatype)
518{
519 void* _sbuf = const_cast<void*>(send_buf);
520 int _nb_elem = _checkSize(nb_elem);
521 _trace(MpiInfo(eMpiName::Allgather).name().localstr());
522 double begin_time = MPI_Wtime();
523 m_mpi_prof->allGather(_sbuf, _nb_elem, datatype, recv_buf, _nb_elem, datatype, m_communicator);
524 double end_time = MPI_Wtime();
525 double sr_time = (end_time-begin_time);
526 //TODO: determine the message size
527 m_stat->add(MpiInfo(eMpiName::Allgather).name(),sr_time,0);
528}
529
530/*---------------------------------------------------------------------------*/
531/*---------------------------------------------------------------------------*/
532
533Request MpiAdapter::
534nonBlockingAllGather(const void* send_buf,void* recv_buf,
535 Int64 nb_elem,MPI_Datatype datatype)
536{
537 MPI_Request mpi_request = MPI_REQUEST_NULL;
538 int ret = -1;
539 void* _sbuf = const_cast<void*>(send_buf);
540 int _nb_elem = _checkSize(nb_elem);
541 _trace("MPI_Iallgather");
542 double begin_time = MPI_Wtime();
543 ret = MPI_Iallgather(_sbuf,_nb_elem,datatype,recv_buf,_nb_elem,datatype,
544 m_communicator,&mpi_request);
545 double end_time = MPI_Wtime();
546 double sr_time = (end_time-begin_time);
547 //TODO: determine the message size
548 m_stat->add("IAllGather",sr_time,0);
549 ARCCORE_ADD_REQUEST(mpi_request);
550 return buildRequest(ret,mpi_request);
551}
552
553/*---------------------------------------------------------------------------*/
554/*---------------------------------------------------------------------------*/
555
556void MpiAdapter::
557gatherVariable(const void* send_buf,void* recv_buf,const int* recv_counts,
558 const int* recv_indexes,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
559{
560 void* _sbuf = const_cast<void*>(send_buf);
561 int _nb_elem = _checkSize(nb_elem);
562 int _root = static_cast<int>(root);
563 _trace(MpiInfo(eMpiName::Gatherv).name().localstr());
564 double begin_time = MPI_Wtime();
565 m_mpi_prof->gatherVariable(_sbuf, _nb_elem, datatype, recv_buf, recv_counts, recv_indexes, datatype, _root, m_communicator);
566 double end_time = MPI_Wtime();
567 double sr_time = (end_time-begin_time);
568 //TODO: determine the message size
569 m_stat->add(MpiInfo(eMpiName::Gatherv).name().localstr(),sr_time,0);
570}
571
572/*---------------------------------------------------------------------------*/
573/*---------------------------------------------------------------------------*/
574
575void MpiAdapter::
576allGatherVariable(const void* send_buf,void* recv_buf,const int* recv_counts,
577 const int* recv_indexes,Int64 nb_elem,MPI_Datatype datatype)
578{
579 void* _sbuf = const_cast<void*>(send_buf);
580 int _nb_elem = _checkSize(nb_elem);
581 _trace(MpiInfo(eMpiName::Allgatherv).name().localstr());
582 //info() << " ALLGATHERV N=" << _nb_elem;
583 //for( int i=0; i<m_comm_size; ++i )
584 //info() << " ALLGATHERV I=" << i << " recv_count=" << recv_counts[i]
585 // << " recv_indexes=" << recv_indexes[i];
586 double begin_time = MPI_Wtime();
587 m_mpi_prof->allGatherVariable(_sbuf, _nb_elem, datatype, recv_buf, recv_counts, recv_indexes, datatype, m_communicator);
588 double end_time = MPI_Wtime();
589 double sr_time = (end_time-begin_time);
590 //TODO: determine the message size
591 m_stat->add(MpiInfo(eMpiName::Allgatherv).name().localstr(),sr_time,0);
592}
593
594/*---------------------------------------------------------------------------*/
595/*---------------------------------------------------------------------------*/
596
597void MpiAdapter::
598scatterVariable(const void* send_buf,const int* send_count,const int* send_indexes,
599 void* recv_buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
600{
601 void* _sbuf = const_cast<void*>(send_buf);
602 int* _send_count = const_cast<int*>(send_count);
603 int* _send_indexes = const_cast<int*>(send_indexes);
604 int _nb_elem = _checkSize(nb_elem);
605 _trace(MpiInfo(eMpiName::Scatterv).name().localstr());
606 double begin_time = MPI_Wtime();
607 m_mpi_prof->scatterVariable(_sbuf,
608 _send_count,
609 _send_indexes,
610 datatype,
611 recv_buf,
612 _nb_elem,
613 datatype,
614 root,
615 m_communicator);
616 double end_time = MPI_Wtime();
617 double sr_time = (end_time-begin_time);
618 //TODO: determine the message size
619 m_stat->add(MpiInfo(eMpiName::Scatterv).name(),sr_time,0);
620}
621
622/*---------------------------------------------------------------------------*/
623/*---------------------------------------------------------------------------*/
624
625void MpiAdapter::
626allToAll(const void* send_buf,void* recv_buf,Integer count,MPI_Datatype datatype)
627{
628 void* _sbuf = const_cast<void*>(send_buf);
629 int icount = _checkSize(count);
630 _trace(MpiInfo(eMpiName::Alltoall).name().localstr());
631 double begin_time = MPI_Wtime();
632 m_mpi_prof->allToAll(_sbuf, icount, datatype, recv_buf, icount, datatype, m_communicator);
633 double end_time = MPI_Wtime();
634 double sr_time = (end_time-begin_time);
635 //TODO: determine the message size
636 m_stat->add(MpiInfo(eMpiName::Alltoall).name().localstr(),sr_time,0);
637}
638
639/*---------------------------------------------------------------------------*/
640/*---------------------------------------------------------------------------*/
641
642Request MpiAdapter::
643nonBlockingAllToAll(const void* send_buf,void* recv_buf,Integer count,MPI_Datatype datatype)
644{
645 MPI_Request mpi_request = MPI_REQUEST_NULL;
646 int ret = -1;
647 void* _sbuf = const_cast<void*>(send_buf);
648 int icount = _checkSize(count);
649 _trace("MPI_IAlltoall");
650 double begin_time = MPI_Wtime();
651 ret = MPI_Ialltoall(_sbuf,icount,datatype,recv_buf,icount,datatype,m_communicator,&mpi_request);
652 double end_time = MPI_Wtime();
653 double sr_time = (end_time-begin_time);
654 //TODO: determine the message size
655 m_stat->add("IAllToAll",sr_time,0);
656 ARCCORE_ADD_REQUEST(mpi_request);
657 return buildRequest(ret,mpi_request);
658}
659
660/*---------------------------------------------------------------------------*/
661/*---------------------------------------------------------------------------*/
662
663void MpiAdapter::
664allToAllVariable(const void* send_buf,const int* send_counts,
665 const int* send_indexes,void* recv_buf,const int* recv_counts,
666 const int* recv_indexes,MPI_Datatype datatype)
667{
668 void* _sbuf = const_cast<void*>(send_buf);
669 int* _send_counts = const_cast<int*>(send_counts);
670 int* _send_indexes = const_cast<int*>(send_indexes);
671 int* _recv_counts = const_cast<int*>(recv_counts);
672 int* _recv_indexes = const_cast<int*>(recv_indexes);
673
674 _trace(MpiInfo(eMpiName::Alltoallv).name().localstr());
675 double begin_time = MPI_Wtime();
676 m_mpi_prof->allToAllVariable(_sbuf, _send_counts, _send_indexes, datatype,
677 recv_buf, _recv_counts, _recv_indexes, datatype, m_communicator);
678 double end_time = MPI_Wtime();
679 double sr_time = (end_time-begin_time);
680 //TODO: determine the message size
681 m_stat->add(MpiInfo(eMpiName::Alltoallv).name(),sr_time,0);
682}
683
684/*---------------------------------------------------------------------------*/
685/*---------------------------------------------------------------------------*/
686
687Request MpiAdapter::
688nonBlockingAllToAllVariable(const void* send_buf,const int* send_counts,
689 const int* send_indexes,void* recv_buf,const int* recv_counts,
690 const int* recv_indexes,MPI_Datatype datatype)
691{
692 MPI_Request mpi_request = MPI_REQUEST_NULL;
693 int ret = -1;
694 void* _sbuf = const_cast<void*>(send_buf);
695 int* _send_counts = const_cast<int*>(send_counts);
696 int* _send_indexes = const_cast<int*>(send_indexes);
697 int* _recv_counts = const_cast<int*>(recv_counts);
698 int* _recv_indexes = const_cast<int*>(recv_indexes);
699
700 _trace("MPI_Ialltoallv");
701 double begin_time = MPI_Wtime();
702 ret = MPI_Ialltoallv(_sbuf,_send_counts,_send_indexes,datatype,
703 recv_buf,_recv_counts,_recv_indexes,datatype,
704 m_communicator,&mpi_request);
705 double end_time = MPI_Wtime();
706 double sr_time = (end_time-begin_time);
707 //TODO: determine the message size
708 m_stat->add("IAllToAll",sr_time,0);
709 ARCCORE_ADD_REQUEST(mpi_request);
710 return buildRequest(ret,mpi_request);
711}
712
713/*---------------------------------------------------------------------------*/
714/*---------------------------------------------------------------------------*/
715
716void MpiAdapter::
717barrier()
718{
719 // TODO: in principle there should be no requests possibly in flight
720 // between two barriers, to avoid any problem.
721 // _checkHasNoRequests();
722 // TODO: add the corresponding trace for profiling.
723 MPI_Barrier(m_communicator);
724}
725
726/*---------------------------------------------------------------------------*/
727/*---------------------------------------------------------------------------*/
728
729Request MpiAdapter::
730nonBlockingBarrier()
731{
732 MPI_Request mpi_request = MPI_REQUEST_NULL;
733 int ret = -1;
734 ret = MPI_Ibarrier(m_communicator,&mpi_request);
735 ARCCORE_ADD_REQUEST(mpi_request);
736 return buildRequest(ret,mpi_request);
737}
738
739/*---------------------------------------------------------------------------*/
740/*---------------------------------------------------------------------------*/
741
742void MpiAdapter::
743allReduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
744{
745 void* _sbuf = const_cast<void*>(send_buf);
746 int _n = _checkSize(count);
747 double begin_time = MPI_Wtime();
748 _trace(MpiInfo(eMpiName::Allreduce).name().localstr());
749 try{
750 ++m_nb_all_reduce;
751 m_mpi_prof->allReduce(_sbuf, recv_buf, _n, datatype, op, m_communicator);
752 }
753 catch(TimeoutException& ex)
754 {
755 std::ostringstream ostr;
756 ostr << "MPI_Allreduce"
757 << " send_buf=" << send_buf
758 << " recv_buf=" << recv_buf
759 << " n=" << count
760 << " datatype=" << datatype
761 << " op=" << op
762 << " NB=" << m_nb_all_reduce;
763 ex.setAdditionalInfo(ostr.str());
764 throw;
765 }
766 double end_time = MPI_Wtime();
767 m_stat->add(MpiInfo(eMpiName::Allreduce).name(),end_time-begin_time,count);
768}
769
770/*---------------------------------------------------------------------------*/
771/*---------------------------------------------------------------------------*/
772
773Request MpiAdapter::
774nonBlockingAllReduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
775{
776 MPI_Request mpi_request = MPI_REQUEST_NULL;
777 int ret = -1;
778 void* _sbuf = const_cast<void*>(send_buf);
779 int _n = _checkSize(count);
780 double begin_time = MPI_Wtime();
781 _trace("MPI_IAllreduce");
782 ret = MPI_Iallreduce(_sbuf,recv_buf,_n,datatype,op,m_communicator,&mpi_request);
783 double end_time = MPI_Wtime();
784 m_stat->add("IReduce",end_time-begin_time,_n);
785 ARCCORE_ADD_REQUEST(mpi_request);
786 return buildRequest(ret,mpi_request);
787}
788
789/*---------------------------------------------------------------------------*/
790/*---------------------------------------------------------------------------*/
791
792void MpiAdapter::
793reduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op,Integer root)
794{
795 void* _sbuf = const_cast<void*>(send_buf);
796 int _n = _checkSize(count);
797 int _root = static_cast<int>(root);
798 double begin_time = MPI_Wtime();
799 _trace(MpiInfo(eMpiName::Reduce).name().localstr());
800 try{
801 ++m_nb_reduce;
802 m_mpi_prof->reduce(_sbuf, recv_buf, _n, datatype, op, _root, m_communicator);
803 }
804 catch(TimeoutException& ex)
805 {
806 std::ostringstream ostr;
807 ostr << "MPI_reduce"
808 << " send_buf=" << send_buf
809 << " recv_buf=" << recv_buf
810 << " n=" << count
811 << " datatype=" << datatype
812 << " op=" << op
813 << " root=" << root
814 << " NB=" << m_nb_reduce;
815 ex.setAdditionalInfo(ostr.str());
816 throw;
817 }
818
819 double end_time = MPI_Wtime();
820 m_stat->add(MpiInfo(eMpiName::Reduce).name(),end_time-begin_time,0);
821}
822
823/*---------------------------------------------------------------------------*/
824/*---------------------------------------------------------------------------*/
825
826void MpiAdapter::
827scan(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
828{
829 void* _sbuf = const_cast<void*>(send_buf);
830 int _n = _checkSize(count);
831 double begin_time = MPI_Wtime();
832 _trace(MpiInfo(eMpiName::Scan).name().localstr());
833 m_mpi_prof->scan(_sbuf, recv_buf, _n, datatype, op, m_communicator);
834 double end_time = MPI_Wtime();
835 m_stat->add(MpiInfo(eMpiName::Scan).name(),end_time-begin_time,count);
836}
837
838/*---------------------------------------------------------------------------*/
839/*---------------------------------------------------------------------------*/
840
841void MpiAdapter::
842directSendRecv(const void* send_buffer,Int64 send_buffer_size,
843 void* recv_buffer,Int64 recv_buffer_size,
844 Int32 proc,Int64 elem_size,MPI_Datatype data_type)
845{
846 void* v_send_buffer = const_cast<void*>(send_buffer);
847 MPI_Status mpi_status;
848 double begin_time = MPI_Wtime();
849 _trace(MpiInfo(eMpiName::Sendrecv).name().localstr());
850 int sbuf_size = _checkSize(send_buffer_size);
851 int rbuf_size = _checkSize(recv_buffer_size);
852 m_mpi_prof->sendRecv(v_send_buffer, sbuf_size, data_type, proc, 99,
853 recv_buffer, rbuf_size, data_type, proc, 99,
854 m_communicator, &mpi_status);
855 double end_time = MPI_Wtime();
856 Int64 send_size = send_buffer_size * elem_size;
857 Int64 recv_size = recv_buffer_size * elem_size;
858 double sr_time = (end_time-begin_time);
859
860 //debug(Trace::High) << "MPI SendRecv: send " << send_size << " recv "
861 // << recv_size << " time " << sr_time ;
862 m_stat->add(MpiInfo(eMpiName::Sendrecv).name(),sr_time,send_size+recv_size);
863}
864
865/*---------------------------------------------------------------------------*/
866/*---------------------------------------------------------------------------*/
867
868Request MpiAdapter::
869sendNonBlockingNoStat(const void* send_buffer,Int64 send_buffer_size,
870 Int32 dest_rank,MPI_Datatype data_type,int mpi_tag)
871{
872 void* v_send_buffer = const_cast<void*>(send_buffer);
873 MPI_Request mpi_request = MPI_REQUEST_NULL;
874 int sbuf_size = _checkSize(send_buffer_size);
875 int ret = 0;
876 m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, dest_rank, mpi_tag, m_communicator, &mpi_request);
877 if (m_is_trace)
878 info() << " ISend ret=" << ret << " proc=" << dest_rank << " tag=" << mpi_tag << " request=" << mpi_request;
879 ARCCORE_ADD_REQUEST(mpi_request);
880 return buildRequest(ret,mpi_request);
881}
882
883/*---------------------------------------------------------------------------*/
884/*---------------------------------------------------------------------------*/
885
886Request MpiAdapter::
887directSend(const void* send_buffer,Int64 send_buffer_size,
888 Int32 proc,Int64 elem_size,MPI_Datatype data_type,
889 int mpi_tag,bool is_blocked
890 )
891{
892 void* v_send_buffer = const_cast<void*>(send_buffer);
893 MPI_Request mpi_request = MPI_REQUEST_NULL;
894
895 double begin_time = 0.0;
896 double end_time = 0.0;
897 Int64 send_size = send_buffer_size * elem_size;
898 int ret = 0;
899 if (m_is_trace)
900 info() << "MPI_TRACE: MPI Send: send before"
901 << " size=" << send_size
902 << " dest=" << proc
903 << " tag=" << mpi_tag
904 << " datatype=" << data_type
905 << " blocking " << is_blocked;
906 if (is_blocked){
907 // if m_mpi_lock is not null, an
908 // MPI_Isend followed by an active
909 // MPI_Test loop must be used to
910 // avoid any deadlock issue.
911 if (m_mpi_lock){
912 {
913 MpiLock::Section mls(m_mpi_lock);
914 begin_time = MPI_Wtime();
915 int sbuf_size = _checkSize(send_buffer_size);
916 m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator, &mpi_request);
917 }
918 int is_finished = 0;
919 MPI_Status mpi_status;
920 while (is_finished==0){
921 MpiLock::Section mls(m_mpi_lock);
922 MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
923 if (is_finished!=0){
924 m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
925 end_time = MPI_Wtime();
926 mpi_request = MPI_REQUEST_NULL;
927 }
928 }
929 }
930 else{
931 MpiLock::Section mls(m_mpi_lock);
932 begin_time = MPI_Wtime();
933 int sbuf_size = _checkSize(send_buffer_size);
934 m_mpi_prof->send(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator);
935 end_time = MPI_Wtime();
936 }
937 }
938 else{
939 {
940 MpiLock::Section mls(m_mpi_lock);
941 begin_time = MPI_Wtime();
942 int sbuf_size = _checkSize(send_buffer_size);
943 m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator, &mpi_request);
944 if (m_is_trace)
945 info() << " ISend ret=" << ret << " proc=" << proc << " tag=" << mpi_tag << " request=" << mpi_request;
946 end_time = MPI_Wtime();
947 ARCCORE_ADD_REQUEST(mpi_request);
948 }
949 if (m_is_trace){
950 info() << "MPI Send: send after"
951 << " request=" << mpi_request;
952 }
953 }
954 double sr_time = (end_time-begin_time);
955
956 debug(Trace::High) << "MPI Send: send " << send_size
957 << " time " << sr_time << " blocking " << is_blocked;
958 // TODO(FL): figure out how to profile the Isend
959 m_stat->add(MpiInfo(eMpiName::Send).name(),end_time-begin_time,send_size);
960 return buildRequest(ret,mpi_request);
961}
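// Note: for blocking sends the returned Request wraps MPI_REQUEST_NULL; only
// the non-blocking branch registers a real request via ARCCORE_ADD_REQUEST.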
962
963/*---------------------------------------------------------------------------*/
964/*---------------------------------------------------------------------------*/
965
966Request MpiAdapter::
967directSendPack(const void* send_buffer,Int64 send_buffer_size,
968 Int32 proc,int mpi_tag,bool is_blocked)
969{
970 return directSend(send_buffer,send_buffer_size,proc,1,MPI_PACKED,mpi_tag,is_blocked);
971}
972
973/*---------------------------------------------------------------------------*/
974/*---------------------------------------------------------------------------*/
975
976MpiMessagePassingMng* MpiAdapter::
977commSplit(bool keep)
978{
979 MPI_Comm new_comm;
980
981 MPI_Comm_split(m_communicator, (keep) ? 1 : MPI_UNDEFINED, commRank(), &new_comm);
982 if (keep) {
983 // Failed if new_comm is MPI_COMM_NULL
984 return StandaloneMpiMessagePassingMng::create(new_comm, true);
985 }
986 return nullptr;
987}
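// Ranks calling commSplit(false) pass MPI_UNDEFINED as the colour, obtain
// MPI_COMM_NULL from MPI_Comm_split() and therefore return nullptr: they do
// not belong to the new communicator.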
988
989/*---------------------------------------------------------------------------*/
990/*---------------------------------------------------------------------------*/
991
992Request MpiAdapter::
993receiveNonBlockingNoStat(void* recv_buffer,Int64 recv_buffer_size,
994 Int32 source_rank,MPI_Datatype data_type,int mpi_tag)
995{
996 int rbuf_size = _checkSize(recv_buffer_size);
997 int ret = 0;
998 MPI_Request mpi_request = MPI_REQUEST_NULL;
999 m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, source_rank, mpi_tag, m_communicator, &mpi_request);
1000 ARCCORE_ADD_REQUEST(mpi_request);
1001 return buildRequest(ret,mpi_request);
1002}
1003
1004/*---------------------------------------------------------------------------*/
1005/*---------------------------------------------------------------------------*/
1006
1007Request MpiAdapter::
1008directRecv(void* recv_buffer,Int64 recv_buffer_size,
1009 Int32 proc,Int64 elem_size,MPI_Datatype data_type,
1010 int mpi_tag,bool is_blocked)
1011{
1012 MPI_Status mpi_status;
1013 MPI_Request mpi_request = MPI_REQUEST_NULL;
1014 int ret = 0;
1015 double begin_time = 0.0;
1016 double end_time = 0.0;
1017
1018 int i_proc = 0;
1019 if (proc==A_PROC_NULL_RANK)
1020 ARCCORE_THROW(NotImplementedException,"Receive with MPI_PROC_NULL");
1021 if (proc == A_NULL_RANK && !m_is_allow_null_rank_for_any_source)
1022 ARCCORE_FATAL("Can not use A_NULL_RANK for any source. Use A_ANY_SOURCE_RANK instead");
1023 if (proc==A_NULL_RANK || proc==A_ANY_SOURCE_RANK)
1024 i_proc = MPI_ANY_SOURCE;
1025 else
1026 i_proc = static_cast<int>(proc);
1027
1028 Int64 recv_size = recv_buffer_size * elem_size;
1029 if (m_is_trace){
1030 info() << "MPI_TRACE: MPI Recv: recv before "
1031 << " size=" << recv_size
1032 << " from=" << i_proc
1033 << " tag=" << mpi_tag
1034 << " datatype=" << data_type
1035 << " blocking=" << is_blocked;
1036 }
1037 if (is_blocked){
1038 // if m_mpi_lock is not null, an
1039 // MPI_Irecv followed by an active
1040 // MPI_Test loop must be used to
1041 // avoid any deadlock issue.
1042 if (m_mpi_lock){
1043 {
1044 MpiLock::Section mls(m_mpi_lock);
1045 begin_time = MPI_Wtime();
1046 int rbuf_size = _checkSize(recv_buffer_size);
1047 m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1048 }
1049 int is_finished = 0;
1050 MPI_Status mpi_status;
1051 while (is_finished==0){
1052 MpiLock::Section mls(m_mpi_lock);
1053 MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
1054 if (is_finished!=0){
1055 end_time = MPI_Wtime();
1056 m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
1057 mpi_request = MPI_REQUEST_NULL;
1058 }
1059 }
1060 }
1061 else{
1062 MpiLock::Section mls(m_mpi_lock);
1063 begin_time = MPI_Wtime();
1064 int rbuf_size = _checkSize(recv_buffer_size);
1065 m_mpi_prof->recv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_status);
1066 end_time = MPI_Wtime();
1067 }
1068 }
1069 else{
1070 {
1071 MpiLock::Section mls(m_mpi_lock);
1072 begin_time = MPI_Wtime();
1073 int rbuf_size = _checkSize(recv_buffer_size);
1074 m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1075 end_time = MPI_Wtime();
1076 ARCCORE_ADD_REQUEST(mpi_request);
1077 }
1078 if (m_is_trace){
1079 info() << "MPI Recv: recv after "
1080 << " request=" << mpi_request;
1081 }
1082 }
1083 double sr_time = (end_time-begin_time);
1084
1085 debug(Trace::High) << "MPI Recv: recv after " << recv_size
1086 << " time " << sr_time << " blocking " << is_blocked;
1087 m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
1088 return buildRequest(ret,mpi_request);
1089}
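// Note: as for directSend(), the blocking branch returns a Request wrapping
// MPI_REQUEST_NULL. Passing A_NULL_RANK instead of A_ANY_SOURCE_RANK is fatal
// unless m_is_allow_null_rank_for_any_source is enabled (see the handling of
// ARCCORE_ALLOW_NULL_RANK_FOR_MPI_ANY_SOURCE in the constructor).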
1090
1091/*---------------------------------------------------------------------------*/
1092/*---------------------------------------------------------------------------*/
1093
1094void MpiAdapter::
1095probeRecvPack(UniqueArray<Byte>& recv_buffer,Int32 proc)
1096{
1097 double begin_time = MPI_Wtime();
1098 MPI_Status status;
1099 int recv_buffer_size = 0;
1100 _trace("MPI_Probe");
1101 m_mpi_prof->probe(proc, 101, m_communicator, &status);
1102 m_mpi_prof->getCount(&status, MPI_PACKED, &recv_buffer_size);
1103
1104 recv_buffer.resize(recv_buffer_size);
1105 m_mpi_prof->recv(recv_buffer.data(), recv_buffer_size, MPI_PACKED, proc, 101, m_communicator, &status);
1106
1107 double end_time = MPI_Wtime();
1108 Int64 recv_size = recv_buffer_size;
1109 double sr_time = (end_time-begin_time);
1110 debug(Trace::High) << "MPI probeRecvPack " << recv_size
1111 << " time " << sr_time;
1112 m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
1113}
1114
1115/*---------------------------------------------------------------------------*/
1116/*---------------------------------------------------------------------------*/
1117
1118MessageSourceInfo MpiAdapter::
1119_buildSourceInfoFromStatus(const MPI_Status& mpi_status)
1120{
1121 // Get the message size in bytes.
1122 MPI_Count message_size = 0;
1123 MPI_Get_elements_x(&mpi_status,MPI_BYTE,&message_size);
1124 MessageTag tag(mpi_status.MPI_TAG);
1125 MessageRank rank(mpi_status.MPI_SOURCE);
1126 return MessageSourceInfo(rank,tag,message_size);
1127}
1128
1129/*---------------------------------------------------------------------------*/
1130/*---------------------------------------------------------------------------*/
1131
1132MessageId MpiAdapter::
1133_probeMessage(MessageRank source,MessageTag tag,bool is_blocking)
1134{
1135 MPI_Status mpi_status;
1136 int has_message = 0;
1137 MPI_Message message;
1138 int ret = 0;
1139 int mpi_source = source.value();
1140 if (source.isProcNull())
1141 ARCCORE_THROW(NotImplementedException,"Probe with MPI_PROC_NULL");
1142 if (source.isNull() && !m_is_allow_null_rank_for_any_source)
1143 ARCCORE_FATAL("Can not use MPI_Mprobe with null rank. Use MessageRank::anySourceRank() instead");
1144 if (source.isNull() || source.isAnySource())
1145 mpi_source = MPI_ANY_SOURCE;
1146 int mpi_tag = tag.value();
1147 if (tag.isNull())
1148 mpi_tag = MPI_ANY_TAG;
1149 if (is_blocking){
1150 ret = MPI_Mprobe(mpi_source,mpi_tag,m_communicator,&message,&mpi_status);
1151 has_message = true;
1152 }
1153 else {
1154 ret = MPI_Improbe(mpi_source, mpi_tag, m_communicator, &has_message, &message, &mpi_status);
1155 }
1156 if (ret!=0)
1157 ARCCORE_FATAL("Error during call to MPI_Mprobe r={0}",ret);
1158 MessageId ret_message;
1159 if (has_message!=0){
1160 MessageSourceInfo si(_buildSourceInfoFromStatus(mpi_status));
1161 ret_message = MessageId(si,message);
1162 }
1163 return ret_message;
1164}
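// The MPI_Message handle returned by MPI_Mprobe/MPI_Improbe is stored inside
// the returned MessageId, so the matched message can later be received with
// directRecv(MessageId,...), which uses MPI_Imrecv/MPI_Mrecv.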
1165
1166/*---------------------------------------------------------------------------*/
1167/*---------------------------------------------------------------------------*/
1168
1169MessageId MpiAdapter::
1170probeMessage(PointToPointMessageInfo message)
1171{
1172 if (!message.isValid())
1173 return MessageId();
1174
1175 // The message must have been initialized with a (rank/tag) pair.
1176 if (!message.isRankTag())
1177 ARCCORE_FATAL("Invalid message_info: message.isRankTag() is false");
1178
1179 return _probeMessage(message.destinationRank(),message.tag(),message.isBlocking());
1180}
1181
1182/*---------------------------------------------------------------------------*/
1183/*---------------------------------------------------------------------------*/
1184
1185MessageSourceInfo MpiAdapter::
1186_legacyProbeMessage(MessageRank source,MessageTag tag,bool is_blocking)
1187{
1188 MPI_Status mpi_status;
1189 int has_message = 0;
1190 int ret = 0;
1191 int mpi_source = source.value();
1192 if (source.isProcNull())
1193 ARCCORE_THROW(NotImplementedException,"Probe with MPI_PROC_NULL");
1194 if (source.isNull() && !m_is_allow_null_rank_for_any_source)
1195 ARCCORE_FATAL("Can not use MPI_Probe with null rank. Use MessageRank::anySourceRank() instead");
1196 if (source.isNull() || source.isAnySource())
1197 mpi_source = MPI_ANY_SOURCE;
1198 int mpi_tag = tag.value();
1199 if (tag.isNull())
1200 mpi_tag = MPI_ANY_TAG;
1201 if (is_blocking){
1202 ret = MPI_Probe(mpi_source,mpi_tag,m_communicator,&mpi_status);
1203 has_message = true;
1204 }
1205 else
1206 ret = MPI_Iprobe(mpi_source,mpi_tag,m_communicator,&has_message,&mpi_status);
1207 if (ret!=0)
1208 ARCCORE_FATAL("Error during call to MPI_Mprobe r={0}",ret);
1209 if (has_message!=0)
1210 return _buildSourceInfoFromStatus(mpi_status);
1211 return {};
1212}
1213
1214/*---------------------------------------------------------------------------*/
1215/*---------------------------------------------------------------------------*/
1216
1217MessageSourceInfo MpiAdapter::
1218legacyProbeMessage(PointToPointMessageInfo message)
1219{
1220 if (!message.isValid())
1221 return {};
1222
1223 // The message must have been initialized with a (rank/tag) pair.
1224 if (!message.isRankTag())
1225 ARCCORE_FATAL("Invalid message_info: message.isRankTag() is false");
1226
1227 return _legacyProbeMessage(message.destinationRank(),message.tag(),message.isBlocking());
1228}
1229
1230/*---------------------------------------------------------------------------*/
1231/*---------------------------------------------------------------------------*/
1233Request MpiAdapter::
1234directRecv(void* recv_buffer,Int64 recv_buffer_size,
1235 MessageId message,Int64 elem_size,MPI_Datatype data_type,
1236 bool is_blocked)
1237{
1238 MPI_Status mpi_status;
1239 MPI_Request mpi_request = MPI_REQUEST_NULL;
1240 MPI_Message mpi_message = (MPI_Message)message;
1241 int ret = 0;
1242 double begin_time = 0.0;
1243 double end_time = 0.0;
1244
1245 Int64 recv_size = recv_buffer_size * elem_size;
1246 if (m_is_trace){
1247 info() << "MPI_TRACE: MPI Mrecv: recv before "
1248 << " size=" << recv_size
1249 << " from_msg=" << message
1250 << " datatype=" << data_type
1251 << " blocking=" << is_blocked;
1252 }
1253 if (is_blocked){
1254 // if m_mpi_lock is not null, an
1255 // MPI_Irecv followed by an active
1256 // MPI_Test loop must be used to
1257 // avoid any deadlock issue.
1258 if (m_mpi_lock){
1259 {
1260 MpiLock::Section mls(m_mpi_lock);
1261 begin_time = MPI_Wtime();
1262 int rbuf_size = _checkSize(recv_buffer_size);
1263 MPI_Imrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_request);
1264 //m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1265 }
1266 int is_finished = 0;
1267 MPI_Status mpi_status;
1268 while (is_finished==0){
1269 MpiLock::Section mls(m_mpi_lock);
1270 MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
1271 if (is_finished!=0){
1272 end_time = MPI_Wtime();
1273 m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
1274 mpi_request = MPI_REQUEST_NULL;
1275 }
1276 }
1277 }
1278 else{
1279 MpiLock::Section mls(m_mpi_lock);
1280 begin_time = MPI_Wtime();
1281 int rbuf_size = _checkSize(recv_buffer_size);
1282 MPI_Mrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_status);
1283 //m_mpi_prof->recv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_status);
1284 end_time = MPI_Wtime();
1285 }
1286 }
1287 else{
1288 {
1289 MpiLock::Section mls(m_mpi_lock);
1290 begin_time = MPI_Wtime();
1291 int rbuf_size = _checkSize(recv_buffer_size);
1292 //m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1293 ret = MPI_Imrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_request);
1294 //m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1295 end_time = MPI_Wtime();
1296 ARCCORE_ADD_REQUEST(mpi_request);
1297 }
1298 if (m_is_trace){
1299 info() << "MPI Recv: recv after "
1300 << " request=" << mpi_request;
1301 }
1302 }
1303 double sr_time = (end_time-begin_time);
1304
1305 debug(Trace::High) << "MPI Recv: recv after " << recv_size
1306 << " time " << sr_time << " blocking " << is_blocked;
1307 m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
1308 return buildRequest(ret,mpi_request);
1309}
1310
1311/*---------------------------------------------------------------------------*/
1312/*---------------------------------------------------------------------------*/
1313
1314Request MpiAdapter::
1315directRecvPack(void* recv_buffer,Int64 recv_buffer_size,
1316 Int32 proc,int mpi_tag,bool is_blocking)
1317{
1318 return directRecv(recv_buffer,recv_buffer_size,proc,1,MPI_PACKED,mpi_tag,is_blocking);
1319}
1320
1321/*---------------------------------------------------------------------------*/
1322/*---------------------------------------------------------------------------*/
1323
1324// FIXME: Implement direct method with MPI_STATUS_IGNORE
1325void MpiAdapter::
1326waitAllRequests(ArrayView<Request> requests)
1327{
1328 UniqueArray<bool> indexes(requests.size());
1329 UniqueArray<MPI_Status> mpi_status(requests.size());
1330 while (_waitAllRequestsMPI(requests, indexes, mpi_status)){
1331 ; // Keep looping as long as there are requests.
1332 }
1333}
1334
1335/*---------------------------------------------------------------------------*/
1336/*---------------------------------------------------------------------------*/
1337
1338// FIXME: Implement direct method with MPI_STATUS_IGNORE
1339void MpiAdapter::
1340waitSomeRequests(ArrayView<Request> requests,
1341 ArrayView<bool> indexes,
1342 bool is_non_blocking)
1343{
1344 UniqueArray<MPI_Status> mpi_status(requests.size());
1345 waitSomeRequestsMPI(requests, indexes, mpi_status, is_non_blocking);
1346}
1347
1348/*---------------------------------------------------------------------------*/
1349/*---------------------------------------------------------------------------*/
1350
1351struct SubRequestInfo
1352{
1353 SubRequestInfo(Ref<ISubRequest> sr, Integer i, int source_rank, int source_tag)
1354 : sub_request(sr)
1355 , index(i)
1356 , mpi_source_rank(source_rank)
1357 , mpi_source_tag(source_tag)
1358 {}
1359
1360 Ref<ISubRequest> sub_request;
1361 Integer index = -1;
1362 int mpi_source_rank = MPI_PROC_NULL;
1363 int mpi_source_tag = 0;
1364};
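// SubRequestInfo keeps a completed request's sub-request together with the
// source rank and tag taken from its MPI_Status, so that executeOnCompletion()
// can be invoked after the MPI lock has been released (see _handleEndRequests()).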
1365
1366/*---------------------------------------------------------------------------*/
1367/*---------------------------------------------------------------------------*/
1368
1369bool MpiAdapter::
1370_handleEndRequests(ArrayView<Request> requests,ArrayView<bool> done_indexes,
1371 ArrayView<MPI_Status> status)
1372{
1373 UniqueArray<SubRequestInfo> new_requests;
1374 Integer size = requests.size();
1375 {
1376 MpiLock::Section mls(m_mpi_lock);
1377 for( Integer i=0; i<size; ++i ) {
1378 if (done_indexes[i]){
1379 // Be careful to use a reference here, otherwise the reset
1380 // is not applied to the right variable
1381 Request& r = requests[i];
1382 // Note: the request may be invalid (for instance if it is
1383 // a blocking request) but still have a sub-request.
1384 if (r.hasSubRequest()){
1385 if (m_is_trace)
1386 info() << "Done request with sub-request r=" << r << " mpi_r=" << r << " i=" << i
1387 << " source_rank=" << status[i].MPI_SOURCE
1388 << " source_tag=" << status[i].MPI_TAG;
1389 new_requests.add(SubRequestInfo(r.subRequest(), i, status[i].MPI_SOURCE, status[i].MPI_TAG));
1390 }
1391 if (r.isValid()){
1392 _removeRequest((MPI_Request)(r));
1393 r.reset();
1394 }
1395 }
1396 }
1397 }
1398
1399 // NOTE: calls to the sub-requests may generate other requests.
1400 // Care must be taken not to use the sub-requests while
1401 // the lock is held.
1402 bool has_new_request = false;
1403 if (!new_requests.empty()){
1404 // Holds the status of the \a i-th request
1405 UniqueArray<MPI_Status> old_status(size);
1406 {
1407 Integer index = 0;
1408 for( Integer i=0; i<size; ++i ){
1409 if (done_indexes[i]){
1410 old_status[i] = status[index];
1411 ++index;
1412 }
1413 }
1414 }
1415 // If there are new requests, the 'status' values must be shifted
1416 for( SubRequestInfo& sri : new_requests ){
1417 Integer index = sri.index;
1418 if (m_is_trace)
1419 info() << "Before handle new request index=" << index
1420 << " sri.source_rank=" << sri.mpi_source_rank
1421 << " sri.source_tag=" << sri.mpi_source_tag;
1422 SubRequestCompletionInfo completion_info(MessageRank(old_status[index].MPI_SOURCE), MessageTag(old_status[index].MPI_TAG));
1423 Request r = sri.sub_request->executeOnCompletion(completion_info);
1424 if (m_is_trace)
1425 info() << "Handle new request index=" << index << " old_r=" << requests[index] << " new_r=" << r;
1426 // If there is a new request, it replaces
1427 // the old one, so we must behave as if
1428 // the original request had not completed.
1429 if (r.isValid()){
1430 has_new_request = true;
1431 requests[index] = r;
1432 done_indexes[index] = false;
1433 }
1434 }
1435 {
1436 Integer index = 0;
1437 for( Integer i=0; i<size; ++i ){
1438 if (done_indexes[i]){
1439 status[index] = old_status[i];
1440 ++index;
1441 }
1442 }
1443 }
1444
1445 }
1446 return has_new_request;
1447}
1448
1449/*---------------------------------------------------------------------------*/
1450/*---------------------------------------------------------------------------*/
1451
1452bool MpiAdapter::
1453_waitAllRequestsMPI(ArrayView<Request> requests,
1454 ArrayView<bool> indexes,
1455 ArrayView<MPI_Status> mpi_status)
1456{
1457 Integer size = requests.size();
1458 if (size==0)
1459 return false;
1460 // WARNING: MPI modifies this array on return from MPI_Waitall
1461 UniqueArray<MPI_Request> mpi_request(size);
1462 for( Integer i=0; i<size; ++i ){
1463 mpi_request[i] = (MPI_Request)(requests[i]);
1464 }
1465 if (m_is_trace)
1466 info() << " MPI_waitall begin size=" << size;
1467 double diff_time = 0.0;
1468 if (m_mpi_lock){
1469 double begin_time = MPI_Wtime();
1470 for( Integer i=0; i<size; ++i ){
1471 MPI_Request request = (MPI_Request)(mpi_request[i]);
1472 int is_finished = 0;
1473 while (is_finished==0){
1474 MpiLock::Section mls(m_mpi_lock);
1475 m_mpi_prof->test(&request, &is_finished, (MPI_Status *) MPI_STATUS_IGNORE);
1476 }
1477 }
1478 double end_time = MPI_Wtime();
1479 diff_time = end_time - begin_time;
1480 }
1481 else{
1482 //TODO: turn this into a while loop using MPI_Testall if m_mpi_lock is not null
1483 MpiLock::Section mls(m_mpi_lock);
1484 double begin_time = MPI_Wtime();
1485 m_mpi_prof->waitAll(size, mpi_request.data(), mpi_status.data());
1486 double end_time = MPI_Wtime();
1487 diff_time = end_time - begin_time;
1488 }
1489
1490 // Mark every request as handled since a waitall was done.
1491 for( Integer i=0; i<size; ++i ){
1492 indexes[i] = true;
1493 }
1494
1495 bool has_new_request = _handleEndRequests(requests,indexes,mpi_status);
1496 if (m_is_trace)
1497 info() << " MPI_waitall end size=" << size;
1498 m_stat->add(MpiInfo(eMpiName::Waitall).name(),diff_time,size);
1499 return has_new_request;
1500}
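// _waitAllRequestsMPI() returns true when completed sub-requests generated new
// requests; waitAllRequests() then calls it again until nothing remains.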
1501
1502/*---------------------------------------------------------------------------*/
1503/*---------------------------------------------------------------------------*/
1504
1505void MpiAdapter::
1506waitSomeRequestsMPI(ArrayView<Request> requests,ArrayView<bool> indexes,
1507 ArrayView<MPI_Status> mpi_status,bool is_non_blocking)
1508{
1509 Integer size = requests.size();
1510 if (size==0)
1511 return;
1512 //TODO: use StackArray (when available...)
1513 UniqueArray<MPI_Request> mpi_request(size);
1514 UniqueArray<MPI_Request> saved_mpi_request(size);
1515 UniqueArray<int> completed_requests(size);
1516 int nb_completed_request = 0;
1517
1518 // Save the request so it can later be deallocated from m_allocated_requests,
1519 // because its value will no longer be valid after the MPI_Wait* call
1520 for (Integer i = 0; i < size; ++i) {
1521 // When this method is called several times
1522 // with the same request array, there may be
1523 // invalid requests that would make the MPI call crash.
1524 if (!requests[i].isValid()) {
1525 saved_mpi_request[i] = MPI_REQUEST_NULL;
1526 }
1527 else {
1528 saved_mpi_request[i] = static_cast<MPI_Request>(requests[i]);
1529 }
1530 }
1531
1532 // Only print debug output in blocking mode, or when explicitly requested,
1533 // to avoid too many messages
1534 bool is_print_debug = m_is_trace || (!is_non_blocking);
1535 if (is_print_debug)
1536 debug() << "WaitRequestBegin is_non_blocking=" << is_non_blocking << " n=" << size;
1537
1538 double begin_time = MPI_Wtime();
1539
1540 try{
1541 if (is_non_blocking){
1542 _trace(MpiInfo(eMpiName::Testsome).name().localstr());
1543 {
1544 MpiLock::Section mls(m_mpi_lock);
1545 m_mpi_prof->testSome(size, saved_mpi_request.data(), &nb_completed_request,
1546 completed_requests.data(), mpi_status.data());
1547 }
1548 //If there is no active handle in the list, it returns outcount = MPI_UNDEFINED.
1549 if (nb_completed_request == MPI_UNDEFINED) // If no request was valid.
1550 nb_completed_request = 0;
1551 if (is_print_debug)
1552 debug() << "WaitSomeRequestMPI: TestSome nb_completed=" << nb_completed_request;
1553 }
1554 else{
1555 _trace(MpiInfo(eMpiName::Waitsome).name().localstr());
1556 {
1557 // TODO: if the lock exists, a testSome() loop must be used
1558 // so as not to block.
1559 MpiLock::Section mls(m_mpi_lock);
1560 m_mpi_prof->waitSome(size, saved_mpi_request.data(), &nb_completed_request,
1561 completed_requests.data(), mpi_status.data());
1562 }
1563 // Do not use mpi_request[i] because it is modified by MPI:
1564 // mpi_request[i] == MPI_REQUEST_NULL
1565 if (nb_completed_request == MPI_UNDEFINED) // If no request was valid.
1566 nb_completed_request = 0;
1567 if (is_print_debug)
1568 debug() << "WaitSomeRequest nb_completed=" << nb_completed_request;
1569 }
1570 }
1571 catch(TimeoutException& ex)
1572 {
1573 std::ostringstream ostr;
1574 if (is_non_blocking)
1575 ostr << MpiInfo(eMpiName::Testsome).name();
1576 else
1577 ostr << MpiInfo(eMpiName::Waitsome).name();
1578 ostr << " size=" << size
1579 << " is_non_blocking=" << is_non_blocking;
1580 ex.setAdditionalInfo(ostr.str());
1581 throw;
1582 }
1583
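// Only the entries whose requests completed in this call are flagged below;
// the other entries of 'indexes' keep their previous value.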
1584 for( int z=0; z<nb_completed_request; ++z ){
1585 int index = completed_requests[z];
1586 if (is_print_debug)
1587 debug() << "Completed my_rank=" << m_comm_rank << " z=" << z
1588 << " index=" << index
1589 << " tag=" << mpi_status[z].MPI_TAG
1590 << " source=" << mpi_status[z].MPI_SOURCE;
1591
1592 indexes[index] = true;
1593 }
1594
1595 bool has_new_request = _handleEndRequests(requests,indexes,mpi_status);
1596 if (has_new_request){
1597 // If new requests were generated, it is possible that none of the
1598 // original requests has completed. With testSome this is not a problem;
1599 // with waitSome it means we have to wait again.
1600 }
1601 double end_time = MPI_Wtime();
1602 m_stat->add(MpiInfo(eMpiName::Waitsome).name(),end_time-begin_time,size);
1603}
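// Illustrative sketch of how a caller might drive waitSomeRequestsMPI(); the
// names 'adapter', 'requests', 'done' and 'statuses' are hypothetical and not
// part of this file.
//
//   UniqueArray<bool> done(requests.size());
//   UniqueArray<MPI_Status> statuses(requests.size());
//   for (Integer i = 0; i < done.size(); ++i)
//     done[i] = false;
//   // Blocking form: returns once at least one request has completed (MPI_Waitsome).
//   adapter->waitSomeRequestsMPI(requests, done, statuses, false);
//   // Polling form: returns immediately, flagging only already-finished requests (MPI_Testsome).
//   adapter->waitSomeRequestsMPI(requests, done, statuses, true);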
1604
1605/*---------------------------------------------------------------------------*/
1606/*---------------------------------------------------------------------------*/
1607
1608void MpiAdapter::
1609freeRequest(Request& request)
1610{
1611 if (!request.isValid()){
1612 warning() << "MpiAdapter::freeRequest() null request r=" << (MPI_Request)request;
1613 _checkFatalInRequest();
1614 return;
1615 }
1616 {
1617 MpiLock::Section mls(m_mpi_lock);
1618
1619 auto mr = (MPI_Request)request;
1620 _removeRequest(mr);
1621 MPI_Request_free(&mr);
1622 }
1623 request.reset();
1624}
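// Note: MPI_Request_free only marks the request for deallocation; if the
// associated communication is still in flight, MPI lets it complete and
// releases the handle afterwards. The Arccore-side Request is reset
// immediately in either case.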
1625
1626/*---------------------------------------------------------------------------*/
1627/*---------------------------------------------------------------------------*/
1628
1629bool MpiAdapter::
1630testRequest(Request& request)
1631{
1632 // MPI allows doing a test on a null request.
1633 if (!request.isValid())
1634 return true;
1635
1636 auto mr = (MPI_Request)request;
1637 int is_finished = 0;
1638
1639 {
1640 MpiLock::Section mls(m_mpi_lock);
1641
1642 // The request's location must be retrieved first because, if the
1643 // request has completed, it is automatically freed by MPI during the test
1644 // and can then no longer be removed.
1645 RequestSet::Iterator request_iter = m_request_set->findRequest(mr);
1646
1647 m_mpi_prof->test(&mr, &is_finished, (MPI_Status *) MPI_STATUS_IGNORE);
1648 //info() << "** TEST REQUEST r=" << mr << " is_finished=" << is_finished;
1649 if (is_finished!=0){
1650 m_request_set->removeRequest(request_iter);
1651 if (request.hasSubRequest())
1652 ARCCORE_THROW(NotImplementedException,"SubRequest support");
1653 request.reset();
1654 return true;
1655 }
1656 }
1657
1658 return false;
1659}
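// Illustrative sketch of a polling loop built on testRequest(); 'adapter',
// 'request' and doOtherWork() are hypothetical names.
//
//   while (!adapter->testRequest(request)) {
//     doOtherWork();          // the request is still in flight
//   }
//   // Once testRequest() returns true the request has completed and has been
//   // reset, so it must not be waited on again.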
1660
1661/*---------------------------------------------------------------------------*/
1662/*---------------------------------------------------------------------------*/
1666 void MpiAdapter::
1667 _addRequest(MPI_Request request)
1668{
1669 m_request_set->addRequest(request);
1670}
1671
1672/*---------------------------------------------------------------------------*/
1673/*---------------------------------------------------------------------------*/
1677 void MpiAdapter::
1678 _removeRequest(MPI_Request request)
1679{
1680 m_request_set->removeRequest(request);
1681}
1682
1683/*---------------------------------------------------------------------------*/
1684/*---------------------------------------------------------------------------*/
1685
1686void MpiAdapter::
1687enableDebugRequest(bool enable_debug_request)
1688{
1689 m_stat->enable(enable_debug_request);
1690 //if (!m_enable_debug_request)
1691 //info() << "WARNING: Mpi adapter debug request is disabled (multi-threading)";
1692}
1693
1694/*---------------------------------------------------------------------------*/
1695/*---------------------------------------------------------------------------*/
1696
1697void MpiAdapter::
1698_checkFatalInRequest()
1699{
1700 if (isRequestErrorAreFatal())
1701 ARCCORE_FATAL("Error in requests management");
1702}
1703
1704/*---------------------------------------------------------------------------*/
1705/*---------------------------------------------------------------------------*/
1706
1707void MpiAdapter::
1708setMpiProfiling(IMpiProfiling* mpi_profiling)
1709{
1710 m_mpi_prof = mpi_profiling;
1711}
1712
1713/*---------------------------------------------------------------------------*/
1714/*---------------------------------------------------------------------------*/
1715
1716IMpiProfiling* MpiAdapter::
1717getMpiProfiling() const
1718{
1719 return m_mpi_prof;
1720}
1721
1722/*---------------------------------------------------------------------------*/
1723/*---------------------------------------------------------------------------*/
1724
1725void MpiAdapter::
1726setProfiler(IProfiler* profiler)
1727{
1728 if (!profiler){
1729 m_mpi_prof = nullptr;
1730 return;
1731 }
1732
1733 IMpiProfiling* p = dynamic_cast<IMpiProfiling*>(profiler);
1734 if (!p)
1735 ARCCORE_FATAL("Invalid profiler. Profiler has to implement interface 'IMpiProfiling'");
1736 m_mpi_prof = p;
1737}
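// Illustrative sketch: any profiler installed through setProfiler() must also
// implement IMpiProfiling, otherwise ARCCORE_FATAL is raised; 'adapter' and
// 'my_mpi_profiling' are hypothetical names.
//
//   adapter->setProfiler(my_mpi_profiling);  // accepted: implements IMpiProfiling
//   adapter->setProfiler(nullptr);           // clears the current profiler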
1738
1739/*---------------------------------------------------------------------------*/
1740/*---------------------------------------------------------------------------*/
1741
1742IProfiler* MpiAdapter::
1743profiler() const
1744{
1745 return m_mpi_prof;
1746}
1747
1748/*---------------------------------------------------------------------------*/
1749/*---------------------------------------------------------------------------*/
1750
1751 MpiMachineMemoryWindowBaseInternalCreator* MpiAdapter::
1752 windowCreator(MPI_Comm comm_machine)
1753{
1754 if (m_window_creator.isNull()) {
1755 Integer machine_comm_rank = 0;
1756 Integer machine_comm_size = 0;
1757 ::MPI_Comm_rank(comm_machine, &machine_comm_rank);
1758 ::MPI_Comm_size(comm_machine, &machine_comm_size);
1759 m_window_creator = makeRef(new MpiMachineMemoryWindowBaseInternalCreator(comm_machine, machine_comm_rank, machine_comm_size, m_communicator, m_comm_size));
1760 }
1761 return m_window_creator.get();
1762}
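// Note: the creator is built lazily on the first call, using the rank and size
// of 'comm_machine' (presumably a node-local communicator) together with the
// global communicator, and the same instance is returned on subsequent calls.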
1763
1764/*---------------------------------------------------------------------------*/
1765/*---------------------------------------------------------------------------*/
1766
1767} // End namespace Arcane::MessagePassing::Mpi
1768
1769/*---------------------------------------------------------------------------*/
1770/*---------------------------------------------------------------------------*/