#include "arccore/message_passing_mpi/internal/MpiAdapter.h"

#include "arccore/trace/ITraceMng.h"

#include "arccore/collections/Array.h"

#include "arccore/message_passing/Request.h"
#include "arccore/message_passing/IStat.h"
#include "arccore/message_passing/internal/SubRequestCompletionInfo.h"

#include "arccore/base/IStackTraceService.h"
#include "arccore/base/TimeoutException.h"
#include "arccore/base/String.h"
#include "arccore/base/NotImplementedException.h"
#include "arccore/base/PlatformUtils.h"
#include "arccore/base/FatalErrorException.h"
#include "arccore/base/TraceInfo.h"

#include "arccore/message_passing_mpi/StandaloneMpiMessagePassingMng.h"
#include "arccore/message_passing_mpi/internal/MpiLock.h"
#include "arccore/message_passing_mpi/internal/NoMpiProfiling.h"
#include "arccore/message_passing_mpi/internal/MpiRequest.h"
#include "arccore/message_passing_mpi/internal/MpiMachineMemoryWindowBaseInternalCreator.h"
namespace Arcane::MessagePassing::Mpi
{
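// MpiAdapter::RequestSet keeps track of every MPI_Request created by the
// adapter so that requests which are added twice, removed twice or never
// completed can be detected and reported, optionally as fatal errors.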
typedef std::map<MPI_Request,RequestInfo>::iterator Iterator;

m_request_error_is_fatal = true;
m_is_report_error_in_request = false;
m_use_trace_full_stack = true;
m_trace_mpirequest = true;
void addRequest(MPI_Request request)
{
  if (m_no_check_request)
    return;
  if (m_trace_mpirequest)
    info() << "MpiAdapter: AddRequest r=" << request;
  _addRequest(request,TraceInfo());
}
void addRequest(MPI_Request request,const TraceInfo& ti)
{
  if (m_no_check_request)
    return;
  if (m_trace_mpirequest)
    info() << "MpiAdapter: AddRequest r=" << request;
  _addRequest(request,ti);
}
void removeRequest(MPI_Request request)
{
  if (m_no_check_request)
    return;
  if (m_trace_mpirequest)
    info() << "MpiAdapter: RemoveRequest r=" << request;
  _removeRequest(request);
}
void removeRequest(Iterator request_iter)
{
  if (m_no_check_request)
    return;
  if (request_iter==m_allocated_requests.end()){
    if (m_trace_mpirequest)
      info() << "MpiAdapter: RemoveRequestIter null iterator";
    return;
  }
  if (m_trace_mpirequest)
    info() << "MpiAdapter: RemoveRequestIter r=" << request_iter->first;
  m_allocated_requests.erase(request_iter);
}
//! Checks that the request is in the list.
Iterator findRequest(MPI_Request request)
{
  if (m_no_check_request)
    return m_allocated_requests.end();
  if (_isEmptyRequest(request))
    return m_allocated_requests.end();
  auto ireq = m_allocated_requests.find(request);
  if (ireq==m_allocated_requests.end()){
    if (m_is_report_error_in_request || m_request_error_is_fatal){
      error() << "MpiAdapter::testRequest() request not referenced "
              << " id=" << request;
      _checkFatalInRequest();
    }
  }
  return ireq;
}
void _addRequest(MPI_Request request,const TraceInfo& trace_info)
{
  if (request==MPI_REQUEST_NULL){
    if (m_is_report_error_in_request || m_request_error_is_fatal){
      error() << "MpiAdapter::_addRequest() trying to add null request";
      _checkFatalInRequest();
    }
    return;
  }
  if (_isEmptyRequest(request))
    return;
  ++m_total_added_request;

  auto i = m_allocated_requests.find(request);
  if (i!=m_allocated_requests.end()){
    if (m_is_report_error_in_request || m_request_error_is_fatal){
      error() << "MpiAdapter::_addRequest() request already referenced "
              << " id=" << request;
      _checkFatalInRequest();
    }
    return;
  }
  RequestInfo rinfo;
  rinfo.m_trace = trace_info;
  if (m_use_trace_full_stack)
    rinfo.m_stack_trace = Platform::getStackTrace();
  m_allocated_requests.insert(std::make_pair(request,rinfo));
}
void _removeRequest(MPI_Request request)
{
  if (request==MPI_REQUEST_NULL){
    if (m_is_report_error_in_request || m_request_error_is_fatal){
      error() << "MpiAdapter::_removeRequest() null request (" << MPI_REQUEST_NULL << ")";
      _checkFatalInRequest();
    }
    return;
  }
  if (_isEmptyRequest(request))
    return;
  auto i = m_allocated_requests.find(request);
  if (i==m_allocated_requests.end()){
    if (m_is_report_error_in_request || m_request_error_is_fatal){
      error() << "MpiAdapter::_removeRequest() request not referenced "
              << " id=" << request;
      _checkFatalInRequest();
    }
  }
  else
    m_allocated_requests.erase(i);
}
void _checkFatalInRequest()
{
  if (m_request_error_is_fatal)
    ARCCORE_FATAL("Error in requests management");
}
Int64 nbRequest() const { return m_allocated_requests.size(); }
Int64 totalAddedRequest() const { return m_total_added_request; }
void printRequests() const
{
  info() << "PRINT REQUESTS\n";
  for( auto& x : m_allocated_requests ){
    info() << "Request id=" << x.first << " trace=" << x.second.m_trace
           << " stack=" << x.second.m_stack_trace;
  }
}
void setEmptyRequests(MPI_Request r1,MPI_Request r2)
{
  m_empty_request1 = r1;
  m_empty_request2 = r2;
}
bool m_request_error_is_fatal = false;
bool m_is_report_error_in_request = true;
bool m_trace_mpirequest = false;
//! True if requests are not checked.
bool m_no_check_request = true;

std::map<MPI_Request,RequestInfo> m_allocated_requests;
bool m_use_trace_full_stack = false;
MPI_Request m_empty_request1 = MPI_REQUEST_NULL;
MPI_Request m_empty_request2 = MPI_REQUEST_NULL;
Int64 m_total_added_request = 0;

bool _isEmptyRequest(MPI_Request r) const
{
  return (r==m_empty_request1 || r==m_empty_request2);
}
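// ARCCORE_ADD_REQUEST registers a freshly created MPI_Request in the
// adapter's RequestSet together with A_FUNCINFO (the calling function's
// trace information), so pending requests can later be reported with their
// origin.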
#define ARCCORE_ADD_REQUEST(request)\
  m_request_set->addRequest(request,A_FUNCINFO);
int _checkSize(Int64 i64_size)
{
  if (i64_size>INT32_MAX)
    ARCCORE_FATAL("Can not convert '{0}' to type integer",i64_size);
  return (int)i64_size;
}
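// Count arguments of the classic MPI C API are plain 'int': _checkSize()
// rejects any 64-bit element count that does not fit into 32 bits instead of
// silently truncating it.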
MpiAdapter::
MpiAdapter(ITraceMng* trace,IStat* stat,MPI_Comm comm,
           MpiLock* mpi_lock, IMpiProfiling* mpi_op)
: TraceAccessor(trace)
, m_stat(stat)
, m_mpi_lock(mpi_lock)
, m_mpi_prof(mpi_op)
, m_communicator(comm)
, m_machine_communicator(MPI_COMM_NULL)
, m_empty_request1(MPI_REQUEST_NULL)
, m_empty_request2(MPI_REQUEST_NULL)
, m_window_creator(nullptr)
{
  m_request_set = new RequestSet(trace);

  if (s == "1" || s == "TRUE")
    m_is_allow_null_rank_for_any_source = true;
  if (s == "0" || s == "FALSE")
    m_is_allow_null_rank_for_any_source = false;

  ::MPI_Comm_rank(m_communicator,&m_comm_rank);
  ::MPI_Comm_size(m_communicator,&m_comm_size);

  if (!m_mpi_prof)
    m_mpi_prof = new NoMpiProfiling();

  MPI_Irecv(m_recv_buffer_for_empty_request, 1, MPI_CHAR, MPI_PROC_NULL,
            50505, m_communicator, &m_empty_request1);

  m_send_buffer_for_empty_request2[0] = 0;
  MPI_Isend(m_send_buffer_for_empty_request2, 1, MPI_CHAR, m_comm_rank,
            50505, m_communicator, &m_empty_request2);

  MPI_Recv(m_recv_buffer_for_empty_request2, 1, MPI_CHAR, m_comm_rank,
           50505, m_communicator, MPI_STATUS_IGNORE);

  m_request_set->setEmptyRequests(m_empty_request1,m_empty_request2);
}
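// The two "empty" requests built above act as sentinels: m_empty_request1 is
// an MPI_Irecv from MPI_PROC_NULL (which completes immediately, per the MPI
// standard, without touching its buffer) and m_empty_request2 is an MPI_Isend
// to self whose matching MPI_Recv has already completed. Both handles stay
// valid until MPI_Request_free() in the destructor and are ignored by
// RequestSet (_isEmptyRequest), so they are never reported as leaked requests.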
if (m_empty_request1 != MPI_REQUEST_NULL)
  MPI_Request_free(&m_empty_request1);
if (m_empty_request2 != MPI_REQUEST_NULL)
  MPI_Request_free(&m_empty_request2);

delete m_request_set;

delete m_window_creator;
if (m_machine_communicator != MPI_COMM_NULL)
  MPI_Comm_free(&m_machine_communicator);
buildRequest(int ret,MPI_Request mpi_request)
{
  return MpiRequest(ret,this,mpi_request);
}
Int64 nb_request = m_request_set->nbRequest();

warning() << " Pending mpi requests size=" << nb_request;
m_request_set->printRequests();
_checkFatalInRequest();

_checkHasNoRequests();
setRequestErrorAreFatal(bool v)
{
  m_request_set->m_request_error_is_fatal = v;
}

isRequestErrorAreFatal() const
{
  return m_request_set->m_request_error_is_fatal;
}

setPrintRequestError(bool v)
{
  m_request_set->m_is_report_error_in_request = v;
}

isPrintRequestError() const
{
  return m_request_set->m_is_report_error_in_request;
}

setCheckRequest(bool v)
{
  m_request_set->m_no_check_request = !v;
}

isCheckRequest() const
{
  return !m_request_set->m_no_check_request;
}

toMPISize(Int64 count)
{
  return _checkSize(count);
}
_trace(const char* function)
{
  if (m_is_trace){
    IStackTraceService* stack_service = Platform::getStackTraceService();
    if (stack_service)
      info() << "MPI_TRACE: " << function << "\n" << stack_service->stackTrace().toString();
    else
      info() << "MPI_TRACE: " << function;
  }
}
broadcast(void* buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
{
  int _nb_elem = _checkSize(nb_elem);
  _trace(MpiInfo(eMpiName::Bcast).name().localstr());
  double begin_time = MPI_Wtime();
  if (m_is_trace)
    info() << "MPI_TRACE: MPI broadcast: before"
           << " nb_elem=" << nb_elem
           << " datatype=" << datatype;
  m_mpi_prof->broadcast(buf, _nb_elem, datatype, root, m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Bcast).name(),sr_time,0);
}
nonBlockingBroadcast(void* buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
{
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  int _nb_elem = _checkSize(nb_elem);
  _trace(" MPI_Bcast");
  double begin_time = MPI_Wtime();
  ret = MPI_Ibcast(buf,_nb_elem,datatype,root,m_communicator,&mpi_request);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add("IBroadcast",sr_time,0);
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
gather(const void* send_buf,void* recv_buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int _nb_elem = _checkSize(nb_elem);
  int _root = static_cast<int>(root);
  _trace(MpiInfo(eMpiName::Gather).name().localstr());
  double begin_time = MPI_Wtime();
  m_mpi_prof->gather(_sbuf, _nb_elem, datatype, recv_buf, _nb_elem, datatype, _root, m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Gather).name(),sr_time,0);
}
nonBlockingGather(const void* send_buf,void* recv_buf,
                  Int64 nb_elem,Int32 root,MPI_Datatype datatype)
{
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  void* _sbuf = const_cast<void*>(send_buf);
  int _nb_elem = _checkSize(nb_elem);
  int _root = static_cast<int>(root);
  _trace("MPI_Igather");
  double begin_time = MPI_Wtime();
  ret = MPI_Igather(_sbuf,_nb_elem,datatype,recv_buf,_nb_elem,datatype,_root,
                    m_communicator,&mpi_request);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add("IGather",sr_time,0);
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
allGather(const void* send_buf,void* recv_buf,
          Int64 nb_elem,MPI_Datatype datatype)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int _nb_elem = _checkSize(nb_elem);
  _trace(MpiInfo(eMpiName::Allgather).name().localstr());
  double begin_time = MPI_Wtime();
  m_mpi_prof->allGather(_sbuf, _nb_elem, datatype, recv_buf, _nb_elem, datatype, m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Allgather).name(),sr_time,0);
}
nonBlockingAllGather(const void* send_buf,void* recv_buf,
                     Int64 nb_elem,MPI_Datatype datatype)
{
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  void* _sbuf = const_cast<void*>(send_buf);
  int _nb_elem = _checkSize(nb_elem);
  _trace("MPI_Iallgather");
  double begin_time = MPI_Wtime();
  ret = MPI_Iallgather(_sbuf,_nb_elem,datatype,recv_buf,_nb_elem,datatype,
                       m_communicator,&mpi_request);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add("IAllGather",sr_time,0);
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
gatherVariable(const void* send_buf,void* recv_buf,const int* recv_counts,
               const int* recv_indexes,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int _nb_elem = _checkSize(nb_elem);
  int _root = static_cast<int>(root);
  _trace(MpiInfo(eMpiName::Gatherv).name().localstr());
  double begin_time = MPI_Wtime();
  m_mpi_prof->gatherVariable(_sbuf, _nb_elem, datatype, recv_buf, recv_counts, recv_indexes, datatype, _root, m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Gatherv).name().localstr(),sr_time,0);
}
allGatherVariable(const void* send_buf,void* recv_buf,const int* recv_counts,
                  const int* recv_indexes,Int64 nb_elem,MPI_Datatype datatype)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int _nb_elem = _checkSize(nb_elem);
  _trace(MpiInfo(eMpiName::Allgatherv).name().localstr());
  double begin_time = MPI_Wtime();
  m_mpi_prof->allGatherVariable(_sbuf, _nb_elem, datatype, recv_buf, recv_counts, recv_indexes, datatype, m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Allgatherv).name().localstr(),sr_time,0);
}
scatterVariable(const void* send_buf,const int* send_count,const int* send_indexes,
                void* recv_buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int* _send_count = const_cast<int*>(send_count);
  int* _send_indexes = const_cast<int*>(send_indexes);
  int _nb_elem = _checkSize(nb_elem);
  _trace(MpiInfo(eMpiName::Scatterv).name().localstr());
  double begin_time = MPI_Wtime();
  m_mpi_prof->scatterVariable(_sbuf,
                              _send_count,
                              _send_indexes,
                              datatype,
                              recv_buf,
                              _nb_elem,
                              datatype,
                              root,
                              m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Scatterv).name(),sr_time,0);
}
allToAll(const void* send_buf,void* recv_buf,Integer count,MPI_Datatype datatype)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int icount = _checkSize(count);
  _trace(MpiInfo(eMpiName::Alltoall).name().localstr());
  double begin_time = MPI_Wtime();
  m_mpi_prof->allToAll(_sbuf, icount, datatype, recv_buf, icount, datatype, m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Alltoall).name().localstr(),sr_time,0);
}
nonBlockingAllToAll(const void* send_buf,void* recv_buf,Integer count,MPI_Datatype datatype)
{
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  void* _sbuf = const_cast<void*>(send_buf);
  int icount = _checkSize(count);
  _trace("MPI_IAlltoall");
  double begin_time = MPI_Wtime();
  ret = MPI_Ialltoall(_sbuf,icount,datatype,recv_buf,icount,datatype,m_communicator,&mpi_request);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add("IAllToAll",sr_time,0);
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
allToAllVariable(const void* send_buf,const int* send_counts,
                 const int* send_indexes,void* recv_buf,const int* recv_counts,
                 const int* recv_indexes,MPI_Datatype datatype)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int* _send_counts = const_cast<int*>(send_counts);
  int* _send_indexes = const_cast<int*>(send_indexes);
  int* _recv_counts = const_cast<int*>(recv_counts);
  int* _recv_indexes = const_cast<int*>(recv_indexes);

  _trace(MpiInfo(eMpiName::Alltoallv).name().localstr());
  double begin_time = MPI_Wtime();
  m_mpi_prof->allToAllVariable(_sbuf, _send_counts, _send_indexes, datatype,
                               recv_buf, _recv_counts, _recv_indexes, datatype, m_communicator);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Alltoallv).name(),sr_time,0);
}
nonBlockingAllToAllVariable(const void* send_buf,const int* send_counts,
                            const int* send_indexes,void* recv_buf,const int* recv_counts,
                            const int* recv_indexes,MPI_Datatype datatype)
{
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  void* _sbuf = const_cast<void*>(send_buf);
  int* _send_counts = const_cast<int*>(send_counts);
  int* _send_indexes = const_cast<int*>(send_indexes);
  int* _recv_counts = const_cast<int*>(recv_counts);
  int* _recv_indexes = const_cast<int*>(recv_indexes);

  _trace("MPI_Ialltoallv");
  double begin_time = MPI_Wtime();
  ret = MPI_Ialltoallv(_sbuf,_send_counts,_send_indexes,datatype,
                       recv_buf,_recv_counts,_recv_indexes,datatype,
                       m_communicator,&mpi_request);
  double end_time = MPI_Wtime();
  double sr_time = (end_time-begin_time);

  m_stat->add("IAllToAll",sr_time,0);
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
MPI_Barrier(m_communicator);

MPI_Request mpi_request = MPI_REQUEST_NULL;
int ret = 0;
ret = MPI_Ibarrier(m_communicator,&mpi_request);
ARCCORE_ADD_REQUEST(mpi_request);
return buildRequest(ret,mpi_request);
allReduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int _n = _checkSize(count);
  double begin_time = MPI_Wtime();
  _trace(MpiInfo(eMpiName::Allreduce).name().localstr());
  ++m_nb_all_reduce;
  try{
    m_mpi_prof->allReduce(_sbuf, recv_buf, _n, datatype, op, m_communicator);
  }
  catch(TimeoutException& ex)
  {
    std::ostringstream ostr;
    ostr << "MPI_Allreduce"
         << " send_buf=" << send_buf
         << " recv_buf=" << recv_buf
         << " datatype=" << datatype
         << " NB=" << m_nb_all_reduce;
    ex.setAdditionalInfo(ostr.str());
    throw;
  }
  double end_time = MPI_Wtime();
  m_stat->add(MpiInfo(eMpiName::Allreduce).name(),end_time-begin_time,count);
}
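// If a TimeoutException is raised while the collective call above is in
// progress, the arguments of the call (buffers, datatype, call counter) are
// attached to the exception through setAdditionalInfo() before it is
// rethrown, so the final error report shows which operation timed out.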
nonBlockingAllReduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
{
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  void* _sbuf = const_cast<void*>(send_buf);
  int _n = _checkSize(count);
  double begin_time = MPI_Wtime();
  _trace("MPI_IAllreduce");
  ret = MPI_Iallreduce(_sbuf,recv_buf,_n,datatype,op,m_communicator,&mpi_request);
  double end_time = MPI_Wtime();
  m_stat->add("IReduce",end_time-begin_time,_n);
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
reduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op,Integer root)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int _n = _checkSize(count);
  int _root = static_cast<int>(root);
  double begin_time = MPI_Wtime();
  _trace(MpiInfo(eMpiName::Reduce).name().localstr());
  ++m_nb_reduce;
  try{
    m_mpi_prof->reduce(_sbuf, recv_buf, _n, datatype, op, _root, m_communicator);
  }
  catch(TimeoutException& ex)
  {
    std::ostringstream ostr;
    ostr << " send_buf=" << send_buf
         << " recv_buf=" << recv_buf
         << " datatype=" << datatype
         << " NB=" << m_nb_reduce;
    ex.setAdditionalInfo(ostr.str());
    throw;
  }

  double end_time = MPI_Wtime();
  m_stat->add(MpiInfo(eMpiName::Reduce).name(),end_time-begin_time,0);
}
scan(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
{
  void* _sbuf = const_cast<void*>(send_buf);
  int _n = _checkSize(count);
  double begin_time = MPI_Wtime();
  _trace(MpiInfo(eMpiName::Scan).name().localstr());
  m_mpi_prof->scan(_sbuf, recv_buf, _n, datatype, op, m_communicator);
  double end_time = MPI_Wtime();
  m_stat->add(MpiInfo(eMpiName::Scan).name(),end_time-begin_time,count);
}
directSendRecv(const void* send_buffer,Int64 send_buffer_size,
               void* recv_buffer,Int64 recv_buffer_size,
               Int32 proc,Int64 elem_size,MPI_Datatype data_type)
{
  void* v_send_buffer = const_cast<void*>(send_buffer);
  MPI_Status mpi_status;
  double begin_time = MPI_Wtime();
  _trace(MpiInfo(eMpiName::Sendrecv).name().localstr());
  int sbuf_size = _checkSize(send_buffer_size);
  int rbuf_size = _checkSize(recv_buffer_size);
  m_mpi_prof->sendRecv(v_send_buffer, sbuf_size, data_type, proc, 99,
                       recv_buffer, rbuf_size, data_type, proc, 99,
                       m_communicator, &mpi_status);
  double end_time = MPI_Wtime();
  Int64 send_size = send_buffer_size * elem_size;
  Int64 recv_size = recv_buffer_size * elem_size;
  double sr_time = (end_time-begin_time);

  m_stat->add(MpiInfo(eMpiName::Sendrecv).name(),sr_time,send_size+recv_size);
}
sendNonBlockingNoStat(const void* send_buffer,Int64 send_buffer_size,
                      Int32 dest_rank,MPI_Datatype data_type,int mpi_tag)
{
  void* v_send_buffer = const_cast<void*>(send_buffer);
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int sbuf_size = _checkSize(send_buffer_size);
  int ret = 0;
  m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, dest_rank, mpi_tag, m_communicator, &mpi_request);
  if (m_is_trace)
    info() << " ISend ret=" << ret << " proc=" << dest_rank << " tag=" << mpi_tag << " request=" << mpi_request;
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
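// directSend() below has three execution paths: a blocking send while an MPI
// lock is in use posts an MPI_Isend and polls its completion with
// MPI_Request_get_status so the lock can be released between two tests; a
// blocking send without the lock issues a plain blocking send; and a
// non-blocking send posts an MPI_Isend whose request is registered with
// ARCCORE_ADD_REQUEST and returned to the caller.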
directSend(const void* send_buffer,Int64 send_buffer_size,
           Int32 proc,Int64 elem_size,MPI_Datatype data_type,
           int mpi_tag,bool is_blocked)
{
  void* v_send_buffer = const_cast<void*>(send_buffer);
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  double begin_time = 0.0;
  double end_time = 0.0;
  Int64 send_size = send_buffer_size * elem_size;
  if (m_is_trace){
    info() << "MPI_TRACE: MPI Send: send before"
           << " size=" << send_size
           << " tag=" << mpi_tag
           << " datatype=" << data_type
           << " blocking " << is_blocked;
  }
  if (is_blocked){
    // Blocking send with the MPI lock active: post an MPI_Isend and poll its
    // completion so that the lock is released between two tests.
    if (m_mpi_lock){
      {
        MpiLock::Section mls(m_mpi_lock);
        begin_time = MPI_Wtime();
        int sbuf_size = _checkSize(send_buffer_size);
        m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator, &mpi_request);
      }
      int is_finished = 0;
      MPI_Status mpi_status;
      while (is_finished==0){
        MpiLock::Section mls(m_mpi_lock);
        MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
        if (is_finished!=0){
          m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
          end_time = MPI_Wtime();
          mpi_request = MPI_REQUEST_NULL;
        }
      }
    }
    else{
      // Blocking send without the lock: plain blocking MPI send.
      MpiLock::Section mls(m_mpi_lock);
      begin_time = MPI_Wtime();
      int sbuf_size = _checkSize(send_buffer_size);
      m_mpi_prof->send(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator);
      end_time = MPI_Wtime();
    }
  }
  else{
    // Non-blocking send: the request is registered and returned to the caller.
    {
      MpiLock::Section mls(m_mpi_lock);
      begin_time = MPI_Wtime();
      int sbuf_size = _checkSize(send_buffer_size);
      m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator, &mpi_request);
      if (m_is_trace)
        info() << " ISend ret=" << ret << " proc=" << proc << " tag=" << mpi_tag << " request=" << mpi_request;
      end_time = MPI_Wtime();
      ARCCORE_ADD_REQUEST(mpi_request);
    }
    if (m_is_trace){
      info() << "MPI Send: send after"
             << " request=" << mpi_request;
    }
  }
  double sr_time = (end_time-begin_time);

  debug(Trace::High) << "MPI Send: send " << send_size
                     << " time " << sr_time << " blocking " << is_blocked;
  m_stat->add(MpiInfo(eMpiName::Send).name(),end_time-begin_time,send_size);
  return buildRequest(ret,mpi_request);
}
directSendPack(const void* send_buffer,Int64 send_buffer_size,
               Int32 proc,int mpi_tag,bool is_blocked)
{
  return directSend(send_buffer,send_buffer_size,proc,1,MPI_PACKED,mpi_tag,is_blocked);
}
MpiMessagePassingMng* MpiAdapter::
commSplit(bool keep)
{
  MPI_Comm new_comm = MPI_COMM_NULL;

  MPI_Comm_split(m_communicator, (keep) ? 1 : MPI_UNDEFINED, commRank(), &new_comm);
  if (keep) {
    // The ranks kept by the split own the new communicator through the
    // returned manager (clean_comm=true).
    return StandaloneMpiMessagePassingMng::create(new_comm, true);
  }
  return nullptr;
}
receiveNonBlockingNoStat(void* recv_buffer,Int64 recv_buffer_size,
                         Int32 source_rank,MPI_Datatype data_type,int mpi_tag)
{
  int rbuf_size = _checkSize(recv_buffer_size);
  int ret = 0;
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, source_rank, mpi_tag, m_communicator, &mpi_request);
  ARCCORE_ADD_REQUEST(mpi_request);
  return buildRequest(ret,mpi_request);
}
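// directRecv() below accepts the special Arccore ranks: A_PROC_NULL_RANK is
// rejected (not implemented), and A_ANY_SOURCE_RANK (or A_NULL_RANK when
// explicitly allowed) is translated to MPI_ANY_SOURCE. Its blocking and
// non-blocking paths then mirror those of directSend().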
directRecv(void* recv_buffer,Int64 recv_buffer_size,
           Int32 proc,Int64 elem_size,MPI_Datatype data_type,
           int mpi_tag,bool is_blocked)
{
  MPI_Status mpi_status;
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  int ret = 0;
  double begin_time = 0.0;
  double end_time = 0.0;

  int i_proc = 0;
  if (proc==A_PROC_NULL_RANK)
    ARCCORE_THROW(NotImplementedException,"Receive with MPI_PROC_NULL");
  if (proc == A_NULL_RANK && !m_is_allow_null_rank_for_any_source)
    ARCCORE_FATAL("Can not use A_NULL_RANK for any source. Use A_ANY_SOURCE_RANK instead");
  if (proc==A_NULL_RANK || proc==A_ANY_SOURCE_RANK)
    i_proc = MPI_ANY_SOURCE;
  else
    i_proc = static_cast<int>(proc);

  Int64 recv_size = recv_buffer_size * elem_size;
  if (m_is_trace){
    info() << "MPI_TRACE: MPI Recv: recv before "
           << " size=" << recv_size
           << " from=" << i_proc
           << " tag=" << mpi_tag
           << " datatype=" << data_type
           << " blocking=" << is_blocked;
  }
  if (is_blocked){
    // Blocking receive with the MPI lock active: post an MPI_Irecv and poll
    // its completion so that the lock is released between two tests.
    if (m_mpi_lock){
      {
        MpiLock::Section mls(m_mpi_lock);
        begin_time = MPI_Wtime();
        int rbuf_size = _checkSize(recv_buffer_size);
        m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
      }
      int is_finished = 0;
      MPI_Status mpi_status;
      while (is_finished==0){
        MpiLock::Section mls(m_mpi_lock);
        MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
        if (is_finished!=0){
          end_time = MPI_Wtime();
          m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
          mpi_request = MPI_REQUEST_NULL;
        }
      }
    }
    else{
      // Blocking receive without the lock: plain blocking MPI receive.
      MpiLock::Section mls(m_mpi_lock);
      begin_time = MPI_Wtime();
      int rbuf_size = _checkSize(recv_buffer_size);
      m_mpi_prof->recv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_status);
      end_time = MPI_Wtime();
    }
  }
  else{
    // Non-blocking receive: the request is registered and returned.
    {
      MpiLock::Section mls(m_mpi_lock);
      begin_time = MPI_Wtime();
      int rbuf_size = _checkSize(recv_buffer_size);
      m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
      end_time = MPI_Wtime();
      ARCCORE_ADD_REQUEST(mpi_request);
    }
    if (m_is_trace){
      info() << "MPI Recv: recv after "
             << " request=" << mpi_request;
    }
  }
  double sr_time = (end_time-begin_time);

  debug(Trace::High) << "MPI Recv: recv after " << recv_size
                     << " time " << sr_time << " blocking " << is_blocked;
  m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
  return buildRequest(ret,mpi_request);
}
probeRecvPack(UniqueArray<Byte>& recv_buffer,Int32 proc)
{
  double begin_time = MPI_Wtime();
  MPI_Status status;
  int recv_buffer_size = 0;
  _trace("MPI_Probe");
  m_mpi_prof->probe(proc, 101, m_communicator, &status);
  m_mpi_prof->getCount(&status, MPI_PACKED, &recv_buffer_size);

  recv_buffer.resize(recv_buffer_size);
  m_mpi_prof->recv(recv_buffer.data(), recv_buffer_size, MPI_PACKED, proc, 101, m_communicator, &status);

  double end_time = MPI_Wtime();
  Int64 recv_size = recv_buffer_size;
  double sr_time = (end_time-begin_time);
  debug(Trace::High) << "MPI probeRecvPack " << recv_size
                     << " time " << sr_time;
  m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
}
MessageSourceInfo MpiAdapter::
_buildSourceInfoFromStatus(const MPI_Status& mpi_status)
{
  MPI_Count message_size = 0;
  MPI_Get_elements_x(&mpi_status,MPI_BYTE,&message_size);
  MessageTag tag(mpi_status.MPI_TAG);
  MessageRank rank(mpi_status.MPI_SOURCE);
  return MessageSourceInfo(rank,tag,message_size);
}
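// _probeMessage() uses the matched probes MPI_Mprobe/MPI_Improbe rather than
// MPI_Probe: they return an MPI_Message handle which is stored inside the
// MessageId, so the message found here can later be received with
// MPI_Mrecv/MPI_Imrecv (see directRecv(MessageId)) without racing with other
// receives on the same communicator.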
MessageId MpiAdapter::
_probeMessage(MessageRank source,MessageTag tag,bool is_blocking)
{
  MPI_Status mpi_status;
  int has_message = 0;
  MPI_Message message;
  int ret = 0;
  int mpi_source = source.value();
  if (source.isProcNull())
    ARCCORE_THROW(NotImplementedException,"Probe with MPI_PROC_NULL");
  if (source.isNull() && !m_is_allow_null_rank_for_any_source)
    ARCCORE_FATAL("Can not use MPI_Mprobe with null rank. Use MessageRank::anySourceRank() instead");
  if (source.isNull() || source.isAnySource())
    mpi_source = MPI_ANY_SOURCE;
  int mpi_tag = tag.value();
  if (tag.isNull())
    mpi_tag = MPI_ANY_TAG;
  if (is_blocking){
    ret = MPI_Mprobe(mpi_source,mpi_tag,m_communicator,&message,&mpi_status);
    has_message = true;
  }
  else{
    ret = MPI_Improbe(mpi_source, mpi_tag, m_communicator, &has_message, &message, &mpi_status);
  }
  if (ret!=0)
    ARCCORE_FATAL("Error during call to MPI_Mprobe r={0}",ret);
  MessageId ret_message;
  if (has_message!=0){
    MessageSourceInfo si(_buildSourceInfoFromStatus(mpi_status));
    ret_message = MessageId(si,message);
  }
  return ret_message;
}
MessageId MpiAdapter::
probeMessage(PointToPointMessageInfo message)
{
  if (!message.isValid())
    return MessageId();

  // The message has to be built with a (rank, tag) couple.
  if (!message.isRankTag())
    ARCCORE_FATAL("Invalid message_info: message.isRankTag() is false");

  return _probeMessage(message.destinationRank(),message.tag(),message.isBlocking());
}
MessageSourceInfo MpiAdapter::
_legacyProbeMessage(MessageRank source,MessageTag tag,bool is_blocking)
{
  MPI_Status mpi_status;
  int has_message = 0;
  int ret = 0;
  int mpi_source = source.value();
  if (source.isProcNull())
    ARCCORE_THROW(NotImplementedException,"Probe with MPI_PROC_NULL");
  if (source.isNull() && !m_is_allow_null_rank_for_any_source)
    ARCCORE_FATAL("Can not use MPI_Probe with null rank. Use MessageRank::anySourceRank() instead");
  if (source.isNull() || source.isAnySource())
    mpi_source = MPI_ANY_SOURCE;
  int mpi_tag = tag.value();
  if (tag.isNull())
    mpi_tag = MPI_ANY_TAG;
  if (is_blocking){
    ret = MPI_Probe(mpi_source,mpi_tag,m_communicator,&mpi_status);
    has_message = true;
  }
  else
    ret = MPI_Iprobe(mpi_source,mpi_tag,m_communicator,&has_message,&mpi_status);
  if (ret!=0)
    ARCCORE_FATAL("Error during call to MPI_Mprobe r={0}",ret);

  return _buildSourceInfoFromStatus(mpi_status);
}
MessageSourceInfo MpiAdapter::
legacyProbeMessage(PointToPointMessageInfo message)
{
  if (!message.isValid())
    return {};

  // The message has to be built with a (rank, tag) couple.
  if (!message.isRankTag())
    ARCCORE_FATAL("Invalid message_info: message.isRankTag() is false");

  return _legacyProbeMessage(message.destinationRank(),message.tag(),message.isBlocking());
}
directRecv(void* recv_buffer,Int64 recv_buffer_size,
           MessageId message,Int64 elem_size,MPI_Datatype data_type,
           bool is_blocked)
{
  MPI_Status mpi_status;
  MPI_Request mpi_request = MPI_REQUEST_NULL;
  MPI_Message mpi_message = (MPI_Message)message;
  int ret = 0;
  double begin_time = 0.0;
  double end_time = 0.0;

  Int64 recv_size = recv_buffer_size * elem_size;
  if (m_is_trace){
    info() << "MPI_TRACE: MPI Mrecv: recv before "
           << " size=" << recv_size
           << " from_msg=" << message
           << " datatype=" << data_type
           << " blocking=" << is_blocked;
  }
  if (is_blocked){
    // Blocking receive with the MPI lock active: post an MPI_Imrecv and poll
    // its completion so that the lock is released between two tests.
    if (m_mpi_lock){
      {
        MpiLock::Section mls(m_mpi_lock);
        begin_time = MPI_Wtime();
        int rbuf_size = _checkSize(recv_buffer_size);
        MPI_Imrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_request);
      }
      int is_finished = 0;
      MPI_Status mpi_status;
      while (is_finished==0){
        MpiLock::Section mls(m_mpi_lock);
        MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
        if (is_finished!=0){
          end_time = MPI_Wtime();
          m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
          mpi_request = MPI_REQUEST_NULL;
        }
      }
    }
    else{
      // Blocking receive without the lock: plain MPI_Mrecv.
      MpiLock::Section mls(m_mpi_lock);
      begin_time = MPI_Wtime();
      int rbuf_size = _checkSize(recv_buffer_size);
      MPI_Mrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_status);
      end_time = MPI_Wtime();
    }
  }
  else{
    // Non-blocking receive: the request is registered and returned.
    {
      MpiLock::Section mls(m_mpi_lock);
      begin_time = MPI_Wtime();
      int rbuf_size = _checkSize(recv_buffer_size);
      ret = MPI_Imrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_request);
      end_time = MPI_Wtime();
      ARCCORE_ADD_REQUEST(mpi_request);
    }
    if (m_is_trace){
      info() << "MPI Recv: recv after "
             << " request=" << mpi_request;
    }
  }
  double sr_time = (end_time-begin_time);

  debug(Trace::High) << "MPI Recv: recv after " << recv_size
                     << " time " << sr_time << " blocking " << is_blocked;
  m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
  return buildRequest(ret,mpi_request);
}
directRecvPack(void* recv_buffer,Int64 recv_buffer_size,
               Int32 proc,int mpi_tag,bool is_blocking)
{
  return directRecv(recv_buffer,recv_buffer_size,proc,1,MPI_PACKED,mpi_tag,is_blocking);
}
waitAllRequests(ArrayView<Request> requests)
{
  UniqueArray<bool> indexes(requests.size());
  UniqueArray<MPI_Status> mpi_status(requests.size());
  while (_waitAllRequestsMPI(requests, indexes, mpi_status)){
    ; // Keep waiting as long as sub-requests generate new requests.
  }
}
waitSomeRequests(ArrayView<Request> requests,
                 ArrayView<bool> indexes,
                 bool is_non_blocking)
{
  UniqueArray<MPI_Status> mpi_status(requests.size());
  waitSomeRequestsMPI(requests, indexes, mpi_status, is_non_blocking);
}
, mpi_source_rank(source_rank)
, mpi_source_tag(source_tag)

int mpi_source_rank = MPI_PROC_NULL;
int mpi_source_tag = 0;
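// _handleEndRequests() scans the requests flagged as done: each one is
// removed from the RequestSet and, if it carries a sub-request, its
// completion information (source rank and tag) is recorded so that the
// sub-request can be executed once the MPI lock has been released. A
// sub-request may itself return a new request, in which case the slot is
// re-armed and the caller has to wait again.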
bool MpiAdapter::
_handleEndRequests(ArrayView<Request> requests,ArrayView<bool> done_indexes,
                   ArrayView<MPI_Status> status)
{
  UniqueArray<SubRequestInfo> new_requests;
  Integer size = requests.size();
  {
    MpiLock::Section mls(m_mpi_lock);
    for( Integer i=0; i<size; ++i ) {
      if (done_indexes[i]){
        Request r = requests[i];
        if (r.hasSubRequest()){
          if (m_is_trace)
            info() << "Done request with sub-request r=" << r << " mpi_r=" << r << " i=" << i
                   << " source_rank=" << status[i].MPI_SOURCE
                   << " source_tag=" << status[i].MPI_TAG;
          new_requests.add(SubRequestInfo(r.subRequest(), i, status[i].MPI_SOURCE, status[i].MPI_TAG));
        }
        _removeRequest((MPI_Request)(r));
      }
    }
  }

  bool has_new_request = false;
  if (!new_requests.empty()){
    // Save the status of the completed requests before executing sub-requests.
    UniqueArray<MPI_Status> old_status(size);
    {
      Integer index = 0;
      for( Integer i=0; i<size; ++i ){
        if (done_indexes[i]){
          old_status[i] = status[index];
          ++index;
        }
      }
    }
    // Execute each sub-request; it may generate a new request that replaces
    // the completed one and has to be waited on again.
    for( SubRequestInfo& sri : new_requests ){
      Integer index = sri.index;
      if (m_is_trace)
        info() << "Before handle new request index=" << index
               << " sri.source_rank=" << sri.mpi_source_rank
               << " sri.source_tag=" << sri.mpi_source_tag;
      SubRequestCompletionInfo completion_info(MessageRank(old_status[index].MPI_SOURCE), MessageTag(old_status[index].MPI_TAG));
      Request r = sri.sub_request->executeOnCompletion(completion_info);
      if (m_is_trace)
        info() << "Handle new request index=" << index << " old_r=" << requests[index] << " new_r=" << r;
      if (r.isValid()){
        has_new_request = true;
        requests[index] = r;
        done_indexes[index] = false;
      }
    }
    // Put back the saved status values for the requests that are still done.
    {
      Integer index = 0;
      for( Integer i=0; i<size; ++i ){
        if (done_indexes[i]){
          status[index] = old_status[i];
          ++index;
        }
      }
    }
  }
  return has_new_request;
}
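// _waitAllRequestsMPI() waits for every request of the array: when an MPI
// lock is used each request is polled individually with MPI_Test so the lock
// is released between two tests, otherwise a single MPI_Waitall is issued.
// It returns true when a completed sub-request generated a new request, in
// which case waitAllRequests() loops and waits again.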
_waitAllRequestsMPI(ArrayView<Request> requests,
                    ArrayView<bool> indexes,
                    ArrayView<MPI_Status> mpi_status)
{
  Integer size = requests.size();

  // Convert the generic requests into MPI requests.
  UniqueArray<MPI_Request> mpi_request(size);
  for( Integer i=0; i<size; ++i ){
    mpi_request[i] = (MPI_Request)(requests[i]);
  }
  if (m_is_trace)
    info() << " MPI_waitall begin size=" << size;
  double diff_time = 0.0;
  if (m_mpi_lock){
    // With the MPI lock, poll each request so the lock is released between tests.
    double begin_time = MPI_Wtime();
    for( Integer i=0; i<size; ++i ){
      MPI_Request request = (MPI_Request)(mpi_request[i]);
      int is_finished = 0;
      while (is_finished==0){
        MpiLock::Section mls(m_mpi_lock);
        m_mpi_prof->test(&request, &is_finished, (MPI_Status *) MPI_STATUS_IGNORE);
      }
    }
    double end_time = MPI_Wtime();
    diff_time = end_time - begin_time;
  }
  else{
    // Without the lock a single MPI_Waitall is enough.
    MpiLock::Section mls(m_mpi_lock);
    double begin_time = MPI_Wtime();
    m_mpi_prof->waitAll(size, mpi_request.data(), mpi_status.data());
    double end_time = MPI_Wtime();
    diff_time = end_time - begin_time;
  }

  // All the requests are now done.
  for( Integer i=0; i<size; ++i ){
    indexes[i] = true;
  }

  bool has_new_request = _handleEndRequests(requests,indexes,mpi_status);
  if (m_is_trace)
    info() << " MPI_waitall end size=" << size;
  m_stat->add(MpiInfo(eMpiName::Waitall).name(),diff_time,size);
  return has_new_request;
}
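// waitSomeRequestsMPI() implements both waitSome and testSome: the
// non-blocking variant uses MPI_Testsome and the blocking one MPI_Waitsome.
// Both may return MPI_UNDEFINED as the completion count when no active
// request remains, which is normalized to zero before the completed slots
// are marked in 'indexes'.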
waitSomeRequestsMPI(ArrayView<Request> requests,ArrayView<bool> indexes,
                    ArrayView<MPI_Status> mpi_status,bool is_non_blocking)
{
  Integer size = requests.size();

  UniqueArray<MPI_Request> mpi_request(size);
  UniqueArray<MPI_Request> saved_mpi_request(size);
  UniqueArray<int> completed_requests(size);
  int nb_completed_request = 0;

  // Keep a copy of the valid requests: an invalid Request is replaced by
  // MPI_REQUEST_NULL, which MPI_Testsome/MPI_Waitsome simply ignore.
  for (Integer i = 0; i < size; ++i) {
    if (!requests[i].isValid()) {
      saved_mpi_request[i] = MPI_REQUEST_NULL;
    }
    else {
      saved_mpi_request[i] = static_cast<MPI_Request>(requests[i]);
    }
  }

  bool is_print_debug = m_is_trace || (!is_non_blocking);
  if (is_print_debug)
    debug() << "WaitRequestBegin is_non_blocking=" << is_non_blocking << " n=" << size;

  double begin_time = MPI_Wtime();

  try{
    if (is_non_blocking){
      _trace(MpiInfo(eMpiName::Testsome).name().localstr());
      {
        MpiLock::Section mls(m_mpi_lock);
        m_mpi_prof->testSome(size, saved_mpi_request.data(), &nb_completed_request,
                             completed_requests.data(), mpi_status.data());
      }
      if (nb_completed_request == MPI_UNDEFINED)
        nb_completed_request = 0;
      if (is_print_debug)
        debug() << "WaitSomeRequestMPI: TestSome nb_completed=" << nb_completed_request;
    }
    else{
      _trace(MpiInfo(eMpiName::Waitsome).name().localstr());
      {
        MpiLock::Section mls(m_mpi_lock);
        m_mpi_prof->waitSome(size, saved_mpi_request.data(), &nb_completed_request,
                             completed_requests.data(), mpi_status.data());
      }
      if (nb_completed_request == MPI_UNDEFINED)
        nb_completed_request = 0;
      if (is_print_debug)
        debug() << "WaitSomeRequest nb_completed=" << nb_completed_request;
    }
  }
  catch(TimeoutException& ex)
  {
    std::ostringstream ostr;
    if (is_non_blocking)
      ostr << MpiInfo(eMpiName::Testsome).name();
    else
      ostr << MpiInfo(eMpiName::Waitsome).name();
    ostr << " size=" << size
         << " is_non_blocking=" << is_non_blocking;
    ex.setAdditionalInfo(ostr.str());
    throw;
  }

  for( int z=0; z<nb_completed_request; ++z ){
    int index = completed_requests[z];
    if (is_print_debug)
      debug() << "Completed my_rank=" << m_comm_rank << " z=" << z
              << " index=" << index
              << " tag=" << mpi_status[z].MPI_TAG
              << " source=" << mpi_status[z].MPI_SOURCE;
    indexes[index] = true;
  }

  bool has_new_request = _handleEndRequests(requests,indexes,mpi_status);
  if (has_new_request){
    // At least one sub-request generated a new request; its slot in 'indexes'
    // has been reset so the caller has to wait for it again.
  }
  double end_time = MPI_Wtime();
  m_stat->add(MpiInfo(eMpiName::Waitsome).name(),end_time-begin_time,size);
}
freeRequest(Request& request)
{
  if (!request.isValid()){
    warning() << "MpiAdapter::freeRequest() null request r=" << (MPI_Request)request;
    _checkFatalInRequest();
    return;
  }
  {
    MpiLock::Section mls(m_mpi_lock);

    auto mr = (MPI_Request)request;
    _removeRequest(mr);
    MPI_Request_free(&mr);
  }
}
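// testRequest() looks the request up in the RequestSet before calling the
// test: the iterator has to be obtained inside the same locked section,
// because once the test reports completion the request is removed from the
// set and the handle may be reused by MPI for another request immediately.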
testRequest(Request& request)
{
  // A null request is always considered as done.
  if (!request.isValid())
    return true;

  auto mr = (MPI_Request)request;
  int is_finished = 0;

  {
    MpiLock::Section mls(m_mpi_lock);

    // Retrieve the iterator before the test: if the request completes, it has
    // to be removed from the set before its handle can be reused.
    RequestSet::Iterator request_iter = m_request_set->findRequest(mr);

    m_mpi_prof->test(&mr, &is_finished, (MPI_Status *) MPI_STATUS_IGNORE);

    if (is_finished!=0){
      m_request_set->removeRequest(request_iter);
      if (request.hasSubRequest())
        ARCCORE_THROW(NotImplementedException,"SubRequest support");
      return true;
    }
  }

  return false;
}
_addRequest(MPI_Request request)
{
  m_request_set->addRequest(request);
}

_removeRequest(MPI_Request request)
{
  m_request_set->removeRequest(request);
}

enableDebugRequest(bool enable_debug_request)
{
  m_stat->enable(enable_debug_request);
}
_checkFatalInRequest()
{
  if (isRequestErrorAreFatal())
    ARCCORE_FATAL("Error in requests management");
}

setMpiProfiling(IMpiProfiling* mpi_profiling)
{
  m_mpi_prof = mpi_profiling;
}
IMpiProfiling* MpiAdapter::
getMpiProfiling() const
{
  return m_mpi_prof;
}

void MpiAdapter::
setProfiler(IProfiler* profiler)
{
  if (!profiler){
    m_mpi_prof = nullptr;
    return;
  }

  IMpiProfiling* p = dynamic_cast<IMpiProfiling*>(profiler);
  if (!p)
    ARCCORE_FATAL("Invalid profiler. Profiler has to implement interface 'IMpiProfiling'");
  m_mpi_prof = p;
}

IProfiler* MpiAdapter::
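// The machine-memory window creator is built lazily: the first call splits
// the communicator with MPI_Comm_split_type(MPI_COMM_TYPE_SHARED) to obtain
// a communicator restricted to the ranks of the same node, which is then
// used to create shared-memory windows.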
MpiMachineMemoryWindowBaseInternalCreator* MpiAdapter::

  if (m_window_creator == nullptr) {
    MPI_Comm_split_type(m_communicator, MPI_COMM_TYPE_SHARED, m_comm_rank, MPI_INFO_NULL, &m_machine_communicator);
    MPI_Comm_rank(m_machine_communicator, &m_machine_comm_rank);
    MPI_Comm_size(m_machine_communicator, &m_machine_comm_size);
    m_window_creator = new MpiMachineMemoryWindowBaseInternalCreator(m_machine_communicator, m_machine_comm_rank, m_machine_comm_size, m_communicator, m_comm_size);
  }
  return m_window_creator;
}