Arcane  v3.16.6.0
User documentation
MpiAdapter.cc
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2025 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7/*---------------------------------------------------------------------------*/
8/* MpiAdapter.cc (C) 2000-2025 */
9/* */
10/* Parallelism manager using MPI. */
11/*---------------------------------------------------------------------------*/
12/*---------------------------------------------------------------------------*/
13
14#include "arccore/message_passing_mpi/internal/MpiAdapter.h"
15
16#include "arccore/trace/ITraceMng.h"
17
18#include "arccore/collections/Array.h"
19
20#include "arccore/message_passing/Request.h"
21#include "arccore/message_passing/IStat.h"
22#include "arccore/message_passing/internal/SubRequestCompletionInfo.h"
23
24#include "arccore/base/IStackTraceService.h"
25#include "arccore/base/TimeoutException.h"
26#include "arccore/base/String.h"
27#include "arccore/base/NotImplementedException.h"
28#include "arccore/base/PlatformUtils.h"
29#include "arccore/base/FatalErrorException.h"
30#include "arccore/base/TraceInfo.h"
31
32#include "arccore/message_passing_mpi/StandaloneMpiMessagePassingMng.h"
33#include "arccore/message_passing_mpi/internal/MpiLock.h"
34#include "arccore/message_passing_mpi/internal/NoMpiProfiling.h"
35#include "arccore/message_passing_mpi/internal/MpiRequest.h"
36#include "arccore/message_passing_mpi/internal/MpiMachineMemoryWindowBaseInternalCreator.h"
37
38#include <cstdint>
39
40/*---------------------------------------------------------------------------*/
41/*---------------------------------------------------------------------------*/
42
43namespace Arcane::MessagePassing::Mpi
44{
45
46/*---------------------------------------------------------------------------*/
47/*---------------------------------------------------------------------------*/
48
49class MpiAdapter::RequestSet
50: public TraceAccessor
51{
52 public:
53 struct RequestInfo
54 {
55 TraceInfo m_trace;
56 String m_stack_trace;
57 };
58 public:
59 typedef std::map<MPI_Request,RequestInfo>::iterator Iterator;
60 public:
61
62 explicit RequestSet(ITraceMng* tm)
63 : TraceAccessor(tm)
64 {
65 m_trace_mng_ref = makeRef(tm);
66 if (arccoreIsCheck()){
67 m_no_check_request = false;
68 m_request_error_is_fatal = true;
69 }
70 if (Platform::getEnvironmentVariable("ARCCORE_NOREPORT_ERROR_MPIREQUEST")=="TRUE")
71 m_is_report_error_in_request = false;
72 if (Platform::getEnvironmentVariable("ARCCORE_MPIREQUEST_STACKTRACE")=="TRUE")
73 m_use_trace_full_stack = true;
74 if (Platform::getEnvironmentVariable("ARCCORE_TRACE_MPIREQUEST")=="TRUE")
75 m_trace_mpirequest = true;
76 }
77 public:
78 void addRequest(MPI_Request request)
79 {
80 if (m_no_check_request)
81 return;
82 if (m_trace_mpirequest)
83 info() << "MpiAdapter: AddRequest r=" << request;
84 _addRequest(request,TraceInfo());
85 }
86 void addRequest(MPI_Request request,const TraceInfo& ti)
87 {
88 if (m_no_check_request)
89 return;
90 if (m_trace_mpirequest)
91 info() << "MpiAdapter: AddRequest r=" << request;
92 _addRequest(request,ti);
93 }
94 void removeRequest(MPI_Request request)
95 {
96 if (m_no_check_request)
97 return;
98 if (m_trace_mpirequest)
99 info() << "MpiAdapter: RemoveRequest r=" << request;
100 _removeRequest(request);
101 }
102 void removeRequest(Iterator request_iter)
103 {
104 if (m_no_check_request)
105 return;
106 if (request_iter==m_allocated_requests.end()){
107 if (m_trace_mpirequest)
108 info() << "MpiAdapter: RemoveRequestIter null iterator";
109 return;
110 }
111 if (m_trace_mpirequest)
112 info() << "MpiAdapter: RemoveRequestIter r=" << request_iter->first;
113 m_allocated_requests.erase(request_iter);
114 }
115 //! Checks that the request is in the list
116 Iterator findRequest(MPI_Request request)
117 {
118 if (m_no_check_request)
119 return m_allocated_requests.end();
120
121 if (_isEmptyRequest(request))
122 return m_allocated_requests.end();
123 auto ireq = m_allocated_requests.find(request);
124 if (ireq==m_allocated_requests.end()){
125 if (m_is_report_error_in_request || m_request_error_is_fatal){
126 error() << "MpiAdapter::testRequest() request not referenced "
127 << " id=" << request;
128 _checkFatalInRequest();
129 }
130 }
131 return ireq;
132 }
133 private:
134 /*!
135 * \warning This function must be called with the mpi_lock lock held.
136 */
137 void _addRequest(MPI_Request request,const TraceInfo& trace_info)
138 {
139 if (request==MPI_REQUEST_NULL){
140 if (m_is_report_error_in_request || m_request_error_is_fatal){
141 error() << "MpiAdapter::_addRequest() trying to add null request";
142 _checkFatalInRequest();
143 }
144 return;
145 }
146 if (_isEmptyRequest(request))
147 return;
148 ++m_total_added_request;
149 //info() << "MPI_ADAPTER:ADD REQUEST " << request;
150 auto i = m_allocated_requests.find(request);
151 if (i!=m_allocated_requests.end()){
152 if (m_is_report_error_in_request || m_request_error_is_fatal){
153 error() << "MpiAdapter::_addRequest() request already referenced "
154 << " id=" << request;
155 _checkFatalInRequest();
156 }
157 return;
158 }
159 RequestInfo rinfo;
160 rinfo.m_trace = trace_info;
161 if (m_use_trace_full_stack)
162 rinfo.m_stack_trace = Platform::getStackTrace();
163 m_allocated_requests.insert(std::make_pair(request,rinfo));
164 }
165
166 /*!
167 * \warning This function must be called with the mpi_lock lock held.
168 */
169 void _removeRequest(MPI_Request request)
170 {
171 //info() << "MPI_ADAPTER:REMOVE REQUEST " << request;
172 if (request==MPI_REQUEST_NULL){
173 if (m_is_report_error_in_request || m_request_error_is_fatal){
174 error() << "MpiAdapter::_removeRequest() null request (" << MPI_REQUEST_NULL << ")";
175 _checkFatalInRequest();
176 }
177 return;
178 }
179 if (_isEmptyRequest(request))
180 return;
181 auto i = m_allocated_requests.find(request);
182 if (i==m_allocated_requests.end()){
183 if (m_is_report_error_in_request || m_request_error_is_fatal){
184 error() << "MpiAdapter::_removeRequest() request not referenced "
185 << " id=" << request;
186 _checkFatalInRequest();
187 }
188 }
189 else
190 m_allocated_requests.erase(i);
191 }
192 public:
193 void _checkFatalInRequest()
194 {
195 if (m_request_error_is_fatal)
196 ARCCORE_FATAL("Error in requests management");
197 }
198 Int64 nbRequest() const { return m_allocated_requests.size(); }
199 Int64 totalAddedRequest() const { return m_total_added_request; }
200 void printRequests() const
201 {
202 info() << "PRINT REQUESTS\n";
203 for( auto& x : m_allocated_requests ){
204 info() << "Request id=" << x.first << " trace=" << x.second.m_trace
205 << " stack=" << x.second.m_stack_trace;
206 }
207 }
208 void setEmptyRequests(MPI_Request r1,MPI_Request r2)
209 {
210 m_empty_request1 = r1;
211 m_empty_request2 = r2;
212 }
213 public:
214 bool m_request_error_is_fatal = false;
215 bool m_is_report_error_in_request = true;
216 bool m_trace_mpirequest = false;
217 //! True if requests are not checked
218 bool m_no_check_request = true;
219 private:
220 std::map<MPI_Request,RequestInfo> m_allocated_requests;
221 bool m_use_trace_full_stack = false;
222 MPI_Request m_empty_request1 = MPI_REQUEST_NULL;
223 MPI_Request m_empty_request2 = MPI_REQUEST_NULL;
224 Int64 m_total_added_request = 0;
225 Ref<ITraceMng> m_trace_mng_ref;
226 private:
227 bool _isEmptyRequest(MPI_Request r) const
228 {
229 return (r==m_empty_request1 || r==m_empty_request2);
230 }
231};
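// Note: the checks performed by RequestSet are controlled by the environment
// variables read in its constructor (ARCCORE_NOREPORT_ERROR_MPIREQUEST,
// ARCCORE_MPIREQUEST_STACKTRACE and ARCCORE_TRACE_MPIREQUEST, tested against
// the value "TRUE"), and by arccoreIsCheck(), which enables checking and makes
// request errors fatal.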
232
233#define ARCCORE_ADD_REQUEST(request)\
234 m_request_set->addRequest(request,A_FUNCINFO);
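// Illustrative usage of the macro (this is the pattern followed by the
// non-blocking operations below): post the MPI call, register the returned
// request, then wrap it in a Request, e.g.
//   MPI_Request mpi_request = MPI_REQUEST_NULL;
//   int ret = MPI_Ibarrier(m_communicator,&mpi_request);
//   ARCCORE_ADD_REQUEST(mpi_request);
//   return buildRequest(ret,mpi_request);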
235
236/*---------------------------------------------------------------------------*/
237/*---------------------------------------------------------------------------*/
238
239namespace
240{
241int _checkSize(Int64 i64_size)
242{
243 if (i64_size>INT32_MAX)
244 ARCCORE_FATAL("Can not convert '{0}' to type integer",i64_size);
245 return (int)i64_size;
246}
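// _checkSize() aborts with ARCCORE_FATAL rather than silently truncating when
// a 64-bit element count does not fit in the 'int' expected by the MPI C API.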
247}
248
249/*---------------------------------------------------------------------------*/
250/*---------------------------------------------------------------------------*/
251
252MpiAdapter::
253MpiAdapter(ITraceMng* trace,IStat* stat,MPI_Comm comm,
254 MpiLock* mpi_lock, IMpiProfiling* mpi_op)
255: TraceAccessor(trace)
256, m_stat(stat)
257, m_mpi_lock(mpi_lock)
258, m_mpi_prof(mpi_op)
259, m_communicator(comm)
260, m_comm_rank(0)
261, m_comm_size(0)
262, m_machine_communicator(MPI_COMM_NULL)
263, m_empty_request1(MPI_REQUEST_NULL)
264, m_empty_request2(MPI_REQUEST_NULL)
265, m_window_creator(nullptr)
266{
267 m_request_set = new RequestSet(trace);
268
269 if (Platform::getEnvironmentVariable("ARCCORE_TRACE_MPI")=="TRUE")
270 m_is_trace = true;
271 {
272 String s = Platform::getEnvironmentVariable("ARCCORE_ALLOW_NULL_RANK_FOR_MPI_ANY_SOURCE");
273 if (s == "1" || s == "TRUE")
274 m_is_allow_null_rank_for_any_source = true;
275 if (s == "0" || s == "FALSE")
276 m_is_allow_null_rank_for_any_source = false;
277 }
278
279 ::MPI_Comm_rank(m_communicator,&m_comm_rank);
280 ::MPI_Comm_size(m_communicator,&m_comm_size);
281
282 // By default, no MPI profiling is done; use the dedicated setter to change this
283 if (!m_mpi_prof)
284 m_mpi_prof = new NoMpiProfiling();
285
286 /*!
287 * This type of request is used by openmpi starting with
288 * version 1.8 (to be checked for 1.6, knowing that 1.4 and 1.5
289 * do not have it). Such a request behaves a bit like MPI_REQUEST_NULL
290 * and it may be returned several times, so it must
291 * not be stored in m_allocated_requests.
292 * The address of this empty request cannot be accessed directly,
293 * but the openmpi 1.8 implementation returns this request when an
294 * IRecv is posted with source MPI_PROC_NULL, which is how its value
295 * is retrieved here.
296 */
297 MPI_Irecv(m_recv_buffer_for_empty_request, 1, MPI_CHAR, MPI_PROC_NULL,
298 50505, m_communicator, &m_empty_request1);
299
300 /*
301 * Starting with openmpi version 4, sends with small buffers also seem to
302 * always generate the same request, so it must likewise be removed from
303 * the requests to test. The matching MPI_Recv is posted as well, so that
304 * the MPI_ISend cannot accidentally be matched by a user-level
305 * MPI_Recv (for instance an MPI_Recv(MPI_ANY_TAG)).
306 */
307 m_send_buffer_for_empty_request2[0] = 0;
308 MPI_Isend(m_send_buffer_for_empty_request2, 1, MPI_CHAR, m_comm_rank,
309 50505, m_communicator, &m_empty_request2);
310
311 MPI_Recv(m_recv_buffer_for_empty_request2, 1, MPI_CHAR, m_comm_rank,
312 50505, m_communicator, MPI_STATUS_IGNORE);
313
314 m_request_set->setEmptyRequests(m_empty_request1,m_empty_request2);
315}
316
317/*---------------------------------------------------------------------------*/
318/*---------------------------------------------------------------------------*/
319
320MpiAdapter::
321~MpiAdapter()
322{
323 if (m_empty_request1 != MPI_REQUEST_NULL)
324 MPI_Request_free(&m_empty_request1);
325 if (m_empty_request2 != MPI_REQUEST_NULL)
326 MPI_Request_free(&m_empty_request2);
327
328 delete m_request_set;
329 delete m_mpi_prof;
330 delete m_window_creator;
331 if (m_machine_communicator != MPI_COMM_NULL)
332 MPI_Comm_free(&m_machine_communicator);
333}
334
335/*---------------------------------------------------------------------------*/
336/*---------------------------------------------------------------------------*/
337
338Request MpiAdapter::
339buildRequest(int ret,MPI_Request mpi_request)
340{
341 return MpiRequest(ret,this,mpi_request);
342}
343
344/*---------------------------------------------------------------------------*/
345/*---------------------------------------------------------------------------*/
346
347void MpiAdapter::
348_checkHasNoRequests()
349{
350 Int64 nb_request = m_request_set->nbRequest();
351 // This check cannot be done in the destructor because it may
352 // potentially throw an exception, which must not be done
353 // in a destructor.
354 if (nb_request!=0){
355 warning() << " Pending mpi requests size=" << nb_request;
356 m_request_set->printRequests();
357 _checkFatalInRequest();
358 }
359}
360
361/*---------------------------------------------------------------------------*/
362/*---------------------------------------------------------------------------*/
363
364void MpiAdapter::
365destroy()
366{
367 _checkHasNoRequests();
368 delete this;
369}
370
371/*---------------------------------------------------------------------------*/
372/*---------------------------------------------------------------------------*/
373
374void MpiAdapter::
375setRequestErrorAreFatal(bool v)
376{
377 m_request_set->m_request_error_is_fatal = v;
378}
379bool MpiAdapter::
380isRequestErrorAreFatal() const
381{
382 return m_request_set->m_request_error_is_fatal;
383}
384
385void MpiAdapter::
386setPrintRequestError(bool v)
387{
388 m_request_set->m_is_report_error_in_request = v;
389}
390bool MpiAdapter::
391isPrintRequestError() const
392{
393 return m_request_set->m_is_report_error_in_request;
394}
395
396void MpiAdapter::
397setCheckRequest(bool v)
398{
399 m_request_set->m_no_check_request = !v;
400}
401
402bool MpiAdapter::
403isCheckRequest() const
404{
405 return !m_request_set->m_no_check_request;
406}
407
408/*---------------------------------------------------------------------------*/
409/*---------------------------------------------------------------------------*/
410
411int MpiAdapter::
412toMPISize(Int64 count)
413{
414 return _checkSize(count);
415}
416
417/*---------------------------------------------------------------------------*/
418/*---------------------------------------------------------------------------*/
419
420void MpiAdapter::
421_trace(const char* function)
422{
423 if (m_is_trace) {
424 IStackTraceService* stack_service = Platform::getStackTraceService();
425 if (stack_service)
426 info() << "MPI_TRACE: " << function << "\n" << stack_service->stackTrace().toString();
427 else
428 info() << "MPI_TRACE: " << function;
429 }
430}
431
432/*---------------------------------------------------------------------------*/
433/*---------------------------------------------------------------------------*/
434
435void MpiAdapter::
436broadcast(void* buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
437{
438 int _nb_elem = _checkSize(nb_elem);
439 _trace(MpiInfo(eMpiName::Bcast).name().localstr());
440 double begin_time = MPI_Wtime();
441 if (m_is_trace)
442 info() << "MPI_TRACE: MPI broadcast: before"
443 << " buf=" << buf
444 << " nb_elem=" << nb_elem
445 << " root=" << root
446 << " datatype=" << datatype;
447
448 m_mpi_prof->broadcast(buf, _nb_elem, datatype, root, m_communicator);
449 double end_time = MPI_Wtime();
450 double sr_time = (end_time-begin_time);
451 //TODO: determine the message size
452 m_stat->add(MpiInfo(eMpiName::Bcast).name(),sr_time,0);
453}
454
455/*---------------------------------------------------------------------------*/
456/*---------------------------------------------------------------------------*/
457
458Request MpiAdapter::
459nonBlockingBroadcast(void* buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
460{
461 MPI_Request mpi_request = MPI_REQUEST_NULL;
462 int ret = -1;
463 int _nb_elem = _checkSize(nb_elem);
464 _trace(" MPI_Bcast");
465 double begin_time = MPI_Wtime();
466 ret = MPI_Ibcast(buf,_nb_elem,datatype,root,m_communicator,&mpi_request);
467 double end_time = MPI_Wtime();
468 double sr_time = (end_time-begin_time);
469 //TODO: determine the message size
470 m_stat->add("IBroadcast",sr_time,0);
471 ARCCORE_ADD_REQUEST(mpi_request);
472 return buildRequest(ret,mpi_request);
473}
474
475/*---------------------------------------------------------------------------*/
476/*---------------------------------------------------------------------------*/
477
478void MpiAdapter::
479gather(const void* send_buf,void* recv_buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
480{
481 void* _sbuf = const_cast<void*>(send_buf);
482 int _nb_elem = _checkSize(nb_elem);
483 int _root = static_cast<int>(root);
484 _trace(MpiInfo(eMpiName::Gather).name().localstr());
485 double begin_time = MPI_Wtime();
486 m_mpi_prof->gather(_sbuf, _nb_elem, datatype, recv_buf, _nb_elem, datatype, _root, m_communicator);
487 double end_time = MPI_Wtime();
488 double sr_time = (end_time-begin_time);
489 //TODO: determine the message size
490 m_stat->add(MpiInfo(eMpiName::Gather).name(),sr_time,0);
491}
492
493/*---------------------------------------------------------------------------*/
494/*---------------------------------------------------------------------------*/
495
496Request MpiAdapter::
497nonBlockingGather(const void* send_buf,void* recv_buf,
498 Int64 nb_elem,Int32 root,MPI_Datatype datatype)
499{
500 MPI_Request mpi_request = MPI_REQUEST_NULL;
501 int ret = -1;
502 void* _sbuf = const_cast<void*>(send_buf);
503 int _nb_elem = _checkSize(nb_elem);
504 int _root = static_cast<int>(root);
505 _trace("MPI_Igather");
506 double begin_time = MPI_Wtime();
507 ret = MPI_Igather(_sbuf,_nb_elem,datatype,recv_buf,_nb_elem,datatype,_root,
508 m_communicator,&mpi_request);
509 double end_time = MPI_Wtime();
510 double sr_time = (end_time-begin_time);
511 //TODO: determine the message size
512 m_stat->add("IGather",sr_time,0);
513 ARCCORE_ADD_REQUEST(mpi_request);
514 return buildRequest(ret,mpi_request);
515}
516
517/*---------------------------------------------------------------------------*/
518/*---------------------------------------------------------------------------*/
519
520void MpiAdapter::
521allGather(const void* send_buf,void* recv_buf,
522 Int64 nb_elem,MPI_Datatype datatype)
523{
524 void* _sbuf = const_cast<void*>(send_buf);
525 int _nb_elem = _checkSize(nb_elem);
526 _trace(MpiInfo(eMpiName::Allgather).name().localstr());
527 double begin_time = MPI_Wtime();
528 m_mpi_prof->allGather(_sbuf, _nb_elem, datatype, recv_buf, _nb_elem, datatype, m_communicator);
529 double end_time = MPI_Wtime();
530 double sr_time = (end_time-begin_time);
531 //TODO: determine the message size
532 m_stat->add(MpiInfo(eMpiName::Allgather).name(),sr_time,0);
533}
534
535/*---------------------------------------------------------------------------*/
536/*---------------------------------------------------------------------------*/
537
538Request MpiAdapter::
539nonBlockingAllGather(const void* send_buf,void* recv_buf,
540 Int64 nb_elem,MPI_Datatype datatype)
541{
542 MPI_Request mpi_request = MPI_REQUEST_NULL;
543 int ret = -1;
544 void* _sbuf = const_cast<void*>(send_buf);
545 int _nb_elem = _checkSize(nb_elem);
546 _trace("MPI_Iallgather");
547 double begin_time = MPI_Wtime();
548 ret = MPI_Iallgather(_sbuf,_nb_elem,datatype,recv_buf,_nb_elem,datatype,
549 m_communicator,&mpi_request);
550 double end_time = MPI_Wtime();
551 double sr_time = (end_time-begin_time);
552 //TODO: determine the message size
553 m_stat->add("IAllGather",sr_time,0);
554 ARCCORE_ADD_REQUEST(mpi_request);
555 return buildRequest(ret,mpi_request);
556}
557
558/*---------------------------------------------------------------------------*/
559/*---------------------------------------------------------------------------*/
560
561void MpiAdapter::
562gatherVariable(const void* send_buf,void* recv_buf,const int* recv_counts,
563 const int* recv_indexes,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
564{
565 void* _sbuf = const_cast<void*>(send_buf);
566 int _nb_elem = _checkSize(nb_elem);
567 int _root = static_cast<int>(root);
568 _trace(MpiInfo(eMpiName::Gatherv).name().localstr());
569 double begin_time = MPI_Wtime();
570 m_mpi_prof->gatherVariable(_sbuf, _nb_elem, datatype, recv_buf, recv_counts, recv_indexes, datatype, _root, m_communicator);
571 double end_time = MPI_Wtime();
572 double sr_time = (end_time-begin_time);
573 //TODO: determine the message size
574 m_stat->add(MpiInfo(eMpiName::Gatherv).name().localstr(),sr_time,0);
575}
576
577/*---------------------------------------------------------------------------*/
578/*---------------------------------------------------------------------------*/
579
580void MpiAdapter::
581allGatherVariable(const void* send_buf,void* recv_buf,const int* recv_counts,
582 const int* recv_indexes,Int64 nb_elem,MPI_Datatype datatype)
583{
584 void* _sbuf = const_cast<void*>(send_buf);
585 int _nb_elem = _checkSize(nb_elem);
586 _trace(MpiInfo(eMpiName::Allgatherv).name().localstr());
587 //info() << " ALLGATHERV N=" << _nb_elem;
588 //for( int i=0; i<m_comm_size; ++i )
589 //info() << " ALLGATHERV I=" << i << " recv_count=" << recv_counts[i]
590 // << " recv_indexes=" << recv_indexes[i];
591 double begin_time = MPI_Wtime();
592 m_mpi_prof->allGatherVariable(_sbuf, _nb_elem, datatype, recv_buf, recv_counts, recv_indexes, datatype, m_communicator);
593 double end_time = MPI_Wtime();
594 double sr_time = (end_time-begin_time);
595 //TODO: determine the message size
596 m_stat->add(MpiInfo(eMpiName::Allgatherv).name().localstr(),sr_time,0);
597}
598
599/*---------------------------------------------------------------------------*/
600/*---------------------------------------------------------------------------*/
601
602void MpiAdapter::
603scatterVariable(const void* send_buf,const int* send_count,const int* send_indexes,
604 void* recv_buf,Int64 nb_elem,Int32 root,MPI_Datatype datatype)
605{
606 void* _sbuf = const_cast<void*>(send_buf);
607 int* _send_count = const_cast<int*>(send_count);
608 int* _send_indexes = const_cast<int*>(send_indexes);
609 int _nb_elem = _checkSize(nb_elem);
610 _trace(MpiInfo(eMpiName::Scatterv).name().localstr());
611 double begin_time = MPI_Wtime();
612 m_mpi_prof->scatterVariable(_sbuf,
613 _send_count,
614 _send_indexes,
615 datatype,
616 recv_buf,
617 _nb_elem,
618 datatype,
619 root,
620 m_communicator);
621 double end_time = MPI_Wtime();
622 double sr_time = (end_time-begin_time);
623 //TODO: determine the message size
624 m_stat->add(MpiInfo(eMpiName::Scatterv).name(),sr_time,0);
625}
626
627/*---------------------------------------------------------------------------*/
628/*---------------------------------------------------------------------------*/
629
630void MpiAdapter::
631allToAll(const void* send_buf,void* recv_buf,Integer count,MPI_Datatype datatype)
632{
633 void* _sbuf = const_cast<void*>(send_buf);
634 int icount = _checkSize(count);
635 _trace(MpiInfo(eMpiName::Alltoall).name().localstr());
636 double begin_time = MPI_Wtime();
637 m_mpi_prof->allToAll(_sbuf, icount, datatype, recv_buf, icount, datatype, m_communicator);
638 double end_time = MPI_Wtime();
639 double sr_time = (end_time-begin_time);
640 //TODO: determine the message size
641 m_stat->add(MpiInfo(eMpiName::Alltoall).name().localstr(),sr_time,0);
642}
643
644/*---------------------------------------------------------------------------*/
645/*---------------------------------------------------------------------------*/
646
647Request MpiAdapter::
648nonBlockingAllToAll(const void* send_buf,void* recv_buf,Integer count,MPI_Datatype datatype)
649{
650 MPI_Request mpi_request = MPI_REQUEST_NULL;
651 int ret = -1;
652 void* _sbuf = const_cast<void*>(send_buf);
653 int icount = _checkSize(count);
654 _trace("MPI_IAlltoall");
655 double begin_time = MPI_Wtime();
656 ret = MPI_Ialltoall(_sbuf,icount,datatype,recv_buf,icount,datatype,m_communicator,&mpi_request);
657 double end_time = MPI_Wtime();
658 double sr_time = (end_time-begin_time);
659 //TODO: determine the message size
660 m_stat->add("IAllToAll",sr_time,0);
661 ARCCORE_ADD_REQUEST(mpi_request);
662 return buildRequest(ret,mpi_request);
663}
664
665/*---------------------------------------------------------------------------*/
666/*---------------------------------------------------------------------------*/
667
668void MpiAdapter::
669allToAllVariable(const void* send_buf,const int* send_counts,
670 const int* send_indexes,void* recv_buf,const int* recv_counts,
671 const int* recv_indexes,MPI_Datatype datatype)
672{
673 void* _sbuf = const_cast<void*>(send_buf);
674 int* _send_counts = const_cast<int*>(send_counts);
675 int* _send_indexes = const_cast<int*>(send_indexes);
676 int* _recv_counts = const_cast<int*>(recv_counts);
677 int* _recv_indexes = const_cast<int*>(recv_indexes);
678
679 _trace(MpiInfo(eMpiName::Alltoallv).name().localstr());
680 double begin_time = MPI_Wtime();
681 m_mpi_prof->allToAllVariable(_sbuf, _send_counts, _send_indexes, datatype,
682 recv_buf, _recv_counts, _recv_indexes, datatype, m_communicator);
683 double end_time = MPI_Wtime();
684 double sr_time = (end_time-begin_time);
685 //TODO: determine the message size
686 m_stat->add(MpiInfo(eMpiName::Alltoallv).name(),sr_time,0);
687}
688
689/*---------------------------------------------------------------------------*/
690/*---------------------------------------------------------------------------*/
691
692Request MpiAdapter::
693nonBlockingAllToAllVariable(const void* send_buf,const int* send_counts,
694 const int* send_indexes,void* recv_buf,const int* recv_counts,
695 const int* recv_indexes,MPI_Datatype datatype)
696{
697 MPI_Request mpi_request = MPI_REQUEST_NULL;
698 int ret = -1;
699 void* _sbuf = const_cast<void*>(send_buf);
700 int* _send_counts = const_cast<int*>(send_counts);
701 int* _send_indexes = const_cast<int*>(send_indexes);
702 int* _recv_counts = const_cast<int*>(recv_counts);
703 int* _recv_indexes = const_cast<int*>(recv_indexes);
704
705 _trace("MPI_Ialltoallv");
706 double begin_time = MPI_Wtime();
707 ret = MPI_Ialltoallv(_sbuf,_send_counts,_send_indexes,datatype,
708 recv_buf,_recv_counts,_recv_indexes,datatype,
709 m_communicator,&mpi_request);
710 double end_time = MPI_Wtime();
711 double sr_time = (end_time-begin_time);
712 //TODO: determine the message size
713 m_stat->add("IAllToAll",sr_time,0);
714 ARCCORE_ADD_REQUEST(mpi_request);
715 return buildRequest(ret,mpi_request);
716}
717
718/*---------------------------------------------------------------------------*/
719/*---------------------------------------------------------------------------*/
720
721void MpiAdapter::
722barrier()
723{
724 // TODO: in principle there should not be any requests in flight
725 // between two barriers, to avoid any problem.
726 // _checkHasNoRequests();
727 // TODO: add the corresponding trace for profiling.
728 MPI_Barrier(m_communicator);
729}
730
731/*---------------------------------------------------------------------------*/
732/*---------------------------------------------------------------------------*/
733
734Request MpiAdapter::
735nonBlockingBarrier()
736{
737 MPI_Request mpi_request = MPI_REQUEST_NULL;
738 int ret = -1;
739 ret = MPI_Ibarrier(m_communicator,&mpi_request);
740 ARCCORE_ADD_REQUEST(mpi_request);
741 return buildRequest(ret,mpi_request);
742}
743
744/*---------------------------------------------------------------------------*/
745/*---------------------------------------------------------------------------*/
746
747void MpiAdapter::
748allReduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
749{
750 void* _sbuf = const_cast<void*>(send_buf);
751 int _n = _checkSize(count);
752 double begin_time = MPI_Wtime();
753 _trace(MpiInfo(eMpiName::Allreduce).name().localstr());
754 try{
755 ++m_nb_all_reduce;
756 m_mpi_prof->allReduce(_sbuf, recv_buf, _n, datatype, op, m_communicator);
757 }
758 catch(TimeoutException& ex)
759 {
760 std::ostringstream ostr;
761 ostr << "MPI_Allreduce"
762 << " send_buf=" << send_buf
763 << " recv_buf=" << recv_buf
764 << " n=" << count
765 << " datatype=" << datatype
766 << " op=" << op
767 << " NB=" << m_nb_all_reduce;
768 ex.setAdditionalInfo(ostr.str());
769 throw;
770 }
771 double end_time = MPI_Wtime();
772 m_stat->add(MpiInfo(eMpiName::Allreduce).name(),end_time-begin_time,count);
773}
774
775/*---------------------------------------------------------------------------*/
776/*---------------------------------------------------------------------------*/
777
778Request MpiAdapter::
779nonBlockingAllReduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
780{
781 MPI_Request mpi_request = MPI_REQUEST_NULL;
782 int ret = -1;
783 void* _sbuf = const_cast<void*>(send_buf);
784 int _n = _checkSize(count);
785 double begin_time = MPI_Wtime();
786 _trace("MPI_IAllreduce");
787 ret = MPI_Iallreduce(_sbuf,recv_buf,_n,datatype,op,m_communicator,&mpi_request);
788 double end_time = MPI_Wtime();
789 m_stat->add("IReduce",end_time-begin_time,_n);
790 ARCCORE_ADD_REQUEST(mpi_request);
791 return buildRequest(ret,mpi_request);
792}
793
794/*---------------------------------------------------------------------------*/
795/*---------------------------------------------------------------------------*/
796
797void MpiAdapter::
798reduce(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op,Integer root)
799{
800 void* _sbuf = const_cast<void*>(send_buf);
801 int _n = _checkSize(count);
802 int _root = static_cast<int>(root);
803 double begin_time = MPI_Wtime();
804 _trace(MpiInfo(eMpiName::Reduce).name().localstr());
805 try{
806 ++m_nb_reduce;
807 m_mpi_prof->reduce(_sbuf, recv_buf, _n, datatype, op, _root, m_communicator);
808 }
809 catch(TimeoutException& ex)
810 {
811 std::ostringstream ostr;
812 ostr << "MPI_reduce"
813 << " send_buf=" << send_buf
814 << " recv_buf=" << recv_buf
815 << " n=" << count
816 << " datatype=" << datatype
817 << " op=" << op
818 << " root=" << root
819 << " NB=" << m_nb_reduce;
820 ex.setAdditionalInfo(ostr.str());
821 throw;
822 }
823
824 double end_time = MPI_Wtime();
825 m_stat->add(MpiInfo(eMpiName::Reduce).name(),end_time-begin_time,0);
826}
827
828/*---------------------------------------------------------------------------*/
829/*---------------------------------------------------------------------------*/
830
831void MpiAdapter::
832scan(const void* send_buf,void* recv_buf,Int64 count,MPI_Datatype datatype,MPI_Op op)
833{
834 void* _sbuf = const_cast<void*>(send_buf);
835 int _n = _checkSize(count);
836 double begin_time = MPI_Wtime();
837 _trace(MpiInfo(eMpiName::Scan).name().localstr());
838 m_mpi_prof->scan(_sbuf, recv_buf, _n, datatype, op, m_communicator);
839 double end_time = MPI_Wtime();
840 m_stat->add(MpiInfo(eMpiName::Scan).name(),end_time-begin_time,count);
841}
842
843/*---------------------------------------------------------------------------*/
844/*---------------------------------------------------------------------------*/
845
846void MpiAdapter::
847directSendRecv(const void* send_buffer,Int64 send_buffer_size,
848 void* recv_buffer,Int64 recv_buffer_size,
849 Int32 proc,Int64 elem_size,MPI_Datatype data_type)
850{
851 void* v_send_buffer = const_cast<void*>(send_buffer);
852 MPI_Status mpi_status;
853 double begin_time = MPI_Wtime();
854 _trace(MpiInfo(eMpiName::Sendrecv).name().localstr());
855 int sbuf_size = _checkSize(send_buffer_size);
856 int rbuf_size = _checkSize(recv_buffer_size);
857 m_mpi_prof->sendRecv(v_send_buffer, sbuf_size, data_type, proc, 99,
858 recv_buffer, rbuf_size, data_type, proc, 99,
859 m_communicator, &mpi_status);
860 double end_time = MPI_Wtime();
861 Int64 send_size = send_buffer_size * elem_size;
862 Int64 recv_size = recv_buffer_size * elem_size;
863 double sr_time = (end_time-begin_time);
864
865 //debug(Trace::High) << "MPI SendRecv: send " << send_size << " recv "
866 // << recv_size << " time " << sr_time ;
867 m_stat->add(MpiInfo(eMpiName::Sendrecv).name(),sr_time,send_size+recv_size);
868}
869
870/*---------------------------------------------------------------------------*/
871/*---------------------------------------------------------------------------*/
872
873Request MpiAdapter::
874sendNonBlockingNoStat(const void* send_buffer,Int64 send_buffer_size,
875 Int32 dest_rank,MPI_Datatype data_type,int mpi_tag)
876{
877 void* v_send_buffer = const_cast<void*>(send_buffer);
878 MPI_Request mpi_request = MPI_REQUEST_NULL;
879 int sbuf_size = _checkSize(send_buffer_size);
880 int ret = 0;
881 m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, dest_rank, mpi_tag, m_communicator, &mpi_request);
882 if (m_is_trace)
883 info() << " ISend ret=" << ret << " proc=" << dest_rank << " tag=" << mpi_tag << " request=" << mpi_request;
884 ARCCORE_ADD_REQUEST(mpi_request);
885 return buildRequest(ret,mpi_request);
886}
887
888/*---------------------------------------------------------------------------*/
889/*---------------------------------------------------------------------------*/
890
891Request MpiAdapter::
892directSend(const void* send_buffer,Int64 send_buffer_size,
893 Int32 proc,Int64 elem_size,MPI_Datatype data_type,
894 int mpi_tag,bool is_blocked
895 )
896{
897 void* v_send_buffer = const_cast<void*>(send_buffer);
898 MPI_Request mpi_request = MPI_REQUEST_NULL;
899
900 double begin_time = 0.0;
901 double end_time = 0.0;
902 Int64 send_size = send_buffer_size * elem_size;
903 int ret = 0;
904 if (m_is_trace)
905 info() << "MPI_TRACE: MPI Send: send before"
906 << " size=" << send_size
907 << " dest=" << proc
908 << " tag=" << mpi_tag
909 << " datatype=" << data_type
910 << " blocking " << is_blocked;
911 if (is_blocked){
912 // if m_mpi_lock is not null, an MPI_ISend
913 // followed by an active MPI_Test polling loop
914 // must be used to avoid any
915 // deadlock issue.
916 if (m_mpi_lock){
917 {
918 MpiLock::Section mls(m_mpi_lock);
919 begin_time = MPI_Wtime();
920 int sbuf_size = _checkSize(send_buffer_size);
921 m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator, &mpi_request);
922 }
923 int is_finished = 0;
924 MPI_Status mpi_status;
925 while (is_finished==0){
926 MpiLock::Section mls(m_mpi_lock);
927 MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
928 if (is_finished!=0){
929 m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
930 end_time = MPI_Wtime();
931 mpi_request = MPI_REQUEST_NULL;
932 }
933 }
934 }
935 else{
936 MpiLock::Section mls(m_mpi_lock);
937 begin_time = MPI_Wtime();
938 int sbuf_size = _checkSize(send_buffer_size);
939 m_mpi_prof->send(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator);
940 end_time = MPI_Wtime();
941 }
942 }
943 else{
944 {
945 MpiLock::Section mls(m_mpi_lock);
946 begin_time = MPI_Wtime();
947 int sbuf_size = _checkSize(send_buffer_size);
948 m_mpi_prof->iSend(v_send_buffer, sbuf_size, data_type, proc, mpi_tag, m_communicator, &mpi_request);
949 if (m_is_trace)
950 info() << " ISend ret=" << ret << " proc=" << proc << " tag=" << mpi_tag << " request=" << mpi_request;
951 end_time = MPI_Wtime();
952 ARCCORE_ADD_REQUEST(mpi_request);
953 }
954 if (m_is_trace){
955 info() << "MPI Send: send after"
956 << " request=" << mpi_request;
957 }
958 }
959 double sr_time = (end_time-begin_time);
960
961 debug(Trace::High) << "MPI Send: send " << send_size
962 << " time " << sr_time << " blocking " << is_blocked;
963 // TODO(FL): figure out how to profile the Isend
964 m_stat->add(MpiInfo(eMpiName::Send).name(),end_time-begin_time,send_size);
965 return buildRequest(ret,mpi_request);
966}
967
968/*---------------------------------------------------------------------------*/
969/*---------------------------------------------------------------------------*/
970
971Request MpiAdapter::
972directSendPack(const void* send_buffer,Int64 send_buffer_size,
973 Int32 proc,int mpi_tag,bool is_blocked)
974{
975 return directSend(send_buffer,send_buffer_size,proc,1,MPI_PACKED,mpi_tag,is_blocked);
976}
977
978/*---------------------------------------------------------------------------*/
979/*---------------------------------------------------------------------------*/
980
981MpiMessagePassingMng* MpiAdapter::
982commSplit(bool keep)
983{
984 MPI_Comm new_comm;
985
986 MPI_Comm_split(m_communicator, (keep) ? 1 : MPI_UNDEFINED, commRank(), &new_comm);
987 if (keep) {
988 // Fails if new_comm is MPI_COMM_NULL
989 return StandaloneMpiMessagePassingMng::create(new_comm, true);
990 }
991 return nullptr;
992}
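// Ranks calling commSplit() with keep==false pass MPI_UNDEFINED to
// MPI_Comm_split, receive MPI_COMM_NULL and therefore get a null manager back.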
993
994/*---------------------------------------------------------------------------*/
995/*---------------------------------------------------------------------------*/
996
997Request MpiAdapter::
998receiveNonBlockingNoStat(void* recv_buffer,Int64 recv_buffer_size,
999 Int32 source_rank,MPI_Datatype data_type,int mpi_tag)
1000{
1001 int rbuf_size = _checkSize(recv_buffer_size);
1002 int ret = 0;
1003 MPI_Request mpi_request = MPI_REQUEST_NULL;
1004 m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, source_rank, mpi_tag, m_communicator, &mpi_request);
1005 ARCCORE_ADD_REQUEST(mpi_request);
1006 return buildRequest(ret,mpi_request);
1007}
1008
1009/*---------------------------------------------------------------------------*/
1010/*---------------------------------------------------------------------------*/
1011
1012Request MpiAdapter::
1013directRecv(void* recv_buffer,Int64 recv_buffer_size,
1014 Int32 proc,Int64 elem_size,MPI_Datatype data_type,
1015 int mpi_tag,bool is_blocked)
1016{
1017 MPI_Status mpi_status;
1018 MPI_Request mpi_request = MPI_REQUEST_NULL;
1019 int ret = 0;
1020 double begin_time = 0.0;
1021 double end_time = 0.0;
1022
1023 int i_proc = 0;
1024 if (proc==A_PROC_NULL_RANK)
1025 ARCCORE_THROW(NotImplementedException,"Receive with MPI_PROC_NULL");
1026 if (proc == A_NULL_RANK && !m_is_allow_null_rank_for_any_source)
1027 ARCCORE_FATAL("Can not use A_NULL_RANK for any source. Use A_ANY_SOURCE_RANK instead");
1028 if (proc==A_NULL_RANK || proc==A_ANY_SOURCE_RANK)
1029 i_proc = MPI_ANY_SOURCE;
1030 else
1031 i_proc = static_cast<int>(proc);
1032
1033 Int64 recv_size = recv_buffer_size * elem_size;
1034 if (m_is_trace){
1035 info() << "MPI_TRACE: MPI Recv: recv before "
1036 << " size=" << recv_size
1037 << " from=" << i_proc
1038 << " tag=" << mpi_tag
1039 << " datatype=" << data_type
1040 << " blocking=" << is_blocked;
1041 }
1042 if (is_blocked){
1043 // if m_mpi_lock is not null, an MPI_IRecv
1044 // followed by an active MPI_Test polling loop
1045 // must be used to avoid any
1046 // deadlock issue.
1047 if (m_mpi_lock){
1048 {
1049 MpiLock::Section mls(m_mpi_lock);
1050 begin_time = MPI_Wtime();
1051 int rbuf_size = _checkSize(recv_buffer_size);
1052 m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1053 }
1054 int is_finished = 0;
1055 MPI_Status mpi_status;
1056 while (is_finished==0){
1057 MpiLock::Section mls(m_mpi_lock);
1058 MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
1059 if (is_finished!=0){
1060 end_time = MPI_Wtime();
1061 m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
1062 mpi_request = MPI_REQUEST_NULL;
1063 }
1064 }
1065 }
1066 else{
1067 MpiLock::Section mls(m_mpi_lock);
1068 begin_time = MPI_Wtime();
1069 int rbuf_size = _checkSize(recv_buffer_size);
1070 m_mpi_prof->recv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_status);
1071 end_time = MPI_Wtime();
1072 }
1073 }
1074 else{
1075 {
1076 MpiLock::Section mls(m_mpi_lock);
1077 begin_time = MPI_Wtime();
1078 int rbuf_size = _checkSize(recv_buffer_size);
1079 m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1080 end_time = MPI_Wtime();
1081 ARCCORE_ADD_REQUEST(mpi_request);
1082 }
1083 if (m_is_trace){
1084 info() << "MPI Recv: recv after "
1085 << " request=" << mpi_request;
1086 }
1087 }
1088 double sr_time = (end_time-begin_time);
1089
1090 debug(Trace::High) << "MPI Recv: recv after " << recv_size
1091 << " time " << sr_time << " blocking " << is_blocked;
1092 m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
1093 return buildRequest(ret,mpi_request);
1094}
1095
1096/*---------------------------------------------------------------------------*/
1097/*---------------------------------------------------------------------------*/
1098
1099void MpiAdapter::
1100probeRecvPack(UniqueArray<Byte>& recv_buffer,Int32 proc)
1101{
1102 double begin_time = MPI_Wtime();
1103 MPI_Status status;
1104 int recv_buffer_size = 0;
1105 _trace("MPI_Probe");
1106 m_mpi_prof->probe(proc, 101, m_communicator, &status);
1107 m_mpi_prof->getCount(&status, MPI_PACKED, &recv_buffer_size);
1108
1109 recv_buffer.resize(recv_buffer_size);
1110 m_mpi_prof->recv(recv_buffer.data(), recv_buffer_size, MPI_PACKED, proc, 101, m_communicator, &status);
1111
1112 double end_time = MPI_Wtime();
1113 Int64 recv_size = recv_buffer_size;
1114 double sr_time = (end_time-begin_time);
1115 debug(Trace::High) << "MPI probeRecvPack " << recv_size
1116 << " time " << sr_time;
1117 m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
1118}
1119
1120/*---------------------------------------------------------------------------*/
1121/*---------------------------------------------------------------------------*/
1122
1123MessageSourceInfo MpiAdapter::
1124_buildSourceInfoFromStatus(const MPI_Status& mpi_status)
1125{
1126 // Get the message size in bytes.
1127 MPI_Count message_size = 0;
1128 MPI_Get_elements_x(&mpi_status,MPI_BYTE,&message_size);
1129 MessageTag tag(mpi_status.MPI_TAG);
1130 MessageRank rank(mpi_status.MPI_SOURCE);
1131 return MessageSourceInfo(rank,tag,message_size);
1132}
1133
1134/*---------------------------------------------------------------------------*/
1135/*---------------------------------------------------------------------------*/
1136
1137MessageId MpiAdapter::
1138_probeMessage(MessageRank source,MessageTag tag,bool is_blocking)
1139{
1140 MPI_Status mpi_status;
1141 int has_message = 0;
1142 MPI_Message message;
1143 int ret = 0;
1144 int mpi_source = source.value();
1145 if (source.isProcNull())
1146 ARCCORE_THROW(NotImplementedException,"Probe with MPI_PROC_NULL");
1147 if (source.isNull() && !m_is_allow_null_rank_for_any_source)
1148 ARCCORE_FATAL("Can not use MPI_Mprobe with null rank. Use MessageRank::anySourceRank() instead");
1149 if (source.isNull() || source.isAnySource())
1150 mpi_source = MPI_ANY_SOURCE;
1151 int mpi_tag = tag.value();
1152 if (tag.isNull())
1153 mpi_tag = MPI_ANY_TAG;
1154 if (is_blocking){
1155 ret = MPI_Mprobe(mpi_source,mpi_tag,m_communicator,&message,&mpi_status);
1156 has_message = true;
1157 }
1158 else {
1159 ret = MPI_Improbe(mpi_source, mpi_tag, m_communicator, &has_message, &message, &mpi_status);
1160 }
1161 if (ret!=0)
1162 ARCCORE_FATAL("Error during call to MPI_Mprobe r={0}",ret);
1163 MessageId ret_message;
1164 if (has_message!=0){
1165 MessageSourceInfo si(_buildSourceInfoFromStatus(mpi_status));
1166 ret_message = MessageId(si,message);
1167 }
1168 return ret_message;
1169}
1170
1171/*---------------------------------------------------------------------------*/
1172/*---------------------------------------------------------------------------*/
1173
1174MessageId MpiAdapter::
1175probeMessage(PointToPointMessageInfo message)
1176{
1177 if (!message.isValid())
1178 return MessageId();
1179
1180 // The message must have been initialized with a (rank/tag) pair.
1181 if (!message.isRankTag())
1182 ARCCORE_FATAL("Invalid message_info: message.isRankTag() is false");
1183
1184 return _probeMessage(message.destinationRank(),message.tag(),message.isBlocking());
1185}
1186
1187/*---------------------------------------------------------------------------*/
1188/*---------------------------------------------------------------------------*/
1189
1190MessageSourceInfo MpiAdapter::
1191_legacyProbeMessage(MessageRank source,MessageTag tag,bool is_blocking)
1192{
1193 MPI_Status mpi_status;
1194 int has_message = 0;
1195 int ret = 0;
1196 int mpi_source = source.value();
1197 if (source.isProcNull())
1198 ARCCORE_THROW(NotImplementedException,"Probe with MPI_PROC_NULL");
1199 if (source.isNull() && !m_is_allow_null_rank_for_any_source)
1200 ARCCORE_FATAL("Can not use MPI_Probe with null rank. Use MessageRank::anySourceRank() instead");
1201 if (source.isNull() || source.isAnySource())
1202 mpi_source = MPI_ANY_SOURCE;
1203 int mpi_tag = tag.value();
1204 if (tag.isNull())
1205 mpi_tag = MPI_ANY_TAG;
1206 if (is_blocking){
1207 ret = MPI_Probe(mpi_source,mpi_tag,m_communicator,&mpi_status);
1208 has_message = true;
1209 }
1210 else
1211 ret = MPI_Iprobe(mpi_source,mpi_tag,m_communicator,&has_message,&mpi_status);
1212 if (ret!=0)
1213 ARCCORE_FATAL("Error during call to MPI_Probe r={0}",ret);
1214 if (has_message!=0)
1215 return _buildSourceInfoFromStatus(mpi_status);
1216 return {};
1217}
1218
1219/*---------------------------------------------------------------------------*/
1220/*---------------------------------------------------------------------------*/
1221
1222MessageSourceInfo MpiAdapter::
1223legacyProbeMessage(PointToPointMessageInfo message)
1224{
1225 if (!message.isValid())
1226 return {};
1227
1228 // The message must have been initialized with a (rank/tag) pair.
1229 if (!message.isRankTag())
1230 ARCCORE_FATAL("Invalid message_info: message.isRankTag() is false");
1231
1232 return _legacyProbeMessage(message.destinationRank(),message.tag(),message.isBlocking());
1233}
1234
1235/*---------------------------------------------------------------------------*/
1236/*---------------------------------------------------------------------------*/
1237 //! Reception via MPI_Mrecv() or MPI_Imrecv()
1238Request MpiAdapter::
1239directRecv(void* recv_buffer,Int64 recv_buffer_size,
1240 MessageId message,Int64 elem_size,MPI_Datatype data_type,
1241 bool is_blocked)
1242{
1243 MPI_Status mpi_status;
1244 MPI_Request mpi_request = MPI_REQUEST_NULL;
1245 MPI_Message mpi_message = (MPI_Message)message;
1246 int ret = 0;
1247 double begin_time = 0.0;
1248 double end_time = 0.0;
1249
1250 Int64 recv_size = recv_buffer_size * elem_size;
1251 if (m_is_trace){
1252 info() << "MPI_TRACE: MPI Mrecv: recv before "
1253 << " size=" << recv_size
1254 << " from_msg=" << message
1255 << " datatype=" << data_type
1256 << " blocking=" << is_blocked;
1257 }
1258 if (is_blocked){
1259 // if m_mpi_lock is not null, an MPI_IRecv
1260 // followed by an active MPI_Test polling loop
1261 // must be used to avoid any
1262 // deadlock issue.
1263 if (m_mpi_lock){
1264 {
1265 MpiLock::Section mls(m_mpi_lock);
1266 begin_time = MPI_Wtime();
1267 int rbuf_size = _checkSize(recv_buffer_size);
1268 MPI_Imrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_request);
1269 //m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1270 }
1271 int is_finished = 0;
1272 MPI_Status mpi_status;
1273 while (is_finished==0){
1274 MpiLock::Section mls(m_mpi_lock);
1275 MPI_Request_get_status(mpi_request,&is_finished,&mpi_status);
1276 if (is_finished!=0){
1277 end_time = MPI_Wtime();
1278 m_mpi_prof->wait(&mpi_request, (MPI_Status *) MPI_STATUS_IGNORE);
1279 mpi_request = MPI_REQUEST_NULL;
1280 }
1281 }
1282 }
1283 else{
1284 MpiLock::Section mls(m_mpi_lock);
1285 begin_time = MPI_Wtime();
1286 int rbuf_size = _checkSize(recv_buffer_size);
1287 MPI_Mrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_status);
1288 //m_mpi_prof->recv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_status);
1289 end_time = MPI_Wtime();
1290 }
1291 }
1292 else{
1293 {
1294 MpiLock::Section mls(m_mpi_lock);
1295 begin_time = MPI_Wtime();
1296 int rbuf_size = _checkSize(recv_buffer_size);
1297 //m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1298 ret = MPI_Imrecv(recv_buffer,rbuf_size,data_type,&mpi_message,&mpi_request);
1299 //m_mpi_prof->iRecv(recv_buffer, rbuf_size, data_type, i_proc, mpi_tag, m_communicator, &mpi_request);
1300 end_time = MPI_Wtime();
1301 ARCCORE_ADD_REQUEST(mpi_request);
1302 }
1303 if (m_is_trace){
1304 info() << "MPI Recv: recv after "
1305 << " request=" << mpi_request;
1306 }
1307 }
1308 double sr_time = (end_time-begin_time);
1309
1310 debug(Trace::High) << "MPI Recv: recv after " << recv_size
1311 << " time " << sr_time << " blocking " << is_blocked;
1312 m_stat->add(MpiInfo(eMpiName::Recv).name(),end_time-begin_time,recv_size);
1313 return buildRequest(ret,mpi_request);
1314}
1315
1316/*---------------------------------------------------------------------------*/
1317/*---------------------------------------------------------------------------*/
1318
1319Request MpiAdapter::
1320directRecvPack(void* recv_buffer,Int64 recv_buffer_size,
1321 Int32 proc,int mpi_tag,bool is_blocking)
1322{
1323 return directRecv(recv_buffer,recv_buffer_size,proc,1,MPI_PACKED,mpi_tag,is_blocking);
1324}
1325
1326/*---------------------------------------------------------------------------*/
1327/*---------------------------------------------------------------------------*/
1328
1329// FIXME: Implement direct method with MPI_STATUS_IGNORE
1330void MpiAdapter::
1331waitAllRequests(ArrayView<Request> requests)
1332{
1333 UniqueArray<bool> indexes(requests.size());
1334 UniqueArray<MPI_Status> mpi_status(requests.size());
1335 while (_waitAllRequestsMPI(requests, indexes, mpi_status)){
1336 ; // Continue as long as there are requests.
1337 }
1338}
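// The loop is needed because a completed request may carry a sub-request whose
// executeOnCompletion() returns a new request; in that case
// _waitAllRequestsMPI() reports that new requests exist and the wait is
// performed again.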
1339
1340/*---------------------------------------------------------------------------*/
1341/*---------------------------------------------------------------------------*/
1342
1343// FIXME: Implement direct method with MPI_STATUS_IGNORE
1344void MpiAdapter::
1345waitSomeRequests(ArrayView<Request> requests,
1346 ArrayView<bool> indexes,
1347 bool is_non_blocking)
1348{
1349 UniqueArray<MPI_Status> mpi_status(requests.size());
1350 waitSomeRequestsMPI(requests, indexes, mpi_status, is_non_blocking);
1351}
1352
1353/*---------------------------------------------------------------------------*/
1354/*---------------------------------------------------------------------------*/
1355
1357{
1358 SubRequestInfo(Ref<ISubRequest> sr, Integer i, int source_rank, int source_tag)
1359 : sub_request(sr)
1360 , index(i)
1361 , mpi_source_rank(source_rank)
1362 , mpi_source_tag(source_tag)
1363 {}
1364
1365 Ref<ISubRequest> sub_request;
1366 Integer index = -1;
1367 int mpi_source_rank = MPI_PROC_NULL;
1368 int mpi_source_tag = 0;
1369};
1370
1371/*---------------------------------------------------------------------------*/
1372/*---------------------------------------------------------------------------*/
1373
1374bool MpiAdapter::
1375_handleEndRequests(ArrayView<Request> requests,ArrayView<bool> done_indexes,
1376 ArrayView<MPI_Status> status)
1377{
1378 UniqueArray<SubRequestInfo> new_requests;
1379 Integer size = requests.size();
1380 {
1381 MpiLock::Section mls(m_mpi_lock);
1382 for( Integer i=0; i<size; ++i ) {
1383 if (done_indexes[i]){
1384 // Be careful to use a reference here, otherwise the reset below does
1385 // not apply to the right variable
1386 Request& r = requests[i];
1387 // Note: the request may be invalid (for instance if it is a blocking
1388 // request) but still have a sub-request.
1389 if (r.hasSubRequest()){
1390 if (m_is_trace)
1391 info() << "Done request with sub-request r=" << r << " mpi_r=" << r << " i=" << i
1392 << " source_rank=" << status[i].MPI_SOURCE
1393 << " source_tag=" << status[i].MPI_TAG;
1394 new_requests.add(SubRequestInfo(r.subRequest(), i, status[i].MPI_SOURCE, status[i].MPI_TAG));
1395 }
1396 if (r.isValid()){
1397 _removeRequest((MPI_Request)(r));
1398 r.reset();
1399 }
1400 }
1401 }
1402 }
1403
1404 // NOTE: calls to the sub-requests may generate other requests.
1405 // Care must be taken not to use the sub-requests while
1406 // the lock is held.
1407 bool has_new_request = false;
1408 if (!new_requests.empty()){
1409 // Holds the status of the \a i-th request
1410 UniqueArray<MPI_Status> old_status(size);
1411 {
1412 Integer index = 0;
1413 for( Integer i=0; i<size; ++i ){
1414 if (done_indexes[i]){
1415 old_status[i] = status[index];
1416 ++index;
1417 }
1418 }
1419 }
1420 // If there are new requests, the 'status' values have to be shifted
1421 for( SubRequestInfo& sri : new_requests ){
1422 Integer index = sri.index;
1423 if (m_is_trace)
1424 info() << "Before handle new request index=" << index
1425 << " sri.source_rank=" << sri.mpi_source_rank
1426 << " sri.source_tag=" << sri.mpi_source_tag;
1427 SubRequestCompletionInfo completion_info(MessageRank(old_status[index].MPI_SOURCE), MessageTag(old_status[index].MPI_TAG));
1428 Request r = sri.sub_request->executeOnCompletion(completion_info);
1429 if (m_is_trace)
1430 info() << "Handle new request index=" << index << " old_r=" << requests[index] << " new_r=" << r;
1431 // If there is a new request, it replaces
1432 // the old one, so we must behave as if
1433 // the original request had not completed.
1434 if (r.isValid()){
1435 has_new_request = true;
1436 requests[index] = r;
1437 done_indexes[index] = false;
1438 }
1439 }
1440 {
1441 Integer index = 0;
1442 for( Integer i=0; i<size; ++i ){
1443 if (done_indexes[i]){
1444 status[index] = old_status[i];
1445 ++index;
1446 }
1447 }
1448 }
1449
1450 }
1451 return has_new_request;
1452}
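// On return, 'done_indexes' and 'status' are consistent again: entries whose
// sub-request produced a new request are marked as not done, and the
// MPI_Status values of the remaining completed requests have been re-packed
// accordingly.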
1453
1454/*---------------------------------------------------------------------------*/
1455/*---------------------------------------------------------------------------*/
1456
1457bool MpiAdapter::
1458_waitAllRequestsMPI(ArrayView<Request> requests,
1459 ArrayView<bool> indexes,
1460 ArrayView<MPI_Status> mpi_status)
1461{
1462 Integer size = requests.size();
1463 if (size==0)
1464 return false;
1465 //WARNING: MPI modifies this array on return from MPI_Waitall
1466 UniqueArray<MPI_Request> mpi_request(size);
1467 for( Integer i=0; i<size; ++i ){
1468 mpi_request[i] = (MPI_Request)(requests[i]);
1469 }
1470 if (m_is_trace)
1471 info() << " MPI_waitall begin size=" << size;
1472 double diff_time = 0.0;
1473 if (m_mpi_lock){
1474 double begin_time = MPI_Wtime();
1475 for( Integer i=0; i<size; ++i ){
1476 MPI_Request request = (MPI_Request)(mpi_request[i]);
1477 int is_finished = 0;
1478 while (is_finished==0){
1479 MpiLock::Section mls(m_mpi_lock);
1480 m_mpi_prof->test(&request, &is_finished, (MPI_Status *) MPI_STATUS_IGNORE);
1481 }
1482 }
1483 double end_time = MPI_Wtime();
1484 diff_time = end_time - begin_time;
1485 }
1486 else{
1487 //TODO: turn this into a while loop with MPI_Testall when m_mpi_lock is not null
1488 MpiLock::Section mls(m_mpi_lock);
1489 double begin_time = MPI_Wtime();
1490 m_mpi_prof->waitAll(size, mpi_request.data(), mpi_status.data());
1491 double end_time = MPI_Wtime();
1492 diff_time = end_time - begin_time;
1493 }
1494
1495 // Mark each request as handled since a waitall was done.
1496 for( Integer i=0; i<size; ++i ){
1497 indexes[i] = true;
1498 }
1499
1500 bool has_new_request = _handleEndRequests(requests,indexes,mpi_status);
1501 if (m_is_trace)
1502 info() << " MPI_waitall end size=" << size;
1503 m_stat->add(MpiInfo(eMpiName::Waitall).name(),diff_time,size);
1504 return has_new_request;
1505}
1506
1507/*---------------------------------------------------------------------------*/
1508/*---------------------------------------------------------------------------*/
1509
1510void MpiAdapter::
1511waitSomeRequestsMPI(ArrayView<Request> requests,ArrayView<bool> indexes,
1512 ArrayView<MPI_Status> mpi_status,bool is_non_blocking)
1513{
1514 Integer size = requests.size();
1515 if (size==0)
1516 return;
1517 //TODO: use StackArray (when available...)
1518 UniqueArray<MPI_Request> mpi_request(size);
1519 UniqueArray<MPI_Request> saved_mpi_request(size);
1520 UniqueArray<int> completed_requests(size);
1521 int nb_completed_request = 0;
1522
1523 // Save the request so it can be deallocated from m_allocated_requests,
1524 // because its value will no longer be valid after the MPI_Wait* call
1525 for (Integer i = 0; i < size; ++i) {
1526 // If this method is called several times
1527 // with the same array of requests, there may be
1528 // invalid requests that would make the MPI call crash.
1529 if (!requests[i].isValid()) {
1530 saved_mpi_request[i] = MPI_REQUEST_NULL;
1531 }
1532 else {
1533 saved_mpi_request[i] = static_cast<MPI_Request>(requests[i]);
1534 }
1535 }
1536
1537 // Only print debug output in blocking mode or when explicitly requested,
1538 // to avoid too many messages
1539 bool is_print_debug = m_is_trace || (!is_non_blocking);
1540 if (is_print_debug)
1541 debug() << "WaitRequestBegin is_non_blocking=" << is_non_blocking << " n=" << size;
1542
1543 double begin_time = MPI_Wtime();
1544
1545 try{
1546 if (is_non_blocking){
1547 _trace(MpiInfo(eMpiName::Testsome).name().localstr());
1548 {
1549 MpiLock::Section mls(m_mpi_lock);
1550 m_mpi_prof->testSome(size, saved_mpi_request.data(), &nb_completed_request,
1551 completed_requests.data(), mpi_status.data());
1552 }
1553 //If there is no active handle in the list, it returns outcount = MPI_UNDEFINED.
 1554 if (nb_completed_request == MPI_UNDEFINED) // Si aucune requête n'était valide.
1555 nb_completed_request = 0;
1556 if (is_print_debug)
1557 debug() << "WaitSomeRequestMPI: TestSome nb_completed=" << nb_completed_request;
1558 }
1559 else{
1560 _trace(MpiInfo(eMpiName::Waitsome).name().localstr());
1561 {
1562 // TODO: si le verrou existe, il faut faire une boucle de testSome() pour ne
1563 // pas bloquer.
1564 MpiLock::Section mls(m_mpi_lock);
1565 m_mpi_prof->waitSome(size, saved_mpi_request.data(), &nb_completed_request,
1566 completed_requests.data(), mpi_status.data());
1567 }
1568 // Il ne faut pas utiliser mpi_request[i] car il est modifié par Mpi
1569 // mpi_request[i] == MPI_REQUEST_NULL
 1570 if (nb_completed_request == MPI_UNDEFINED) // Si aucune requête n'était valide.
1571 nb_completed_request = 0;
1572 if (is_print_debug)
1573 debug() << "WaitSomeRequest nb_completed=" << nb_completed_request;
1574 }
1575 }
1576 catch(TimeoutException& ex)
1577 {
1578 std::ostringstream ostr;
1579 if (is_non_blocking)
1580 ostr << MpiInfo(eMpiName::Testsome).name();
1581 else
1582 ostr << MpiInfo(eMpiName::Waitsome).name();
1583 ostr << " size=" << size
1584 << " is_non_blocking=" << is_non_blocking;
1585 ex.setAdditionalInfo(ostr.str());
1586 throw;
1587 }
1588
1589 for( int z=0; z<nb_completed_request; ++z ){
1590 int index = completed_requests[z];
1591 if (is_print_debug)
1592 debug() << "Completed my_rank=" << m_comm_rank << " z=" << z
1593 << " index=" << index
1594 << " tag=" << mpi_status[z].MPI_TAG
1595 << " source=" << mpi_status[z].MPI_SOURCE;
1596
1597 indexes[index] = true;
1598 }
1599
1600 bool has_new_request = _handleEndRequests(requests,indexes,mpi_status);
1601 if (has_new_request){
1602 // Si on a de nouvelles requêtes, alors il est possible qu'aucune
 1603 // requête n'ait abouti. En cas de testSome, cela n'est pas grave.
1604 // En cas de waitSome, cela signifie qu'il faut attendre à nouveau.
1605 }
1606 double end_time = MPI_Wtime();
1607 m_stat->add(MpiInfo(eMpiName::Waitsome).name(),end_time-begin_time,size);
1608}
1609
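/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
 * Editor's note: illustrative sketch only, not part of the original
 * MpiAdapter.cc. Minimal use of MPI_Testsome showing the MPI_UNDEFINED case
 * handled in waitSomeRequestsMPI(): when every entry of the request array is
 * MPI_REQUEST_NULL, 'outcount' is set to MPI_UNDEFINED and must not be used
 * as a loop bound.
 */
[[maybe_unused]] static int
sampleTestSomeCompleted(int nb_request, MPI_Request* requests,
                        int* completed_indexes, MPI_Status* statuses)
{
  int outcount = 0;
  MPI_Testsome(nb_request, requests, &outcount, completed_indexes, statuses);
  if (outcount == MPI_UNDEFINED)
    return 0; // no active request in the list
  return outcount;
}
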
1610/*---------------------------------------------------------------------------*/
1611/*---------------------------------------------------------------------------*/
1612
1613void MpiAdapter::
1614freeRequest(Request& request)
1615{
1616 if (!request.isValid()){
1617 warning() << "MpiAdapter::freeRequest() null request r=" << (MPI_Request)request;
1618 _checkFatalInRequest();
1619 return;
1620 }
1621 {
1622 MpiLock::Section mls(m_mpi_lock);
1623
1624 auto mr = (MPI_Request)request;
1625 _removeRequest(mr);
1626 MPI_Request_free(&mr);
1627 }
1628 request.reset();
1629}
1630
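/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
 * Editor's note: illustrative sketch only, not part of the original
 * MpiAdapter.cc. Typical lifetime of a persistent send request, the usual
 * situation where an explicit MPI_Request_free is needed, as done by
 * freeRequest() above. The tag value 100 is arbitrary.
 */
[[maybe_unused]] static void
samplePersistentSend(const int* buffer, int count, int dest, MPI_Comm comm)
{
  MPI_Request request;
  MPI_Send_init(buffer, count, MPI_INT, dest, 100 /*tag*/, comm, &request);
  MPI_Start(&request);
  MPI_Wait(&request, MPI_STATUS_IGNORE); // completes one round of the persistent send
  MPI_Request_free(&request);            // releases the persistent request object
}
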
1631/*---------------------------------------------------------------------------*/
1632/*---------------------------------------------------------------------------*/
1633
1634bool MpiAdapter::
1635testRequest(Request& request)
1636{
1637 // Il est autorisé par MPI de faire un test avec une requête nulle.
1638 if (!request.isValid())
1639 return true;
1640
1641 auto mr = (MPI_Request)request;
1642 int is_finished = 0;
1643
1644 {
1645 MpiLock::Section mls(m_mpi_lock);
1646
 1647 // Il faut d'abord récupérer l'emplacement de la requête car, si elle
 1648 // est finie, elle sera automatiquement libérée par MPI lors du test
 1649 // et on ne pourra donc plus la supprimer.
1650 RequestSet::Iterator request_iter = m_request_set->findRequest(mr);
1651
1652 m_mpi_prof->test(&mr, &is_finished, (MPI_Status *) MPI_STATUS_IGNORE);
1653 //info() << "** TEST REQUEST r=" << mr << " is_finished=" << is_finished;
1654 if (is_finished!=0){
1655 m_request_set->removeRequest(request_iter);
1656 if (request.hasSubRequest())
1657 ARCCORE_THROW(NotImplementedException,"SubRequest support");
1658 request.reset();
1659 return true;
1660 }
1661 }
1662
1663 return false;
1664}
1665
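/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
 * Editor's note: illustrative sketch only, not part of the original
 * MpiAdapter.cc. After a successful MPI_Test, MPI deallocates the request and
 * overwrites the handle with MPI_REQUEST_NULL; this is why testRequest()
 * above looks the request up in its tracking set *before* testing it.
 */
[[maybe_unused]] static bool
sampleTestAndCheckNull(MPI_Request& request)
{
  int is_finished = 0;
  MPI_Test(&request, &is_finished, MPI_STATUS_IGNORE);
  if (is_finished != 0) {
    // 'request' is now MPI_REQUEST_NULL; the original handle value is gone.
    return true;
  }
  return false;
}
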
1666/*---------------------------------------------------------------------------*/
1667/*---------------------------------------------------------------------------*/
1668/*!
 1669 * \warning Cette fonction doit être appelée avec le verrou mpi_lock actif.
1670 */
1671void MpiAdapter::
1672_addRequest(MPI_Request request)
1673{
1674 m_request_set->addRequest(request);
1675}
1676
1677/*---------------------------------------------------------------------------*/
1678/*---------------------------------------------------------------------------*/
1679/*!
 1680 * \warning Cette fonction doit être appelée avec le verrou mpi_lock actif.
1681 */
1682void MpiAdapter::
1683_removeRequest(MPI_Request request)
1684{
1685 m_request_set->removeRequest(request);
1686}
1687
1688/*---------------------------------------------------------------------------*/
1689/*---------------------------------------------------------------------------*/
1690
1691void MpiAdapter::
1692enableDebugRequest(bool enable_debug_request)
1693{
1694 m_stat->enable(enable_debug_request);
1695 //if (!m_enable_debug_request)
 1696 //info() << "WARNING: Mpi adapter debug request is disabled (multi-threading)";
1697}
1698
1699/*---------------------------------------------------------------------------*/
1700/*---------------------------------------------------------------------------*/
1701
1702void MpiAdapter::
1703_checkFatalInRequest()
1704{
1705 if (isRequestErrorAreFatal())
1706 ARCCORE_FATAL("Error in requests management");
1707}
1708
1709/*---------------------------------------------------------------------------*/
1710/*---------------------------------------------------------------------------*/
1711
1712void MpiAdapter::
1713setMpiProfiling(IMpiProfiling* mpi_profiling)
1714{
1715 m_mpi_prof = mpi_profiling;
1716}
1717
1718/*---------------------------------------------------------------------------*/
1719/*---------------------------------------------------------------------------*/
1720
1721IMpiProfiling* MpiAdapter::
1722getMpiProfiling() const
1723{
1724 return m_mpi_prof;
1725}
1726
1727/*---------------------------------------------------------------------------*/
1728/*---------------------------------------------------------------------------*/
1729
1730void MpiAdapter::
1731setProfiler(IProfiler* profiler)
1732{
1733 if (!profiler){
1734 m_mpi_prof = nullptr;
1735 return;
1736 }
1737
1738 IMpiProfiling* p = dynamic_cast<IMpiProfiling*>(profiler);
1739 if (!p)
 1740 ARCCORE_FATAL("Invalid profiler. Profiler has to implement interface 'IMpiProfiling'");
1741 m_mpi_prof = p;
1742}
1743
1744/*---------------------------------------------------------------------------*/
1745/*---------------------------------------------------------------------------*/
1746
1747IProfiler* MpiAdapter::
1748profiler() const
1749{
1750 return m_mpi_prof;
1751}
1752
1753/*---------------------------------------------------------------------------*/
1754/*---------------------------------------------------------------------------*/
1755
1756MpiMachineMemoryWindowBaseInternalCreator* MpiAdapter::
1757windowCreator()
1758{
1759 if (m_window_creator == nullptr) {
1760 MPI_Comm_split_type(m_communicator, MPI_COMM_TYPE_SHARED, m_comm_rank, MPI_INFO_NULL, &m_machine_communicator);
1761 MPI_Comm_rank(m_machine_communicator, &m_machine_comm_rank);
1762 MPI_Comm_size(m_machine_communicator, &m_machine_comm_size);
1763 m_window_creator = new MpiMachineMemoryWindowBaseInternalCreator(m_machine_communicator, m_machine_comm_rank, m_machine_comm_size, m_communicator, m_comm_size);
1764 }
1765 return m_window_creator;
1766}
1767
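/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
 * Editor's note: illustrative sketch only, not part of the original
 * MpiAdapter.cc. It builds a per-node ("machine") communicator the same way
 * windowCreator() does: MPI_COMM_TYPE_SHARED groups the ranks that can share
 * memory, and the world rank is reused as the ordering key.
 */
[[maybe_unused]] static void
sampleSplitByNode(MPI_Comm world_comm)
{
  int world_rank = 0;
  MPI_Comm_rank(world_comm, &world_rank);

  MPI_Comm node_comm;
  MPI_Comm_split_type(world_comm, MPI_COMM_TYPE_SHARED, world_rank,
                      MPI_INFO_NULL, &node_comm);

  int node_rank = 0;
  int node_size = 0;
  MPI_Comm_rank(node_comm, &node_rank);
  MPI_Comm_size(node_comm, &node_size);

  // ... node_comm can now back a shared-memory window ...
  MPI_Comm_free(&node_comm);
}
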
1768/*---------------------------------------------------------------------------*/
1769/*---------------------------------------------------------------------------*/
1770
1771// void* MpiAdapter::
1772// createAllInOneMachineMemoryWindowBase(Integer sizeof_local) const
1773// {
1774// //MPI_Aint offset = sizeof(MPI_Win) + sizeof(Integer);
1775// MPI_Aint offset = sizeof(MPI_Win);
1776//
1777// MPI_Win win;
1778//
1779// MPI_Info win_info;
1780// MPI_Info_create(&win_info);
1781//
1782// MPI_Info_set(win_info, "alloc_shared_noncontig", "false");
1783//
1784// const MPI_Aint new_size = offset + sizeof_local;
1785//
1786// char* my_section;
1787// int error = MPI_Win_allocate_shared(new_size, 1, win_info, m_machine_communicator, &my_section, &win);
1788//
1789// assert(error != MPI_ERR_ARG && "MPI_ERR_ARG");
1790// assert(error != MPI_ERR_COMM && "MPI_ERR_COMM");
1791// assert(error != MPI_ERR_INFO && "MPI_ERR_INFO");
1792// assert(error != MPI_ERR_OTHER && "MPI_ERR_OTHER");
1793// assert(error != MPI_ERR_SIZE && "MPI_ERR_SIZE");
1794//
1795// MPI_Info_free(&win_info);
1796//
1797// memcpy(my_section, &win, sizeof(MPI_Win));
1798// my_section += sizeof(MPI_Win);
1799//
1800// // memcpy(my_section, &sizeof_local, sizeof(Integer));
1801// // my_section += sizeof(Integer);
1802//
1803// return my_section;
1804// }
1805
1806/*---------------------------------------------------------------------------*/
1807/*---------------------------------------------------------------------------*/
1808
1809// void MpiAdapter::
1810// freeAllInOneMachineMemoryWindowBase(void* aio_node_window) const
1811// {
1812// //MPI_Aint offset = sizeof(MPI_Win) + sizeof(Int64);
1813// MPI_Aint offset = sizeof(MPI_Win);
1814//
1815// MPI_Win* win = reinterpret_cast<MPI_Win*>(static_cast<char*>(aio_node_window) - offset);
1816//
1817// MPI_Win win_local;
1818// memcpy(&win_local, win, sizeof(MPI_Win));
1819//
1820// MPI_Win_free(&win_local);
1821// }
1822
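/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
 * Editor's note: illustrative sketch only, not part of the original
 * MpiAdapter.cc. Standard pattern behind the commented-out code above:
 * allocate a shared-memory window on a per-node communicator, then query the
 * base address of another rank's segment with MPI_Win_shared_query, and free
 * the window when done.
 */
[[maybe_unused]] static void
sampleSharedWindow(MPI_Comm node_comm, MPI_Aint local_size)
{
  char* local_ptr = nullptr;
  MPI_Win win;
  // Every rank of 'node_comm' contributes 'local_size' bytes to the window.
  MPI_Win_allocate_shared(local_size, 1, MPI_INFO_NULL, node_comm, &local_ptr, &win);

  // Base address, size and displacement unit of rank 0's segment, valid in
  // this process because the window is node-local.
  MPI_Aint segment_size = 0;
  int disp_unit = 0;
  char* rank0_ptr = nullptr;
  MPI_Win_shared_query(win, 0, &segment_size, &disp_unit, &rank0_ptr);

  // ... read/write through local_ptr and rank0_ptr with proper synchronization ...
  MPI_Win_free(&win);
}
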
1823/*---------------------------------------------------------------------------*/
1824/*---------------------------------------------------------------------------*/
1825
1826} // End namespace Arcane::MessagePassing::Mpi
1827
1828/*---------------------------------------------------------------------------*/
1829/*---------------------------------------------------------------------------*/