Otf2MpiProfiling.cc
// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
//-----------------------------------------------------------------------------
// Copyright 2000-2022 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: Apache-2.0
//-----------------------------------------------------------------------------
/*---------------------------------------------------------------------------*/
/* Otf2MpiProfiling.cc                                         (C) 2000-2020 */
/*                                                                           */
/* Implementation of the IMpiProfiling interface providing OTF2              */
/* instrumentation.                                                          */
/*---------------------------------------------------------------------------*/
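//
// Usage sketch (an assumption, not taken from this file): an Otf2MpiProfiling
// instance is built on top of an Otf2LibWrapper and then installed as the
// IMpiProfiling implementation of the MPI message-passing layer, e.g.:
//
//   Otf2LibWrapper* wrapper = ...;        // owned by the OTF2 trace machinery
//   Otf2MpiProfiling profiling(wrapper);  // the class implemented below
//   // hand &profiling to the MPI adapter as its IMpiProfiling
//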

#include "arccore/message_passing_mpi/MessagePassingMpiEnum.h"

#include "arcane/utils/FatalErrorException.h"
#include "arcane/std/Otf2MpiProfiling.h"
#include "arcane/std/Otf2LibWrapper.h"

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

namespace Arcane
{
using namespace Arccore::MessagePassing::Mpi;

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
// Helper around the MPI datatype size query...
namespace
{
using ReturnType = Otf2MpiProfiling::ReturnType;

uint64_t getSizeOfMpiType(MPI_Datatype datatype)
{
  int tmp_size(0);
  MPI_Type_size(datatype, &tmp_size);
  return static_cast<uint64_t>(tmp_size);
}

}
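
// For example, getSizeOfMpiType(MPI_INT) is typically 4 and
// getSizeOfMpiType(MPI_DOUBLE) is 8; these sizes are used below to compute
// the byte counts reported in the OTF2 collective events.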

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
// Note: we do not take an IMpiProfiling to decorate; MPI is called directly
// to avoid yet another pointless indirection. This can easily be changed if
// needed by adding an IMpiProfiling parameter to the constructor.

Otf2MpiProfiling::
Otf2MpiProfiling(Otf2LibWrapper* otf2_wrapper)
: m_otf2_wrapper(otf2_wrapper)
{
}
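
// A delegating variant, as mentioned in the note above, could look like this
// (sketch only, not part of the current implementation; the extra member and
// parameter are assumptions):
//
//   Otf2MpiProfiling(Otf2LibWrapper* otf2_wrapper, IMpiProfiling* decorated)
//   : m_otf2_wrapper(otf2_wrapper)
//   , m_decorated(decorated)
//   {
//   }
//
// Each wrapper below would then forward to m_decorated (e.g.
// m_decorated->broadcast(...)) between the OTF2 events instead of calling the
// MPI_* functions directly.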
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

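// Every collective wrapper below follows the same pattern: record an Enter
// event for the corresponding MPI region, emit OTF2_EvtWriter_MpiCollectiveBegin,
// perform the actual MPI call, emit OTF2_EvtWriter_MpiCollectiveEnd with the
// byte counts (zero where they are not computed), then record the matching
// Leave event. For example, broadcasting 100 MPI_INT (typically 4 bytes each)
// from the root is reported as 400 bytes provided on the root and 400 bytes
// obtained on every other rank.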
Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
broadcast(void *buffer, int count, MPI_Datatype datatype, int root, MPI_Comm comm)
{
  // Compute the sizes of the exchanged data
  const uint64_t byte_send(m_otf2_wrapper->getMpiRank()==root?getSizeOfMpiType(datatype)*count:0);
  const uint64_t byte_recv(m_otf2_wrapper->getMpiRank()==root?0:getSizeOfMpiType(datatype)*count);

  _doEventEnter(eMpiName::Bcast);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Bcast(buffer, count, datatype, root, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_BCAST, 0 /* comm region */, root,
                                  byte_send /* bytes provided */, byte_recv /* bytes obtained */);

  _doEventLeave(eMpiName::Bcast);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
gather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
       int recvcount, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  // Compute the sizes of the exchanged data
  const uint64_t byte_send(getSizeOfMpiType(sendtype)*sendcount);
  const uint64_t byte_recv(m_otf2_wrapper->getMpiRank()==root?getSizeOfMpiType(recvtype)*recvcount:0);

  _doEventEnter(eMpiName::Gather);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, root, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_GATHER, 0 /* comm region */, root,
                                  byte_send /* bytes provided */, byte_recv /* bytes obtained */);

  _doEventLeave(eMpiName::Gather);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
gatherVariable(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
               const int *recvcounts, const int *displs, MPI_Datatype recvtype, int root, MPI_Comm comm)
{
  // Compute the sizes of the exchanged data
  const uint64_t byte_send(getSizeOfMpiType(sendtype)*sendcount);
  uint64_t byte_recv(0);

  auto f_byte_recv = [&](){for(int i(0); i < m_otf2_wrapper->getMpiNbRank(); ++i)
                             byte_recv += getSizeOfMpiType(recvtype)*recvcounts[i];};
  if (m_otf2_wrapper->getMpiRank() == root)
    f_byte_recv();

  _doEventEnter(eMpiName::Gatherv);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, root, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_GATHERV, 0 /* comm region */, root,
                                  byte_send /* bytes provided */, byte_recv /* bytes obtained */);

  _doEventLeave(eMpiName::Gatherv);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
allGather(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
          int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  // Compute the sizes of the exchanged data
  const uint64_t byte_send(getSizeOfMpiType(sendtype)*sendcount);
  const uint64_t byte_recv(getSizeOfMpiType(recvtype)*recvcount*m_otf2_wrapper->getMpiNbRank());

  _doEventEnter(eMpiName::Allgather);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_ALLGATHER, 0 /* comm region */, OTF2_UNDEFINED_UINT32 /* root */,
                                  byte_send /* bytes provided */, byte_recv /* bytes obtained */);

  _doEventLeave(eMpiName::Allgather);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
allGatherVariable(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
                  const int *recvcounts, const int *displs, MPI_Datatype recvtype, MPI_Comm comm)
{
  // Compute the sizes of the exchanged data
  const uint64_t byte_send(getSizeOfMpiType(sendtype)*sendcount);
  uint64_t byte_recv(0);

  for (int i(0); i < m_otf2_wrapper->getMpiNbRank(); ++i)
    byte_recv += getSizeOfMpiType(recvtype)*recvcounts[i];

  _doEventEnter(eMpiName::Allgatherv);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs, recvtype, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_ALLGATHERV, 0 /* comm region */, OTF2_UNDEFINED_UINT32 /* root */,
                                  byte_send /* bytes provided */, byte_recv /* bytes obtained */);

  _doEventLeave(eMpiName::Allgatherv);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
scatterVariable(const void *sendbuf, const int *sendcounts, const int *displs,
                MPI_Datatype sendtype, void *recvbuf, int recvcount, MPI_Datatype recvtype,
                int root, MPI_Comm comm)
{
  // Compute the sizes of the exchanged data
  uint64_t byte_send(0);
  const uint64_t byte_recv(getSizeOfMpiType(recvtype)*recvcount);

  auto f_byte_send = [&](){for(int i(0); i < m_otf2_wrapper->getMpiNbRank(); ++i)
                             byte_send += getSizeOfMpiType(sendtype)*sendcounts[i];};
  if (m_otf2_wrapper->getMpiRank() == root)
    f_byte_send();

  _doEventEnter(eMpiName::Scatterv);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount, recvtype, root, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_SCATTERV, 0 /* comm region */, root,
                                  byte_send /* bytes provided */, byte_recv /* bytes obtained */);

  _doEventLeave(eMpiName::Scatterv);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
allToAll(const void *sendbuf, int sendcount, MPI_Datatype sendtype, void *recvbuf,
         int recvcount, MPI_Datatype recvtype, MPI_Comm comm)
{
  _doEventEnter(eMpiName::Alltoall);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_ALLTOALL, 0 /* comm region */, OTF2_UNDEFINED_UINT32 /* root */,
                                  0 /* bytes provided */, 0 /* bytes obtained */);

  _doEventLeave(eMpiName::Alltoall);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
allToAllVariable(const void *sendbuf, const int *sendcounts, const int *sdispls,
                 MPI_Datatype sendtype, void *recvbuf, const int *recvcounts,
                 const int *rdispls, MPI_Datatype recvtype, MPI_Comm comm)
{
  _doEventEnter(eMpiName::Alltoallv);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype, recvbuf, recvcounts, rdispls, recvtype, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_ALLTOALLV, 0 /* comm region */, OTF2_UNDEFINED_UINT32 /* root */,
                                  0 /* bytes provided */, 0 /* bytes obtained */);

  _doEventLeave(eMpiName::Alltoallv);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
barrier(MPI_Comm comm)
{
  _doEventEnter(eMpiName::Barrier);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Barrier(comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_BARRIER, 0 /* comm region */, OTF2_UNDEFINED_UINT32 /* root */,
                                  0 /* bytes provided */, 0 /* bytes obtained */);

  _doEventLeave(eMpiName::Barrier);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
reduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype,
       MPI_Op op, int root, MPI_Comm comm)
{
  _doEventEnter(eMpiName::Reduce);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_REDUCE, 0 /* comm region */, root,
                                  0 /* bytes provided */, 0 /* bytes obtained */);

  _doEventLeave(eMpiName::Reduce);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
allReduce(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  // Compute the sizes of the exchanged data
  const uint64_t byte_send(getSizeOfMpiType(datatype) * count);
  const uint64_t byte_recv(getSizeOfMpiType(datatype) * count);

  _doEventEnter(eMpiName::Allreduce);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm);

  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_ALLREDUCE, 0 /* comm region */, OTF2_UNDEFINED_UINT32 /* root */,
                                  byte_send /* bytes provided */, byte_recv /* bytes obtained */);

  _doEventLeave(eMpiName::Allreduce);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
scan(const void *sendbuf, void *recvbuf, int count, MPI_Datatype datatype, MPI_Op op, MPI_Comm comm)
{
  _doEventEnter(eMpiName::Scan);

  OTF2_EvtWriter_MpiCollectiveBegin(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime());

  int r = MPI_Scan(sendbuf, recvbuf, count, datatype, op, comm);

  const uint64_t size = getSizeOfMpiType(datatype) * static_cast<uint64_t>(count);
  OTF2_EvtWriter_MpiCollectiveEnd(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  OTF2_COLLECTIVE_OP_SCAN, 0 /* comm region */, OTF2_UNDEFINED_UINT32 /* root */,
                                  size /* bytes provided */, size /* bytes obtained */);

  _doEventLeave(eMpiName::Scan);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
sendRecv(const void *sendbuf, int sendcount, MPI_Datatype sendtype, int dest,
         int sendtag, void *recvbuf, int recvcount, MPI_Datatype recvtype,
         int source, int recvtag, MPI_Comm comm, MPI_Status *status)
{
  _doEventEnter(eMpiName::Sendrecv);

  int r = MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag, recvbuf, recvcount, recvtype,
                       source, recvtag, comm, status);

  _doEventLeave(eMpiName::Sendrecv);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
iSend(const void *buf, int count, MPI_Datatype datatype, int dest, int tag,
      MPI_Comm comm, MPI_Request *request)
{
  _doEventEnter(eMpiName::Isend);

  int r = MPI_Isend(buf, count, datatype, dest, tag, comm, request);
  // OTF2 needs an integer request id, so use the Fortran value of the request,
  // which is guaranteed to be an integer (unlike MPI_Request)
  const uint64_t request_id = static_cast<uint64_t>(MPI_Request_c2f(*request));
  OTF2_EvtWriter_MpiIsend(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                          dest, 0 /* comm region */, tag, getSizeOfMpiType(datatype) * count, request_id);

  _doEventLeave(eMpiName::Isend);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
send(const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm)
{
  _doEventEnter(eMpiName::Send);

  int r = MPI_Send(buf, count, datatype, dest, tag, comm);

  OTF2_EvtWriter_MpiSend(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                         dest, 0 /* comm region */, tag, getSizeOfMpiType(datatype) * count);

  _doEventLeave(eMpiName::Send);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
iRecv(void *buf, int count, MPI_Datatype datatype, int source, int tag,
      MPI_Comm comm, MPI_Request *request)
{
  _doEventEnter(eMpiName::Irecv);

  int r = MPI_Irecv(buf, count, datatype, source, tag, comm, request);

  // TODO: get this to work...
  // OTF2_EvtWriter_MpiIrecv(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
  //                         source, 0 /* comm region */, tag, getSizeOfMpiType(datatype) * count, *request);
  _doEventLeave(eMpiName::Irecv);
  return _ret(r);
}
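
// The commented-out MpiIrecv event above presumably fails because *request is
// not an integer type on every MPI implementation. The same workaround as in
// iSend() should apply (an assumption, not in the original file): pass
// static_cast<uint64_t>(MPI_Request_c2f(*request)) as the request id.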

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
recv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Status *status)
{
  _doEventEnter(eMpiName::Recv);
  int r = MPI_Recv(buf, count, datatype, source, tag, comm, status);

  OTF2_EvtWriter_MpiRecv(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                         source, 0 /* comm region */, tag, getSizeOfMpiType(datatype) * count);

  _doEventLeave(eMpiName::Recv);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
test(MPI_Request *request, int *flag, MPI_Status *status)
{
  _doEventEnter(eMpiName::Test);
  if (!MPI_Test(request, flag, status)){
    OTF2_EvtWriter_MpiRequestTest(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                                  static_cast<uint64_t>(MPI_Request_c2f(*request)));
  }
  _doEventLeave(eMpiName::Test);
  return _ret(0);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
probe(int source, int tag, MPI_Comm comm, MPI_Status *status)
{
  _doEventEnter(eMpiName::Probe);
  int r = MPI_Probe(source, tag, comm, status);
  _doEventLeave(eMpiName::Probe);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
getCount(const MPI_Status *status, MPI_Datatype datatype, int *count)
{
  _doEventEnter(eMpiName::Get_count);
  int r = MPI_Get_count(status, datatype, count);
  _doEventLeave(eMpiName::Get_count);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
wait(MPI_Request *request, MPI_Status *status)
{
  _doEventEnter(eMpiName::Wait);
  int r = MPI_Wait(request, status);
  _doEventLeave(eMpiName::Wait);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
waitAll(int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[])
{
  _doEventEnter(eMpiName::Waitall);
  int r = MPI_Waitall(count, array_of_requests, array_of_statuses);
  _doEventLeave(eMpiName::Waitall);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
testSome(int incount, MPI_Request array_of_requests[], int *outcount,
         int array_of_indices[], MPI_Status array_of_statuses[])
{
  _doEventEnter(eMpiName::Testsome);
  int r = MPI_Testsome(incount, array_of_requests, outcount, array_of_indices, array_of_statuses);
  _doEventLeave(eMpiName::Testsome);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

Otf2MpiProfiling::ReturnType Otf2MpiProfiling::
waitSome(int incount, MPI_Request array_of_requests[], int *outcount,
         int array_of_indices[], MPI_Status array_of_statuses[])
{
  _doEventEnter(eMpiName::Waitsome);
  int r = MPI_Waitsome(incount, array_of_requests, outcount, array_of_indices, array_of_statuses);
  _doEventLeave(eMpiName::Waitsome);
  return _ret(r);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

void Otf2MpiProfiling::
_doEventEnter(eMpiName event_name)
{
  OTF2_EvtWriter_Enter(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                       static_cast<uint32_t>(event_name));
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

void Otf2MpiProfiling::
_doEventLeave(eMpiName event_name)
{
  OTF2_EvtWriter_Leave(m_otf2_wrapper->getEventWriter(), NULL, Otf2LibWrapper::getTime(),
                       static_cast<uint32_t>(event_name));
}
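
// The casts above use the numeric value of eMpiName directly as the OTF2
// region reference, which presumably matches the region definitions written
// by Otf2LibWrapper when the archive is initialised.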

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

} // namespace Arcane

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/