Arcane v3.16.0.0
Developer documentation
MpiParallelNonBlockingCollectiveDispatch.cc
// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
//-----------------------------------------------------------------------------
// Copyright 2000-2025 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: Apache-2.0
//-----------------------------------------------------------------------------
/*---------------------------------------------------------------------------*/
/* MpiParallelNonBlockingCollectiveDispatch.cc                 (C) 2000-2025 */
/*                                                                           */
/* MPI implementation of the non-blocking collectives for a given type.     */
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

#include "arcane/utils/Array.h"
#include "arcane/utils/NotImplementedException.h"
#include "arcane/utils/Real2.h"
#include "arcane/utils/Real3.h"
#include "arcane/utils/Real2x2.h"
#include "arcane/utils/Real3x3.h"
#include "arcane/utils/HPReal.h"
#include "arcane/utils/FatalErrorException.h"

#include "arcane/core/IParallelNonBlockingCollective.h"
#include "arcane/core/ParallelMngDispatcher.h"

#include "arcane/parallel/mpi/MpiParallelNonBlockingCollectiveDispatch.h"
#include "arcane/parallel/mpi/MpiDatatype.h"
#include "arcane/parallel/mpi/MpiParallelDispatch.h"

#include "arccore/message_passing_mpi/internal/MpiAdapter.h"
#include "arccore/message_passing_mpi/internal/MpiLock.h"

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

namespace Arcane
{

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> MpiParallelNonBlockingCollectiveDispatchT<Type>::
MpiParallelNonBlockingCollectiveDispatchT(ITraceMng* tm, IParallelNonBlockingCollective* collective_mng,
                                          MpiAdapter* adapter)
: TraceAccessor(tm)
, m_parallel_mng(collective_mng->parallelMng())
, m_adapter(adapter)
, m_datatype(nullptr)
{
  // Retrieve the datatype through the MpiParallelDispatch dispatcher.
  // TODO: create a type that holds all the MpiDatatype instances.
  auto pmd = dynamic_cast<ParallelMngDispatcher*>(m_parallel_mng);
  if (!pmd)
    ARCANE_FATAL("Bad parallelMng()");
  Type* xtype = nullptr;
  auto dispatcher = pmd->dispatcher(xtype);
  auto true_dispatcher = dynamic_cast<MpiParallelDispatchT<Type>*>(dispatcher);
  if (!true_dispatcher)
    ARCANE_FATAL("Bad dispatcher. Should have type MpiParallelDispatchT");
  m_datatype = true_dispatcher->datatype();
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> MpiParallelNonBlockingCollectiveDispatchT<Type>::
~MpiParallelNonBlockingCollectiveDispatchT()
{
  // NOTE: m_datatype is managed by MpiParallelDispatch and must not be
  // destroyed here.
  finalize();
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> void MpiParallelNonBlockingCollectiveDispatchT<Type>::
finalize()
{
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
broadcast(ArrayView<Type> send_buf, Integer sub_domain)
{
  MPI_Datatype type = m_datatype->datatype();
  return m_adapter->nonBlockingBroadcast(send_buf.data(), send_buf.size(), sub_domain, type);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
allGather(ConstArrayView<Type> send_buf, ArrayView<Type> recv_buf)
{
  MPI_Datatype type = m_datatype->datatype();
  return m_adapter->nonBlockingAllGather(send_buf.data(), recv_buf.data(), send_buf.size(), type);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
gather(ConstArrayView<Type> send_buf, ArrayView<Type> recv_buf, Integer rank)
{
  MPI_Datatype type = m_datatype->datatype();
  return m_adapter->nonBlockingGather(send_buf.data(), recv_buf.data(), send_buf.size(), rank, type);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
allGatherVariable(ConstArrayView<Type> send_buf, Array<Type>& recv_buf)
{
  ARCANE_UNUSED(send_buf);
  ARCANE_UNUSED(recv_buf);
  throw NotImplementedException(A_FUNCINFO);
#if 0
  _gatherVariable2(send_buf,recv_buf,-1);
#endif
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
gatherVariable(ConstArrayView<Type> send_buf, Array<Type>& recv_buf, Integer rank)
{
  ARCANE_UNUSED(send_buf);
  ARCANE_UNUSED(recv_buf);
  ARCANE_UNUSED(rank);
  throw NotImplementedException(A_FUNCINFO);
#if 0
  _gatherVariable2(send_buf,recv_buf,rank);
#endif
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
scatterVariable(ConstArrayView<Type> send_buf, ArrayView<Type> recv_buf, Integer root)
{
  ARCANE_UNUSED(send_buf);
  ARCANE_UNUSED(recv_buf);
  ARCANE_UNUSED(root);
  throw NotImplementedException(A_FUNCINFO);
#if 0
  MPI_Datatype type = m_adapter->datatype(Type());

  Integer comm_size = static_cast<Integer>(m_adapter->commSize());
  UniqueArray<int> recv_counts(comm_size);
  UniqueArray<int> recv_indexes(comm_size);

  Integer nb_elem = recv_buf.size();
  int my_buf_count = static_cast<int>(nb_elem);
  ConstArrayView<int> count_r(1,&my_buf_count);

  // Get the number of elements from each processor.
  m_parallel_mng->allGather(count_r,recv_counts);

  // Fill the index array.
  int index = 0;
  for( Integer i=0, is=comm_size; i<is; ++i ){
    recv_indexes[i] = index;
    index += recv_counts[i];
  }

  m_adapter->scatterVariable(send_buf.begin(),recv_counts.begin(),recv_indexes.begin(),
                             recv_buf.begin(),nb_elem,root,type);
#endif
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
allToAll(ConstArrayView<Type> send_buf, ArrayView<Type> recv_buf, Integer count)
{
  MPI_Datatype type = m_datatype->datatype();
  return m_adapter->nonBlockingAllToAll(send_buf.data(), recv_buf.data(), count, type);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
allToAllVariable(ConstArrayView<Type> send_buf,
                 Int32ConstArrayView send_count,
                 Int32ConstArrayView send_index,
                 ArrayView<Type> recv_buf,
                 Int32ConstArrayView recv_count,
                 Int32ConstArrayView recv_index)
{
  MPI_Datatype type = m_datatype->datatype();

  return m_adapter->nonBlockingAllToAllVariable(send_buf.data(), send_count.data(),
                                                send_index.data(), recv_buf.data(),
                                                recv_count.data(),
                                                recv_index.data(), type);
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class Type> Parallel::Request MpiParallelNonBlockingCollectiveDispatchT<Type>::
allReduce(eReduceType op, ConstArrayView<Type> send_buf, ArrayView<Type> recv_buf)
{
  MPI_Datatype type = m_datatype->datatype();
  Integer s = send_buf.size();
  MPI_Op operation = m_datatype->reduceOperator(op);

  Request request;
  {
    MpiLock::Section mls(m_adapter->mpiLock());
    request = m_adapter->nonBlockingAllReduce(send_buf.data(), recv_buf.data(),
                                              s, type, operation);
  }
  return request;
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template class MpiParallelNonBlockingCollectiveDispatchT<char>;
template class MpiParallelNonBlockingCollectiveDispatchT<signed char>;
template class MpiParallelNonBlockingCollectiveDispatchT<unsigned char>;
template class MpiParallelNonBlockingCollectiveDispatchT<short>;
template class MpiParallelNonBlockingCollectiveDispatchT<unsigned short>;
template class MpiParallelNonBlockingCollectiveDispatchT<int>;
template class MpiParallelNonBlockingCollectiveDispatchT<unsigned int>;
template class MpiParallelNonBlockingCollectiveDispatchT<long>;
template class MpiParallelNonBlockingCollectiveDispatchT<unsigned long>;
template class MpiParallelNonBlockingCollectiveDispatchT<long long>;
template class MpiParallelNonBlockingCollectiveDispatchT<unsigned long long>;
template class MpiParallelNonBlockingCollectiveDispatchT<float>;
template class MpiParallelNonBlockingCollectiveDispatchT<double>;
template class MpiParallelNonBlockingCollectiveDispatchT<long double>;
template class MpiParallelNonBlockingCollectiveDispatchT<Real2>;
template class MpiParallelNonBlockingCollectiveDispatchT<Real3>;
template class MpiParallelNonBlockingCollectiveDispatchT<Real2x2>;
template class MpiParallelNonBlockingCollectiveDispatchT<Real3x3>;
template class MpiParallelNonBlockingCollectiveDispatchT<HPReal>;

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

} // namespace Arcane

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
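
Usage note: this dispatch class is normally reached through the IParallelNonBlockingCollective interface rather than instantiated directly. The sketch below is a minimal, hypothetical illustration of how a caller might start a non-blocking sum reduction and later wait on the returned request. It assumes the interface mirrors the allReduce() signature implemented above, that the parallel manager exposes the collective interface via nonBlockingCollective() and request completion via waitAllRequests(), and the function name computeGlobalSum() is purely illustrative.

// Hypothetical usage sketch (not part of MpiParallelNonBlockingCollectiveDispatch.cc).
#include "arcane/utils/Array.h"
#include "arcane/core/IParallelMng.h"
#include "arcane/core/IParallelNonBlockingCollective.h"

using namespace Arcane;

void computeGlobalSum(IParallelMng* pm)
{
  UniqueArray<Real> local_values(4, 1.0);   // per-rank contribution
  UniqueArray<Real> global_values(4, 0.0);  // receives the reduced result

  // Assumed accessor: the non-blocking collective interface of the parallel manager.
  IParallelNonBlockingCollective* nbc = pm->nonBlockingCollective();

  // Starts the reduction and returns immediately; for Real values the MPI call
  // eventually goes through MpiParallelNonBlockingCollectiveDispatchT<Real>::allReduce().
  Parallel::Request request = nbc->allReduce(Parallel::ReduceSum, local_values.constView(),
                                             global_values.view());

  // ... computation can overlap with the communication here ...

  UniqueArray<Parallel::Request> requests;
  requests.add(request);
  pm->waitAllRequests(requests.view());     // after this call, global_values is valid
}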