// NOTE(review): tail of an initialisation method whose beginning lies outside
// this chunk, and interior lines are elided — comments below are hedged and
// describe only the visible statements.
// Master-machine branch: presumably keeps one representative rank per machine
// (node) as the communication target — TODO confirm against the full method.
66 if (topo->isMasterMachine()) {
70 Int32 machineRank = topo->machineRank();
73 m_targetRank = targetRanks[machineRank];
74 m_targetSize = targetRanks.
size();
// Build a sub parallel manager restricted to the target ranks, unless the
// targets already cover the whole communicator (condition on commSize()).
76 if ((m_targetSize != m_pm_ini->commSize())
77 && (allow_only_one_rank || m_targetSize > 1)) {
79 m_pm_sub = m_pm_ini->createSubParallelMngRef(targetRanks);
// Other branch: identity mapping — every rank is its own target.
84 m_targetRank = m_pm_ini->commRank();
85 m_targetSize = m_pm_ini->commSize();
87 for (
int i = 0; i < m_targetSize; i++)
89 m_pm_sub = m_pm_ini->createSubParallelMngRef(keptRanks);
// Trace how many participants ended up being used.
92 m_pm_ini->traceMng()->info() <<
"Running on " << m_targetSize <<
" nodes";
//! \brief Initialises the distributor so that at most \a targetSize ranks
//! receive data.
// NOTE(review): several lines of this method are elided in this chunk
// (e.g. the declaration of keepProc and the setting of m_contribute to true);
// comments describe only the visible statements.
96 void initWithMaxRank(
Int32 targetSize)
99 m_targetSize = targetSize;
102 m_contribute =
false;
// Only ranks below targetSize take part — presumably m_contribute is set
// to true inside this branch (elided here) — TODO confirm.
107 if (m_pm_ini->
commRank() < targetSize) {
110 Int64 my_rank = m_pm_ini->
commRank();
111 Int64 x = my_rank * m_targetSize;
// Spread the kept ranks evenly over the initial communicator
// (step clamped to at least 1 to avoid a zero stride).
115 Int32 step = m_targetSize / m_pm_ini->
commSize();
116 step = (step == 0) ? 1 : step;
117 for (
int i = 0; i < m_targetSize; ++i) {
118 keepProc[i] = i * step;
121 m_pm_sub = m_pm_ini->createSubParallelMngRef(keepProc);
// A distinct sub manager is only meaningful when the target size differs
// from the full communicator size (body of this branch elided).
123 if (m_targetSize != m_pm_ini->commSize()) {
//! Number of ranks that actually take part in the redistribution.
130 Int32 size()
const {
return m_targetSize; }
//! True if this rank contributes (receives a share of the redistributed data).
132 bool contribute()
const {
return m_contribute; }
/*!
 * \brief Redistributes \a in so that the data lands on the target ranks only.
 *
 * \param in local view of the data to send.
 * \param pattern if non null, filled (via addRange) with how many items come
 *        from each source rank, so the exchange can later be reversed.
 * \param is_indirection if true, \a in is an offset/indirection array whose
 *        last entry is a size: the trailing entry is not exchanged and the
 *        received offset blocks are shifted to stay consistent.
 *
 * NOTE(review): many lines of this method are elided in this chunk (e.g. the
 * declarations of nInfos, size, begin and offset); comments describe only
 * the statements visible here.
 */
135 template <
typename DataT>
136 SharedArray<DataT> convert(ConstArrayView<DataT> in, Array<DataT>* pattern =
nullptr,
137 bool is_indirection =
false)
const
142 SharedArray<DataT> out(in);
143 if (pattern != NULL) {
147 pattern->resize(size, m_targetRank);
151 ConstArrayView<DataT> toSnd;
// For indirection arrays the trailing size entry is excluded from the send.
154 if (is_indirection) {
155 toSnd = in.subView(0, in.size() - 1);
// All-gather per-rank metadata: slot 0 = chosen target rank,
// slot 1 = item count, slot 2 (indirection only) = last offset value.
162 Int32 commSize = m_pm_ini->commSize();
163 UniqueArray<Int32> sndCnt(nInfos * commSize, -1);
164 UniqueArray<Parallel::Request> req;
165 UniqueArray<Int32> n_wanted(nInfos);
166 n_wanted[0] = m_targetRank;
167 n_wanted[1] = toSnd.size();
169 n_wanted[2] =
static_cast<Int32>(in[in.size() - 1]);
171 m_pm_ini->allGather(n_wanted, sndCnt);
// Counts/displacements for the variable all-to-all: this rank sends its
// whole payload to its target; a target receives from each rank that
// selected it.
173 UniqueArray<Int32> sndNbr(commSize, 0);
174 UniqueArray<Int32> rcvNbr(commSize, 0);
175 UniqueArray<Int32> sndDsp(commSize, 0);
176 UniqueArray<Int32> rcvDsp(commSize, 0);
178 sndNbr[m_targetRank] = toSnd.size();
180 if (pattern != NULL) {
184 Int32 myRank = m_pm_ini->commRank();
186 for (
int i = 0; i < commSize; ++i) {
187 if (sndCnt[nInfos * i] == myRank) {
188 rcvNbr[i] = sndCnt[nInfos * i + 1];
// Record the per-source-rank extent so the exchange can be reversed.
193 pattern->addRange(i, rcvNbr[i]);
196 if (contribute() && is_indirection)
198 SharedArray<DataT> out(begin, -1);
200 m_pm_ini->allToAllVariable(toSnd, sndNbr, sndDsp, out, rcvNbr, rcvDsp);
// Indirection fix-up: shift each received offset block by the accumulated
// total (slot 2 of the metadata) so the concatenated offsets stay increasing.
202 if (contribute() && is_indirection) {
204 DataT* my_iter = out.data();
205 for (
int i = 0; i < commSize; ++i) {
206 if (sndCnt[nInfos * i] == myRank) {
207 Int32 nRecv = sndCnt[nInfos * i + 1];
208 DataT* my_end(my_iter + nRecv);
209 for (; my_iter != my_end; ++my_iter)
210 (*my_iter) += offset;
211 offset += sndCnt[nInfos * i + 2];
// Final entry of the indirection array holds the accumulated total size.
214 out[out.size() - 1] = offset;
/*!
 * \brief Reverses convert(): sends \a in from the target ranks back to the
 *        ranks that originally contributed the data.
 *
 * \param in data currently living on the target ranks.
 * \param nRecv number of items this rank expects to get back.
 *
 * NOTE(review): several lines are elided in this chunk (e.g. the declaration
 * of nInfos and the filling of the remaining n_wanted slots); comments
 * describe only the visible statements.
 */
221 template <
typename DataT>
222 SharedArray<DataT> convertBack(ConstArrayView<DataT> in,
Int32 nRecv)
const
227 SharedArray<DataT> out(in);
// Re-exchange the metadata so each rank learns who sends data back to it.
232 Int32 commSize = m_pm_ini->commSize();
233 UniqueArray<Int32> sndCnt(nInfos * commSize, -1);
234 UniqueArray<Parallel::Request> req;
235 UniqueArray<Int32> n_wanted(nInfos);
236 n_wanted[0] = m_targetRank;
239 m_pm_ini->allGather(n_wanted, sndCnt);
// Mirror of convert(): receive from our target rank, send to every rank
// whose recorded target was us.
241 UniqueArray<Int32> sndNbr(commSize, 0);
242 UniqueArray<Int32> rcvNbr(commSize, 0);
243 UniqueArray<Int32> sndDsp(commSize, 0);
244 UniqueArray<Int32> rcvDsp(commSize, 0);
246 rcvNbr[m_targetRank] = nRecv;
248 Int32 myRank = m_pm_ini->commRank();
250 for (
int i = 0; i < commSize; ++i) {
251 if (sndCnt[nInfos * i] == myRank) {
252 sndNbr[i] = sndCnt[nInfos * i + 1];
257 SharedArray<DataT> out(nRecv, -1);
259 m_pm_ini->allToAllVariable(in, sndNbr, sndDsp, out, rcvNbr, rcvDsp);
//! Parallel manager restricted to the participating ranks.
// NOTE(review): the tail of this method is elided in this chunk — presumably
// it falls back to m_pm_ini when m_pm_sub is null — TODO confirm.
264 IParallelMng* subParallelMng()
const
266 IParallelMng* pm = m_pm_sub.get();
// Deprecated accessor kept for backward compatibility (see reason string).
// NOTE(review): the guard between the two return statements is elided in this
// chunk — presumably MPI_COMM_NULL is returned when m_pm_sub is null — confirm.
274 ARCANE_DEPRECATED_REASON(
"Y2024: This method is internal to Arcane. Use subParallelMng()->communicator() instead")
275 MPI_Comm getCommunicator()
const
278 return MPI_COMM_NULL;
279 Parallel::Communicator comm = m_pm_sub->communicator();
280 return (MPI_Comm)comm;
// Deprecated accessor kept for backward compatibility: returns the raw
// pointer held by m_pm_sub (may be null when no sub manager was created).
283 ARCANE_DEPRECATED_REASON(
"Y2024: This method is internal to Arcane. Use subParallelMng() instead")
284 IParallelMng* parallelManager()
const
286 return m_pm_sub.get();
// Initial (full) parallel manager; non-owning raw pointer.
291 IParallelMng* m_pm_ini =
nullptr;
// Sub parallel manager restricted to the participating ranks.
292 Ref<IParallelMng> m_pm_sub;
// Number of participating ranks (-1 until one of the init methods runs).
293 Int32 m_targetSize = -1;
// Rank this process sends its data to (-1 until initialised).
294 Int32 m_targetRank = -1;
// True when this rank receives a share of the redistributed data.
296 bool m_contribute =
false;
// True once initialisation has been performed.
297 bool m_is_init =
false;