#include "arcane/aleph/AlephArcane.h"
#include "arcane/utils/StringList.h"
AlephKernelSolverInitializeArguments::
AlephKernelSolverInitializeArguments()
// ...

// Constructor variant that only receives the world parallel manager 'wpm':
// it unconditionally marks this rank as an additional ("other") site.
// ...
            Integer alephUnderlyingSolver,
            Integer alephNumberOfCores,
// ...
, m_isParallel(wpm->isParallel())
, m_rank(wpm->commRank())
, m_world_size(wpm->commSize())
, m_there_are_idles(true)
, m_i_am_an_other(true)
, m_world_parallel(wpm)
, m_aleph_vector_idx(0)
, m_underlying_solver((alephUnderlyingSolver == 0 ? 1 : alephUnderlyingSolver))
, m_solver_size(alephNumberOfCores)
, m_has_been_initialized(false)

// Constructor variant built from a sub-domain 'sd': both the sub-domain's
// parallel manager and the world parallel manager are recorded.
// ...
            Integer alephUnderlyingSolver,
            Integer alephNumberOfCores,
// ...
, m_isParallel(sd->parallelMng()->isParallel())
, m_rank(sd->parallelMng()->commRank())
, m_size(sd->parallelMng()->commSize())
, m_world_size(sd->parallelMng()->worldParallelMng()->commSize())
, m_there_are_idles(m_size != m_world_size)
, m_i_am_an_other(sd->parallelMng()->worldParallelMng()->commRank() > m_size)
, m_parallel(sd->parallelMng())
, m_world_parallel(sd->parallelMng()->worldParallelMng())
, m_aleph_vector_idx(0)
, m_underlying_solver((alephUnderlyingSolver == 0 ? 1 : alephUnderlyingSolver))
, m_solver_size(alephNumberOfCores)
, m_has_been_initialized(false)

// Constructor variant with indexing and initialization options; its body
// follows below.
// ...
            Integer alephUnderlyingSolver,
            Integer alephNumberOfCores)
// ...
, m_isParallel(sd->parallelMng()->isParallel())
, m_rank(sd->parallelMng()->commRank())
, m_size(sd->parallelMng()->commSize())
, m_world_size(sd->parallelMng()->worldParallelMng()->commSize())
, m_there_are_idles(m_size != m_world_size)
, m_i_am_an_other(sd->parallelMng()->worldParallelMng()->commRank() > m_size)
, m_parallel(sd->parallelMng())
, m_world_parallel(sd->parallelMng()->worldParallelMng())
// ...
, m_aleph_vector_idx(0)
, m_underlying_solver(alephUnderlyingSolver == 0 ? 1 : alephUnderlyingSolver)
, m_solver_size(alephNumberOfCores)
, m_has_been_initialized(false)
{
  debug() << "\33[1;31m[AlephKernel] New kernel with indexing+init options!\33[0m";
  // ...
  debug() << "\33[1;31m[AlephKernel] thisParallelMng's size=" << m_size << "\33[0m";
  debug() << "\33[1;31m[AlephKernel] worldParallelMng's size=" << m_world_size << "\33[0m";
  // ...
  debug() << "\33[1;31m[AlephKernel] I am an additional site #" << m_rank
          << " among " << m_world_size << "\33[0m";
  // ...
  debug() << "\33[1;31m[AlephKernel] Aleph underlying solver has been set to "
          << m_underlying_solver << "\33[0m";
  // ...
  // A solver size of 0 means "use every site of the world parallel manager".
  if (m_solver_size == 0) {
    m_solver_size = m_world_size;
    debug() << "\33[1;31m[AlephKernel] Aleph Number of Cores"
            << " now matches world's number of processors: " << m_solver_size << "\33[0m";
  }
  // The solver cannot span more cores than this parallel manager owns.
  if (m_solver_size > m_size) {
    m_solver_size = m_size;
    debug() << "\33[1;31m[AlephKernel] Aleph Number of Cores"
            << " exceeds in size, reverting to " << m_size << "\33[0m";
  }
  if ((m_size % m_solver_size) != 0) {
    // ...
  }
  debug() << "\33[1;31m[AlephKernel] Each solver takes "
          << m_solver_size << " site(s)\33[0m";
  // ...
  // Rank 0 broadcasts the kernel configuration to the idle/"other" ranks.
  if (m_there_are_idles && !m_i_am_an_other) {
    UniqueArray<Integer> cfg(0);
    cfg.add(m_underlying_solver);
    cfg.add(m_solver_size);
    cfg.add((m_reorder == true) ? 1 : 0);
    // ...
    debug() << "\33[1;31m[AlephKernel] Sending to others configuration: " << cfg << "\33[0m";
    m_world_parallel->broadcast(cfg.view(), 0);
  }
  // ...
}
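
// Note (inferred from this excerpt): the configuration broadcast above sends
// three integers to the idle/"other" ranks -- the underlying solver id, the
// solver size and the reorder flag encoded as 0/1 -- so that they can set up
// a matching kernel without access to the sub-domain. The receiving-side
// code is not part of this excerpt.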

AlephKernel::
~AlephKernel()
{
  info(4) << "Destroying ~AlephKernel";
  // ...
  m_results_queue.clear();
  // ...
  m_matrix_queue.clear();
  // ... (for each queued argument set 'aq' in m_arguments_queue)
  delete aq->m_x_vector;
  delete aq->m_b_vector;
  delete aq->m_tmp_vector;
  // ...
  m_arguments_queue.clear();
  // ...
  info(4) << "Destroying ~AlephKernel done";
}

void AlephKernel::
initialize(Integer global_nb_row,
           Integer local_nb_row)
{
  ItacFunction(AlephKernel);
  // ...
  // Tell the idle/"other" ranks to mirror this call: broadcast the operation
  // tag for 'initialize', then the geometry arguments.
  if (m_there_are_idles && !m_i_am_an_other) {
    m_world_parallel->broadcast(UniqueArray<unsigned long>(1, 0xd80dee82l).view(), 0);
    UniqueArray<Integer> args(0);
    args.add(global_nb_row);
    args.add(local_nb_row);
    m_world_parallel->broadcast(args.view(), 0);
  }
  // ...
  debug() << "\33[1;31m[initialize] Geometry set to " << global_nb_row
          << " lines, I see " << local_nb_row << " of them\33[0m";
  // ...
  m_topology = new AlephTopology(traceMng(), this, global_nb_row, local_nb_row);
  m_ordering = new AlephOrdering(this, global_nb_row, local_nb_row, m_reorder);
  debug() << "\33[1;31m[initialize] done\33[0m";
  // ...
  m_has_been_initialized = true;
}

void AlephKernel::
break_and_return(void)
{
  if (m_there_are_idles && !m_i_am_an_other)
    m_world_parallel->broadcast(UniqueArray<unsigned long>(1, 0x4b97b15dl).view(), 0);
}

// Excerpt from AlephKernel::mapranks(Array<Integer>&): compute, for each core
// of the current solver, the world rank ("site") it is mapped to.
// ...
  debug() << "\33[1;31m[mapranks] mapranks starting @ " << m_solver_index * m_size
          << ", m_size=" << m_size
          << ", m_solver_size=" << m_solver_size
          << ", m_world_size=" << m_world_size << "\33[0m";
  // ...
  for (int rnk = m_solver_index * m_size; rnk < (m_solver_index + 1) * m_size; rnk += 1) {
    const int map = (rnk / (m_size / m_solver_size)) % m_world_size;
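    // Worked example (illustrative values, not from a real run): with
    // m_size = 4, m_solver_size = 2, m_world_size = 4 and m_solver_index = 0,
    // each site hosts m_size / m_solver_size = 2 cores, so
    //   rnk = 0,1 -> map = (rnk / 2) % 4 = 0
    //   rnk = 2,3 -> map = (rnk / 2) % 4 = 1
    // and the next solver (m_solver_index = 1, rnk = 4..7) lands on sites 2
    // and 3, spreading successive solvers round-robin over the world ranks.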
    debug() << "\33[1;31m[mapranks] map=" << map << "\33[0m";
    // ...
    debug() << "\33[1;31m[mapranks] mapped solver #" << m_solver_index
            << ", core " << rnk % m_size << " --> site " << map << "\33[0m";
    // ...
  }
  // ...

Ref<IParallelMng> AlephKernel::
_createUnderlyingParallelMng(Integer nb_wanted_sites)
{
  info(4) << "[createUnderlyingParallelMng] nb_wanted_sites=" << nb_wanted_sites;
  UniqueArray<Integer> kept_ranks;
  // Keep the world ranks that were mapped to the current solver.
  for (Int32 rank = 0; rank < m_world_size; ++rank) {
    if (hitranks(rank, m_solver_ranks[m_solver_index]))
      kept_ranks.add(rank);
  }
  info(4) << "[createUnderlyingParallelMng] Now createSubParallelMng of size=" << kept_ranks.size();
  // ... (construction of 'upm' from 'kept_ranks' elided in this excerpt)
  info(4) << "[createUnderlyingParallelMng] done: upm=" << upm.get();
  // ...
}

AlephMatrix* AlephKernel::
createSolverMatrix(void)
{
  ItacFunction(AlephService);
  // ...
  // Before initialize() has been called, hand back a bare matrix.
  if (isInitialized() == false) {
    debug() << "\33[1;31m[createSolverMatrix] has_NOT_been_initialized!\33[0m";
    // ...
    return new AlephMatrix(this);
  }
  // ...
  if (m_there_are_idles && !m_i_am_an_other)
    m_world_parallel->broadcast(UniqueArray<unsigned long>(1, 0xef162166l).view(), 0);
  // ...
  debug() << "\33[1;31m[createSolverMatrix]\33[0m";
  // ...
  debug() << "\33[1;31m[createSolverMatrix] UN configured, building Underlying Parallel Managers index="
          << index() << "\33[0m";
  // Map the ranks of this solver instance and build its dedicated
  // sub parallel manager.
  m_solver_ranks.add(SharedArray<Integer>(m_world_size));
  m_solver_ranks[m_solver_index].fill(-1);
  mapranks(m_solver_ranks[m_solver_index]);
  // ...
  Ref<IParallelMng> upm = _createUnderlyingParallelMng(m_solver_size);
  // ...
  debug() << "\33[1;31m[createSolverMatrix] upm->isParallel()=" << upm->isParallel() << "\33[0m";
  debug() << "\33[1;31m[createSolverMatrix] upm->commSize()=" << upm->commSize() << "\33[0m";
  debug() << "\33[1;31m[createSolverMatrix] upm->commRank()=" << upm->commRank() << "\33[0m";
  // ...
  debug() << "\33[1;31m[createSolverMatrix] upm NULL\33[0m";
  // ...
  m_sub_parallel_mng_queue.add(upm);
  debug() << "\33[1;31m[createSolverMatrix] Queuing new kernel arguments: X, B and Tmp with their topology\33[0m";
  // ...
  IAlephTopology* underlying_topology =
    factory()->GetTopology(this, index(), topology()->nb_row_size());
  // ...
  if (underlying_topology != NULL)
    underlying_topology->backupAndInitialize();
  m_arguments_queue.add(new AlephKernelArguments(traceMng(),
                                                 new AlephVector(this),  // X
                                                 new AlephVector(this),  // B
                                                 new AlephVector(this),  // Tmp
                                                 underlying_topology));
  // ...
  debug() << "\33[1;31m[createSolverMatrix] Creating Tmp vector for this set of arguments\33[0m";
  // ...
  m_arguments_queue.at(m_solver_index)->m_tmp_vector->create();
  // ...
  debug() << "\33[1;31m[createSolverMatrix] Now queuing the matrix\33[0m";
  m_matrix_queue.add(new AlephMatrix(this));
  debug() << "\33[1;31m[createSolverMatrix] Now queuing the space for the resolution results\33[0m";
  m_results_queue.add(new AlephKernelResults());
  // ...
  if (getTopologyImplementation(m_solver_index) != NULL)
    getTopologyImplementation(m_solver_index)->backupAndInitialize();
  // ...
  debug() << "\33[1;31m[createSolverMatrix] done!\33[0m";
  // ...
  return m_matrix_queue[m_solver_index];
}
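
// Each call to createSolverMatrix() on an initialized kernel therefore grows
// the per-solver queues in lockstep, all indexed by m_solver_index:
// m_solver_ranks, m_sub_parallel_mng_queue, m_arguments_queue (X, B and Tmp
// vectors plus parameters and topology), m_matrix_queue and m_results_queue.
// These entries are consumed later by workSolver() and syncSolver().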

AlephVector* AlephKernel::
createSolverVector(void)
{
  // ...
  if (m_has_been_initialized == false) {
    debug() << "\33[1;31m[createSolverVector] has_NOT_been_initialized!\33[0m";
    // ...
  }
  // ...
  if (m_there_are_idles && !m_i_am_an_other) {
    // ... (broadcast to the other ranks elided in this excerpt)
  }
  m_aleph_vector_idx++;
  // Calls alternate between the X and B vectors of the current solver.
  if ((m_aleph_vector_idx % 2) == 0) {
    debug() << "\33[1;31m[createSolverVector] Get " << m_solver_index << "th X vector\33[0m";
    return m_arguments_queue.at(m_solver_index)->m_x_vector;
  }
  // ...
  debug() << "\33[1;31m[createSolverVector] Get " << m_solver_index << "th B vector\33[0m";
  return m_arguments_queue.at(m_solver_index)->m_b_vector;
}
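
// Since m_aleph_vector_idx starts at 0 (see the constructors above) and is
// incremented before the parity test, successive calls on an initialized
// kernel alternate between the B and X vectors of the current solver. A
// minimal caller-side sketch, assuming 'kernel' points to an initialized
// AlephKernel:
//
//   AlephVector* b = kernel->createSolverVector(); // index becomes 1 -> B
//   AlephVector* x = kernel->createSolverVector(); // index becomes 2 -> X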

// Excerpt from AlephKernel::postSolver(...): queue a solve request and
// broadcast the solver parameters 'params' to the idle/"other" ranks.
// ...
  if (!isInitialized()) {
    debug() << "\33[1;31m[postSolver] Trying to post a solver to an uninitialized kernel!\33[0m";
    debug() << "\33[1;31m[postSolver] Now telling Indexer to do its job!\33[0m";
    // ...
  }
  // ...
  if (m_there_are_idles && !m_i_am_an_other) {
    m_world_parallel->broadcast(UniqueArray<unsigned long>(1, 0xba9488bel).view(), 0);
    UniqueArray<Real> real_args(0);
    real_args.add(params->epsilon());
    real_args.add(params->alpha());
    real_args.add(params->minRHSNorm());
    real_args.add(params->DDMCParameterAmgDiagonalThreshold());
    // ...
    UniqueArray<int> bool_args(0);
    bool_args.add(params->xoUser());
    bool_args.add(params->checkRealResidue());
    bool_args.add(params->printRealResidue());
    bool_args.add(params->debugInfo());
    bool_args.add(params->convergenceAnalyse());
    bool_args.add(params->stopErrorStrategy());
    bool_args.add(params->writeMatrixToFileErrorStrategy());
    bool_args.add(params->DDMCParameterListingOutput());
    bool_args.add(params->printCpuTimeResolution());
    bool_args.add(params->getKeepSolverStructure());
    bool_args.add(params->getSequentialSolver());
    // ...
    UniqueArray<Integer> int_args(0);
    int_args.add(params->maxIter());
    int_args.add(params->gamma());
    int_args.add((Integer)params->precond());
    int_args.add((Integer)params->method());
    int_args.add((Integer)params->amgCoarseningMethod());
    int_args.add(params->getOutputLevel());
    int_args.add(params->getAmgCycle());
    int_args.add(params->getAmgSolverIter());
    int_args.add(params->getAmgSmootherIter());
    int_args.add((Integer)params->getAmgSmootherOption());
    int_args.add((Integer)params->getAmgCoarseningOption());
    int_args.add((Integer)params->getAmgCoarseSolverOption());
    int_args.add((Integer)params->getCriteriaStop());
    // ...
    m_world_parallel->broadcast(real_args.view(), 0);
    m_world_parallel->broadcast(bool_args.view(), 0);
    m_world_parallel->broadcast(int_args.view(), 0);
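    // The parameters are marshalled into three flat arrays -- Real, int
    // (encoding booleans) and Integer -- and broadcast after the operation
    // tag, so the idle/"other" ranks can rebuild an equivalent parameter
    // set. The order of the add() calls above therefore defines the wire
    // format shared with the receiving side, which is not shown in this
    // excerpt.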
  }
  // ...
  debug() << "\33[1;31m[postSolver] Queuing solver " << m_solver_index << "\33[0m";
  debug() << "\33[1;31m[postSolver] Backing up its params @" << params << "\33[0m";
  m_arguments_queue.at(m_solver_index)->m_params = params;
  // ...
  debug() << "\33[1;31m[postSolver] Updating the solver and site indices\33[0m";
  // ...
  debug() << "\33[1;31m[postSolver] m_solver_index=" << m_solver_index << "\33[0m";
  // ...

// Excerpt from AlephKernel::syncSolver(Integer gid, Integer&, Real*): wait
// for solver 'gid' to finish, fetch its iteration count and residual norm,
// and return its solution (X) vector.
// ...
  if (m_there_are_idles && !m_i_am_an_other) {
    // ...
  }
  // ...
  debug() << "\33[1;31m[syncSolver] Not solved, launching the work\33[0m";
  // ...
  debug() << "\33[1;31m[syncSolver] Syncing " << gid << "\33[0m";
  // ...
  m_matrix_queue.at(gid)->reassemble_waitAndFill(m_results_queue.at(gid)->m_nb_iteration,
                                                 m_results_queue.at(gid)->m_residual_norm);
  // ...
  debug() << "\33[1;31m[syncSolver] Finishing " << gid << "\33[0m";
  // ...
  if (getTopologyImplementation(gid) != NULL)
    getTopologyImplementation(gid)->restore();
  // ...
  debug() << "\33[1;31m[syncSolver] Done " << gid << "\33[0m";
  // ...
  return m_arguments_queue.at(gid)->m_x_vector;
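
// Note (inferred from the declaration AlephVector* syncSolver(Integer,
// Integer&, Real*) and from the code above): the iteration count and
// residual norm that reassemble_waitAndFill() stores in m_results_queue are
// presumably copied into the Integer& and Real* output arguments in a part
// of the function not shown in this excerpt.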

// Excerpt from AlephKernel::workSolver(): run every queued solver, then post
// the re-assembling of the solutions.
// ...
  debug() << "\33[1;31m[workSolver] Now working\33[0m";
  // ...
  // Phase 1: wait for all queued systems to be assembled.
  for (int gid = 0; gid < m_solver_index; ++gid) {
    // ...
    debug() << "\33[1;31m[workSolver] Waiting for assembling " << gid << "\33[0m";
    // ...
  }
  // ...
  debug() << "\33[1;31m[workSolver] NOW CONFIGURED!\33[0m";
  // ...
  // Phase 2: solve each queued system with its own arguments and topology.
  for (int gid = 0; gid < m_solver_index; ++gid) {
    ItacRegion(gidSolving, AlephKernel);
    debug() << "\33[1;31m[workSolver] Solving " << gid << " ?\33[0m";
    // ...
    if (getTopologyImplementation(gid) != NULL)
      getTopologyImplementation(gid)->backupAndInitialize();
    m_matrix_queue.at(gid)->solveNow(m_arguments_queue.at(gid)->m_x_vector,
                                     m_arguments_queue.at(gid)->m_b_vector,
                                     m_arguments_queue.at(gid)->m_tmp_vector,
                                     m_results_queue.at(gid)->m_nb_iteration,
                                     m_results_queue.at(gid)->m_residual_norm,
                                     m_arguments_queue.at(gid)->m_params);
    // ...
    if (getTopologyImplementation(gid) != NULL)
      getTopologyImplementation(gid)->restore();
  }
  // ...
  // Phase 3: post the re-assembling of the solution vectors and results.
  for (int gid = 0; gid < m_solver_index; ++gid) {
    ItacRegion(gidReAssemble, AlephKernel);
    debug() << "\33[1;31m[workSolver] Posting re-assembling " << gid << "\33[0m";
    m_arguments_queue.at(gid)->m_x_vector->reassemble();
    m_matrix_queue.at(gid)->reassemble(m_results_queue.at(gid)->m_nb_iteration,
                                       m_results_queue.at(gid)->m_residual_norm);
  }

ISubDomain* AlephKernel::
subDomain(void)
{
  // ...
  if (!m_sub_domain && !m_i_am_an_other)
    ARCANE_FATAL("[AlephKernel::subDomain]",
                 "No sub-domain to work on!");
  // ...
}
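
// Overall usage sketch, pieced together from the entry points defined in
// this file; the 'kernel' and 'params' variables and the exact argument
// list of postSolver() are assumptions for illustration, not taken from
// this excerpt:
//
//   kernel->initialize(global_nb_row, local_nb_row);  // topology + ordering
//   AlephMatrix* A = kernel->createSolverMatrix();    // queues X, B, Tmp and results
//   AlephVector* b = kernel->createSolverVector();    // B (right-hand side)
//   AlephVector* x = kernel->createSolverVector();    // X (solution)
//   // ... fill A, b and x through the AlephMatrix/AlephVector APIs ...
//   kernel->postSolver(params, /* ... */);            // queue the solve request
//   Integer nb_iteration = 0;
//   Real residual_norm[4] = { 0., 0., 0., 0. };       // size assumed for illustration
//   AlephVector* solution = kernel->syncSolver(0, nb_iteration, residual_norm);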