55 using backend_type = Backend;
56 using BackendType = Backend;
58 typedef typename Backend::params backend_params;
59 typedef typename Backend::value_type value_type;
60 typedef typename math::scalar_of<value_type>::type scalar_type;
62 typedef typename Backend::vector vector;
66 typedef typename Coarsening::params coarsening_params;
67 typedef typename Relaxation::params relax_params;
68 typedef typename DirectSolver::params direct_params;
69 typedef typename Repartition::params repart_params;
// Importing constructor of the nested parameter struct: reads every setting
// from the generic parameter container `p`.  The ARCCORE_ALINA_PARAMS_IMPORT_*
// macros are defined elsewhere; presumably they expand to member initializers
// pulled from `p` -- confirm against the macro definitions.
: ARCCORE_ALINA_PARAMS_IMPORT_CHILD(p, coarsening)  // nested coarsening settings
, ARCCORE_ALINA_PARAMS_IMPORT_CHILD(p, relax)       // nested relaxation settings
, ARCCORE_ALINA_PARAMS_IMPORT_CHILD(p, direct)      // nested direct-solver settings
, ARCCORE_ALINA_PARAMS_IMPORT_CHILD(p, repart)      // nested repartitioning settings
, ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, max_levels)  // hierarchy depth limit (checked > 0 below)
, ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, npre)        // pre-smoothing sweeps per level
, ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, npost)       // post-smoothing sweeps per level
, ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, ncycle)      // coarse-grid visits per cycle (see cycle())
, ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, pre_cycles)  // cycles executed by apply()
// Reject unknown keys so typos in user-supplied settings fail loudly.
// (Constructor-body braces are elided in this chunk.)
p.check_params({
    "coarsening",
    "relax",
    "direct",
    "repart",
    "coarse_enough",
    "direct_coarse",
    "max_levels",
    "npre",
    "npost",
    "ncycle",
    "pre_cycles",
    "allow_rebuild" });

// Basic sanity check on the imported values.
Alina::precondition(
    max_levels > 0,
    "max_levels should be positive");
// Mirror of the importing constructor above: writes every setting back into a
// generic parameter container `p` under prefix `path` (the enclosing function
// signature is elided in this chunk).
// NOTE(review): coarse_enough / direct_coarse / allow_rebuild are accepted by
// check_params() but not exported in the visible lines -- lines are elided
// here, so verify they are exported elsewhere.
ARCCORE_ALINA_PARAMS_EXPORT_CHILD(p, path, coarsening);
ARCCORE_ALINA_PARAMS_EXPORT_CHILD(p, path, relax);
ARCCORE_ALINA_PARAMS_EXPORT_CHILD(p, path, direct);
ARCCORE_ALINA_PARAMS_EXPORT_CHILD(p, path, repart);
ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, max_levels);
ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, npre);
ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, npost);
ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, ncycle);
ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, pre_cycles);
// Builds the preconditioner from a user-supplied matrix type.  The matrix
// parameter (`A`) and the constructor body framing are elided in this chunk;
// the visible call wraps `A` into the distributed matrix type and delegates
// hierarchy construction to init().
template <class Matrix>
DistributedAMG(mpi_communicator comm,
    const params& prm = params(),
    const backend_params& bprm = backend_params())
    init(std::make_shared<matrix>(comm, A, backend::nbRow(A)), bprm);
// Builds the preconditioner from an already-wrapped distributed matrix
// (constructor body elided in this chunk; presumably delegates to init()).
DistributedAMG(mpi_communicator comm,
    std::shared_ptr<matrix> A,
    const params& prm = params(),
    const backend_params& bprm = backend_params())
// Rebuild overload for a plain matrix type: wraps `M` into the distributed
// matrix type and forwards to rebuild(shared_ptr) below.  The start of the
// signature (the `comm` and `M` parameters) is elided in this chunk.
template <class Matrix>
const backend_params& bprm = backend_params())
rebuild(std::make_shared<matrix>(comm, M, backend::nbRow(M)), bprm);
// Rebuild overload for a matrix held by a *different* backend: disabled via
// enable_if when OtherBackend is the same as Backend, so it never shadows the
// shared_ptr<matrix> overload below.  Copies the matrix into this backend's
// distributed matrix type before forwarding.  (Parameter list partly elided.)
template <class OtherBackend>
typename std::enable_if<!std::is_same<Backend, OtherBackend>::value, void>::type
const backend_params& bprm = backend_params())
return rebuild(std::make_shared<matrix>(*A), bprm);
// Recomputes the hierarchy in place for a new matrix of the same global size,
// reusing the existing transfer operators.  Only allowed when
// params.allow_rebuild was set at setup time.
// NOTE: the coarsening object `C` used in the loop is constructed in lines
// elided from this chunk.
void rebuild(std::shared_ptr<matrix> A,
    const backend_params& bprm = backend_params())
    precondition(prm.allow_rebuild,
        "allow_rebuild is not set!");
    // The replacement matrix must match the original global dimensions.
    precondition(A->glob_rows() == system_matrix().glob_rows() &&
            A->glob_cols() == system_matrix().glob_cols(),
        "Matrix dimensions differ from the original ones!");

    ARCCORE_ALINA_TIC("rebuild");
    // Walk the hierarchy from fine to coarse; each level returns the next
    // (coarser) matrix to rebuild from.
    for (auto& level : levels) {
        A = level.rebuild(A, C, prm, bprm);
    this->A->move_to_backend(bprm);
    ARCCORE_ALINA_TOC("rebuild");
// Performs a single multigrid cycle for the given right-hand side, updating
// `x` in place; delegates to the private cycle() starting at the finest level.
template <class Vec1, class Vec2>
void cycle(const Vec1& rhs, Vec2&& x) const
    cycle(levels.begin(), rhs, x);
// Preconditioner application: when prm.pre_cycles is nonzero, runs that many
// multigrid cycles starting from the finest level; the branch framing around
// backend::copy (presumably the zero-cycles fallback, acting as the identity
// preconditioner) is elided in this chunk -- confirm against the full file.
template <class Vec1, class Vec2>
void apply(const Vec1& rhs, Vec2&& x) const
    if (prm.pre_cycles) {
        for (unsigned i = 0; i < prm.pre_cycles; ++i)
            cycle(levels.begin(), rhs, x);
    // (elided lines) fallback path: plain copy of rhs into x.
    backend::copy(rhs, x);
// Read-only access to the finest-level system matrix (body elided in this
// chunk).
const matrix& system_matrix() const
// One level of the multigrid hierarchy: the level matrix, the transfer
// operators to/from the next coarser level, workspace vectors, and either a
// smoother or (on the coarsest level) a direct solver.
struct DistributedAMGLevel
    ptrdiff_t nrows, nnz;  // global rows / nonzeros recorded at construction

    std::shared_ptr<matrix> A, P, R;  // level matrix, prolongation, restriction
    std::shared_ptr<vector> f, u, t;  // coarse rhs, coarse solution, temporary
    std::shared_ptr<Relaxation> relax;    // smoother (see constructor)
    std::shared_ptr<DirectSolver> solve;  // coarse-level direct solver (see constructor)

    DistributedAMGLevel() = default;
// Builds a level from its matrix `a`.  The conditions selecting between the
// direct-solver path and the smoother path are elided in this chunk
// (presumably: direct solver on the coarsest level, smoother otherwise).
// Additional parameters (prm, and whatever sets active_procs' declaration)
// are likewise elided.
DistributedAMGLevel(std::shared_ptr<matrix> a,
    const backend_params& bprm,
    : nrows(a->glob_rows())
    , nnz(a->glob_nonzeros())
    , f(Backend::create_vector(a->loc_rows(), bprm))
    , u(Backend::create_vector(a->loc_rows(), bprm))
    // Count how many MPI ranks still own rows at this level.
    int active = (a->loc_rows() > 0);
    active_procs = a->comm().reduceSum(active);

    // Direct-solver path.
    ARCCORE_ALINA_TIC("direct solver");
    solve = std::make_shared<DirectSolver>(a->comm(), *a, prm.direct);
    ARCCORE_ALINA_TOC("direct solver");

    // Smoother path; also allocates the temporary vector used by relaxation.
    t = Backend::create_vector(a->loc_rows(), bprm);

    ARCCORE_ALINA_TIC("relaxation");
    relax = std::make_shared<Relaxation>(*a, prm.relax, bprm);
    ARCCORE_ALINA_TOC("relaxation");
// Builds the transfer operators for this level and returns the next (coarser)
// level matrix, optionally repartitioning it across ranks.  Returns an empty
// shared_ptr when the coarsening produced no coarse variables.  Several lines
// (the sorted work between the "sort" markers, P/R updates after
// repartitioning, the final return) are elided in this chunk.
std::shared_ptr<matrix> step_down(Coarsening& C, const Repartition& repart)
    ARCCORE_ALINA_TIC("transfer operators");
    std::tie(P, R) = C.transfer_operators(*A);

    ARCCORE_ALINA_TIC("sort");
    ARCCORE_ALINA_TOC("sort");
    ARCCORE_ALINA_TOC("transfer operators");

    // Empty coarse space: signal the caller to stop coarsening.
    if (P->glob_cols() == 0) {
        return std::shared_ptr<matrix>();

    ARCCORE_ALINA_TIC("coarse operator");
    // Coarse operator built from A, P, R (presumably the Galerkin triple
    // product; helper defined elsewhere).
    auto Ac = C.coarse_operator(*A, *P, *R);
    ARCCORE_ALINA_TOC("coarse operator");

    // Redistribute the coarse matrix when the repartitioner deems it
    // necessary (criterion lives in Repartition::is_needed).
    if (repart.is_needed(*Ac)) {
        ARCCORE_ALINA_TIC("partition");
        auto I = repart(*Ac, block_size(C));  // redistribution operator
        auto J = transpose(*I);
        // Re-form the coarse operator on the new layout (updates to P and R
        // are elided in this chunk).
        Ac = product(*J, *product(*Ac, *I));
        ARCCORE_ALINA_TOC("partition");
// Per-level part of DistributedAMG::rebuild(): refreshes the smoother (or the
// coarse direct solver) for the new matrix and recomputes the next-level
// matrix through the existing transfer operators.  Parts of the signature
// (C, prm) and the branch structure between the two setup calls are elided
// in this chunk.
std::shared_ptr<matrix> rebuild(std::shared_ptr<matrix> A,
    const backend_params& bprm)
    relax = std::make_shared<Relaxation>(*A, prm.relax, bprm);
    // NOTE(review): this prints the shared_ptr's address, not any relaxation
    // state -- looks like leftover debug output; consider removing.
    std::cout << "DistributedAMG: relaxation=" << relax << "\n";

    solve = std::make_shared<DirectSolver>(A->comm(), *A, prm.direct);

    // Recompute the coarse operator with the unchanged P and R.
    A = C.coarse_operator(*A, *P, *R);

    this->A->move_to_backend(bprm);
// Moves the level's matrices to their backend representation.  `keep_src`
// keeps host-side copies alive; callers pass prm.allow_rebuild so a later
// rebuild() can reuse them.  Guard lines between the calls are elided in
// this chunk (note A is moved without keep_src in the visible line).
void move_to_backend(const backend_params& bprm, bool keep_src = false)
    ARCCORE_ALINA_TIC("move to backend");
    A->move_to_backend(bprm);
    P->move_to_backend(bprm, keep_src);
    R->move_to_backend(bprm, keep_src);
    ARCCORE_ALINA_TOC("move to backend");
// Accessors for the level's global row / nonzero counts (bodies elided in
// this chunk; presumably return the nrows / nnz members).
ptrdiff_t rows() const
ptrdiff_t nonzeros() const
377 typedef typename std::list<DistributedAMGLevel>::const_iterator level_iterator;
380 std::shared_ptr<matrix> A;
382 std::list<DistributedAMGLevel> levels;
// Builds the multigrid hierarchy: repeatedly coarsens A until the global
// system is small enough (prm.coarse_enough) or the level limit is reached,
// then moves every level to the backend.  Several control-flow lines (loop
// braces, level creation, the construction of `repart`, the coarsest-level
// handling guarded by need_coarse) are elided in this chunk.
void init(std::shared_ptr<matrix> A, const backend_params& bprm)
    A->comm().check(A->glob_rows() == A->glob_cols(),
        "Matrix should be square!");

    Coarsening C(prm.coarsening);

    bool need_coarse = true;

    while (A->glob_rows() > prm.coarse_enough) {
        // Stop adding levels once the depth limit is reached.
        if (levels.size() >= prm.max_levels) {
        levels.back().move_to_backend(bprm, prm.allow_rebuild);
        // Derive the next (coarser) matrix from the newest level.
        A = levels.back().step_down(C, repart);

    if (A && need_coarse) {
        levels.back().move_to_backend(bprm, prm.allow_rebuild);

    ARCCORE_ALINA_TIC("move to backend");
    this->A->move_to_backend(bprm, prm.allow_rebuild);
    ARCCORE_ALINA_TOC("move to backend");
// One multigrid cycle starting at level `lvl`.  On the coarsest level the
// direct solver is applied; otherwise the level pre-smooths, restricts the
// residual, recurses prm.ncycle times (1 = V-cycle, 2 = W-cycle), prolongates
// the correction back, and post-smooths.  The branching that selects between
// the three visible sections is elided in this chunk.
template <class Vec1, class Vec2>
void cycle(level_iterator lvl, const Vec1& rhs, Vec2& x) const
    level_iterator nxt = lvl, end = levels.end();

    // --- coarsest level: direct solve ---
    ARCCORE_ALINA_TIC("direct solver");
    (*lvl->solve)(rhs, x);
    ARCCORE_ALINA_TOC("direct solver");

    // --- (elided branch; presumably a level without a coarser one) smooth only ---
    ARCCORE_ALINA_TIC("relax");
    for (size_t i = 0; i < prm.npre; ++i)
        lvl->relax->apply_pre(*lvl->A, rhs, x, *lvl->t);
    for (size_t i = 0; i < prm.npost; ++i)
        lvl->relax->apply_post(*lvl->A, rhs, x, *lvl->t);
    ARCCORE_ALINA_TOC("relax");

    // --- regular level: smooth, coarse-grid correction, smooth ---
    for (size_t j = 0; j < prm.ncycle; ++j) {
        // Pre-smoothing.
        ARCCORE_ALINA_TIC("relax");
        for (size_t i = 0; i < prm.npre; ++i)
            lvl->relax->apply_pre(*lvl->A, rhs, x, *lvl->t);
        ARCCORE_ALINA_TOC("relax");

        // Restrict the residual to the coarse level: f = R * (rhs - A x).
        backend::residual(rhs, *lvl->A, x, *lvl->t);
        backend::spmv(math::identity<scalar_type>(), *lvl->R, *lvl->t, math::zero<scalar_type>(), *nxt->f);

        // Coarse-grid correction with a zero initial guess.
        backend::clear(*nxt->u);
        cycle(nxt, *nxt->f, *nxt->u);

        // Prolongate and accumulate: x += P * u.
        backend::spmv(math::identity<scalar_type>(), *lvl->P, *nxt->u, math::identity<scalar_type>(), x);

        // Post-smoothing.
        ARCCORE_ALINA_TIC("relax");
        for (size_t i = 0; i < prm.npost; ++i)
            lvl->relax->apply_post(*lvl->A, rhs, x, *lvl->t);
        ARCCORE_ALINA_TOC("relax");
// Friend declaration only: stream output of the hierarchy is implemented
// outside the class (not visible in this chunk).
template <class B, class C, class R, class D, class I>
friend std::ostream& operator<<(std::ostream& os,
    const DistributedAMG<B, C, R, D, I>& a);