Arcane  v4.1.10.0
Documentation développeur
Chargement...
Recherche...
Aucune correspondance
DistributedSchurPressureCorrection.h
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2026 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7/*---------------------------------------------------------------------------*/
8/* DistributedSchurPressureCorrection.h (C) 2000-2026 */
9/* */
10/* Distributed Schur complement pressure correction preconditioner. */
11/*---------------------------------------------------------------------------*/
12#ifndef ARCCORE_ALINA_MPI_DISTRIBUTEDSCHURPRESSURECORRECTION_H
13#define ARCCORE_ALINA_MPI_DISTRIBUTEDSCHURPRESSURECORRECTION_H
14/*---------------------------------------------------------------------------*/
15/*---------------------------------------------------------------------------*/
16/*
17 * This file is based on the work on AMGCL library (version march 2026)
18 * which can be found at https://github.com/ddemidov/amgcl.
19 *
20 * Copyright (c) 2012-2022 Denis Demidov <dennis.demidov@gmail.com>
21 * SPDX-License-Identifier: MIT
22 */
23/*---------------------------------------------------------------------------*/
24/*---------------------------------------------------------------------------*/
25
26#include "arccore/alina/AlinaUtils.h"
27#include "arccore/alina/BuiltinBackend.h"
28#include "arccore/alina/DistributedInnerProduct.h"
29#include "arccore/alina/DistributedMatrix.h"
30
31/*---------------------------------------------------------------------------*/
32/*---------------------------------------------------------------------------*/
33
34namespace Arcane::Alina
35{
36
37/*---------------------------------------------------------------------------*/
38/*---------------------------------------------------------------------------*/
42template <class USolver, class PSolver>
43class DistributedSchurPressureCorrection
44{
45 using USolverBackendType = typename USolver::backend_type;
46 using PSolverBackendType = typename PSolver::backend_type;
47
48 static_assert(std::is_same<USolverBackendType, PSolverBackendType>::value,
49 "Backends for pressure and flow preconditioners should coincide!");
50
51 public:
52
54 using BackendType = backend_type;
55
56 typedef typename backend_type::value_type value_type;
57 typedef typename math::scalar_of<value_type>::type scalar_type;
58 typedef typename backend_type::matrix bmatrix;
59 typedef typename backend_type::vector vector;
60 typedef typename backend_type::params backend_params;
61
63
64 typedef typename BuiltinBackend<value_type>::matrix build_matrix;
65
  /// Runtime parameters of the Schur pressure correction preconditioner.
  struct params
  {
    typedef typename USolver::params usolver_params;
    typedef typename PSolver::params psolver_params;

    // Parameters forwarded to the flow (Kuu) solver.
    usolver_params usolver;
    // Parameters forwarded to the pressure (Schur complement) solver.
    psolver_params psolver;

    // Pressure mask over the local unknowns: pmask[i] != 0 marks unknown i
    // as a pressure variable, otherwise it is a flow variable.
    std::vector<char> pmask;

    // Variant of block preconditioner to use in apply()
    // 1: schur pressure correction:
    //      S p = fp - Kpu Kuu^-1 fu
    //      Kuu u = fu - Kup p
    // 2: Block triangular:
    //      S p = fp
    //      Kuu u = fu - Kup p
    int type = 1;

    // Approximate Kuu^-1 with inverted diagonal of Kuu during
    // construction of matrix-less Schur complement.
    // When false, USolver is used instead.
    bool approx_schur = false;

    // Use 1/sum_j(abs(Kuu_{i,j})) instead of dia(Kuu)^-1
    // as approximation for the Kuu^-1 (as in SIMPLEC algorithm)
    bool simplec_dia = true;

    // Verbosity level; >= 1 enables per-solve convergence reports in
    // debug builds.
    int verbose = 0;

    params() = default;

    /// Reads the parameters from a property tree.
    ///
    /// Requires "pmask_size" (> 0) plus exactly one of:
    ///  - "pmask_pattern": a string of one of three forms —
    ///      "%s:d" every d-th unknown starting at s is pressure
    ///             (note: stride is parsed at fixed offset 3, so a
    ///             single-digit start is assumed);
    ///      "<m"   unknowns [0, m) are pressure;
    ///      ">m"   unknowns [m, n) are pressure;
    ///  - "pmask": a raw pointer to n chars copied into the mask.
    params(const Alina::PropertyTree& p)
    : ARCCORE_ALINA_PARAMS_IMPORT_CHILD(p, usolver)
    , ARCCORE_ALINA_PARAMS_IMPORT_CHILD(p, psolver)
    , ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, type)
    , ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, approx_schur)
    , ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, simplec_dia)
    , ARCCORE_ALINA_PARAMS_IMPORT_VALUE(p, verbose)
    {
      size_t n = 0;

      n = p.get("pmask_size", n);

      precondition(n > 0, "Error in schur_complement parameters: pmask_size is not set");

      if (p.count("pmask_pattern")) {
        pmask.resize(n, 0);

        std::string pattern = p.get("pmask_pattern", std::string());
        switch (pattern[0]) {
        case '%': {
          int start = std::atoi(pattern.substr(1).c_str());
          int stride = std::atoi(pattern.substr(3).c_str());
          // NOTE(review): start is an int converted to size_t below — a
          // negative start would wrap around; confirm inputs are
          // non-negative.
          for (size_t i = start; i < n; i += stride)
            pmask[i] = 1;
        } break;
        case '<': {
          size_t m = std::atoi(pattern.c_str() + 1);
          for (size_t i = 0; i < std::min(m, n); ++i)
            pmask[i] = 1;
        } break;
        case '>': {
          size_t m = std::atoi(pattern.c_str() + 1);
          for (size_t i = m; i < n; ++i)
            pmask[i] = 1;
        } break;
        default:
          Alina::precondition(false, "Unknown pattern in pmask_pattern");
        }
      }
      else if (p.count("pmask")) {
        // The property holds a raw pointer to n chars; copy them into the
        // mask.
        void* pm = 0;
        pm = p.get("pmask", pm);
        pmask.assign(static_cast<char*>(pm), static_cast<char*>(pm) + n);
      }
      else {
        ARCANE_FATAL("Error in schur_complement parameters: neither pmask_pattern, nor pmask is set");
      }

      // Reject unknown keys; "pmask"/"pmask_pattern" are optional
      // (exactly one is required by the checks above).
      p.check_params({ "usolver", "psolver", "type", "approx_schur", "simplec_dia", "pmask_size", "verbose" },
                     { "pmask", "pmask_pattern" });
    }

    /// Exports the parameters (except the mask itself) into a property tree
    /// under the given path.
    void get(PropertyTree& p, const std::string& path = "") const
    {
      ARCCORE_ALINA_PARAMS_EXPORT_CHILD(p, path, usolver);
      ARCCORE_ALINA_PARAMS_EXPORT_CHILD(p, path, psolver);
      ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, type);
      ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, approx_schur);
      ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, simplec_dia);
      ARCCORE_ALINA_PARAMS_EXPORT_VALUE(p, path, verbose);
    }
  };
160
  /// Builds the preconditioner from a local chunk of the system matrix.
  ///
  /// @param comm MPI communicator shared by all participating ranks.
  /// @param K    Local rows of the global system matrix.
  /// @param prm  Runtime parameters (pressure mask, solver settings).
  /// @param bprm Backend-specific parameters.
  template <class Matrix>
  DistributedSchurPressureCorrection(mpi_communicator comm,
                                     const Matrix& K,
                                     const params& prm = params(),
                                     const backend_params& bprm = backend_params())
  : prm(prm)
  , comm(comm)
  {
    // Wrap the user matrix into a distributed matrix, then split it into
    // the pressure/flow sub-blocks.
    this->K = std::make_shared<matrix>(comm, K, backend::nbRow(K));
    init(bprm);
  }
172
174 std::shared_ptr<matrix> K,
175 const params& prm = params(),
176 const backend_params& bprm = backend_params())
177 : prm(prm)
178 , comm(comm)
179 , K(K)
180 {
181 init(bprm);
182 }
183
  /// Splits K into pressure/flow sub-blocks and sets up the sub-solvers.
  ///
  /// Using the pressure mask, assembles the local and remote parts of the
  /// four coupling blocks Kpp, Kuu, Kpu, Kup, builds the flow solver U on
  /// Kuu and the pressure solver P on Kpp, allocates work vectors, and
  /// assembles the scatter/gather operators between the monolithic unknown
  /// vector and the (u, p) split.
  ///
  /// @param bprm Backend parameters forwarded to every backend
  ///             allocation/copy.
  void init(const backend_params& bprm)
  {
    using std::make_shared;
    using std::make_tuple;
    using std::shared_ptr;
    using std::tie;

    auto _K_loc = K->local();
    auto _K_rem = K->remote();

    build_matrix& K_loc = *_K_loc;
    build_matrix& K_rem = *_K_rem;

    ptrdiff_t n = K->loc_rows();

    // Count pressure and flow variables.
    // idx[i] is the position of unknown i inside its own (pressure or
    // flow) local numbering.
    ARCCORE_ALINA_TIC("count pressure/flow vars");
    std::vector<ptrdiff_t> idx(n);
    ptrdiff_t np = 0, nu = 0;

    for (ptrdiff_t i = 0; i < n; ++i)
      idx[i] = (prm.pmask[i] ? np++ : nu++);
    ARCCORE_ALINA_TOC("count pressure/flow vars");

    ARCCORE_ALINA_TIC("setup communication");
    // We know what points each of our neighbors needs from us;
    // and we know if those points are pressure or flow.
    // We can immediately provide them with our renumbering scheme.
    std::vector<ptrdiff_t> pdomain = comm.exclusive_sum(np);
    std::vector<ptrdiff_t> udomain = comm.exclusive_sum(nu);
    ptrdiff_t p_beg = pdomain[comm.rank];
    ptrdiff_t u_beg = udomain[comm.rank];

    const CommPattern& C = this->K->cpat();
    ptrdiff_t nsend = C.send.count(), nrecv = C.recv.count();
    std::vector<char> smask(nsend), rmask(nrecv);
    std::vector<ptrdiff_t> s_idx(nsend), r_idx(nrecv);

    for (ptrdiff_t i = 0; i < nsend; ++i) {
      ptrdiff_t c = C.send.col[i];
      smask[i] = prm.pmask[c];
      s_idx[i] = idx[c] + (smask[i] ? p_beg : u_beg);
    }

    // After the exchange, rmask/r_idx hold the mask and renumbered global
    // index of every remote (ghost) column.
    C.exchange(&smask[0], &rmask[0]);
    C.exchange(&s_idx[0], &r_idx[0]);
    ARCCORE_ALINA_TOC("setup communication");

    // Fill the subblocks of the system matrix.
    // K_rem->col may be used as direct indices into rmask and r_idx.
    ARCCORE_ALINA_TIC("schur blocks");
    this->K->move_to_backend(bprm);

    auto Kpp_loc = make_shared<build_matrix>();
    auto Kpp_rem = make_shared<build_matrix>();
    auto Kuu_loc = make_shared<build_matrix>();
    auto Kuu_rem = make_shared<build_matrix>();

    auto Kpu_loc = make_shared<build_matrix>();
    auto Kpu_rem = make_shared<build_matrix>();
    auto Kup_loc = make_shared<build_matrix>();
    auto Kup_rem = make_shared<build_matrix>();

    Kpp_loc->set_size(np, np, true);
    Kpp_rem->set_size(np, 0, true);

    Kuu_loc->set_size(nu, nu, true);
    Kuu_rem->set_size(nu, 0, true);

    Kpu_loc->set_size(np, nu, true);
    Kpu_rem->set_size(np, 0, true);

    Kup_loc->set_size(nu, np, true);
    Kup_rem->set_size(nu, 0, true);

    // First pass: count the nonzeros each row contributes to each
    // sub-block (counts are accumulated into ptr[ci + 1] so that the scan
    // below turns them into CRS row pointers).
    arccoreParallelFor(0, n, ForLoopRunInfo{}, [&](Int32 begin, Int32 size) {
      for (ptrdiff_t i = begin; i < (begin + size); ++i) {
        ptrdiff_t ci = idx[i];
        char pi = prm.pmask[i];

        for (auto a = backend::row_begin(K_loc, i); a; ++a) {
          char pj = prm.pmask[a.col()];

          if (pi) {
            if (pj) {
              ++Kpp_loc->ptr[ci + 1];
            }
            else {
              ++Kpu_loc->ptr[ci + 1];
            }
          }
          else {
            if (pj) {
              ++Kup_loc->ptr[ci + 1];
            }
            else {
              ++Kuu_loc->ptr[ci + 1];
            }
          }
        }

        for (auto a = backend::row_begin(K_rem, i); a; ++a) {
          char pj = rmask[a.col()];

          if (pi) {
            if (pj) {
              ++Kpp_rem->ptr[ci + 1];
            }
            else {
              ++Kpu_rem->ptr[ci + 1];
            }
          }
          else {
            if (pj) {
              ++Kup_rem->ptr[ci + 1];
            }
            else {
              ++Kuu_rem->ptr[ci + 1];
            }
          }
        }
      }
    });

    // Turn per-row counts into row pointers and allocate col/val storage.
    Kpp_loc->set_nonzeros(Kpp_loc->scan_row_sizes());
    Kpp_rem->set_nonzeros(Kpp_rem->scan_row_sizes());

    Kuu_loc->set_nonzeros(Kuu_loc->scan_row_sizes());
    Kuu_rem->set_nonzeros(Kuu_rem->scan_row_sizes());

    Kpu_loc->set_nonzeros(Kpu_loc->scan_row_sizes());
    Kpu_rem->set_nonzeros(Kpu_rem->scan_row_sizes());

    Kup_loc->set_nonzeros(Kup_loc->scan_row_sizes());
    Kup_rem->set_nonzeros(Kup_rem->scan_row_sizes());

    // Fill subblocks of the system matrix.
    // Second pass: write column indices and values; each row owns a
    // disjoint range of the CRS arrays, so the parallel loop is race-free.
    arccoreParallelFor(0, n, ForLoopRunInfo{}, [&](Int32 begin, Int32 size) {
      for (ptrdiff_t i = begin; i < (begin + size); ++i) {
        ptrdiff_t ci = idx[i];
        char pi = prm.pmask[i];

        ptrdiff_t pp_loc_head = 0, pp_rem_head = 0;
        ptrdiff_t uu_loc_head = 0, uu_rem_head = 0;
        ptrdiff_t pu_loc_head = 0, pu_rem_head = 0;
        ptrdiff_t up_loc_head = 0, up_rem_head = 0;

        if (pi) {
          pp_loc_head = Kpp_loc->ptr[ci];
          pp_rem_head = Kpp_rem->ptr[ci];
          pu_loc_head = Kpu_loc->ptr[ci];
          pu_rem_head = Kpu_rem->ptr[ci];
        }
        else {
          uu_loc_head = Kuu_loc->ptr[ci];
          uu_rem_head = Kuu_rem->ptr[ci];
          up_loc_head = Kup_loc->ptr[ci];
          up_rem_head = Kup_rem->ptr[ci];
        }

        for (auto a = backend::row_begin(K_loc, i); a; ++a) {
          ptrdiff_t j = a.col();
          value_type v = a.value();
          char pj = prm.pmask[j];
          ptrdiff_t cj = idx[j];

          if (pi) {
            if (pj) {
              Kpp_loc->col[pp_loc_head] = cj;
              Kpp_loc->val[pp_loc_head] = v;
              ++pp_loc_head;
            }
            else {
              Kpu_loc->col[pu_loc_head] = cj;
              Kpu_loc->val[pu_loc_head] = v;
              ++pu_loc_head;
            }
          }
          else {
            if (pj) {
              Kup_loc->col[up_loc_head] = cj;
              Kup_loc->val[up_loc_head] = v;
              ++up_loc_head;
            }
            else {
              Kuu_loc->col[uu_loc_head] = cj;
              Kuu_loc->val[uu_loc_head] = v;
              ++uu_loc_head;
            }
          }
        }

        for (auto a = backend::row_begin(K_rem, i); a; ++a) {
          ptrdiff_t j = a.col();
          value_type v = a.value();
          char pj = rmask[j];
          ptrdiff_t cj = r_idx[j];

          if (pi) {
            if (pj) {
              Kpp_rem->col[pp_rem_head] = cj;
              Kpp_rem->val[pp_rem_head] = v;
              ++pp_rem_head;
            }
            else {
              Kpu_rem->col[pu_rem_head] = cj;
              Kpu_rem->val[pu_rem_head] = v;
              ++pu_rem_head;
            }
          }
          else {
            if (pj) {
              Kup_rem->col[up_rem_head] = cj;
              Kup_rem->val[up_rem_head] = v;
              ++up_rem_head;
            }
            else {
              Kuu_rem->col[uu_rem_head] = cj;
              Kuu_rem->val[uu_rem_head] = v;
              ++uu_rem_head;
            }
          }
        }
      }
    });

    auto Kpp = std::make_shared<matrix>(comm, Kpp_loc, Kpp_rem);
    auto Kuu = std::make_shared<matrix>(comm, Kuu_loc, Kuu_rem);

    Kpu = make_shared<matrix>(comm, Kpu_loc, Kpu_rem);
    Kup = make_shared<matrix>(comm, Kup_loc, Kup_rem);

    Kpu->move_to_backend(bprm);
    Kup->move_to_backend(bprm);
    ARCCORE_ALINA_TOC("schur blocks");

    ARCCORE_ALINA_TIC("usolver")
    U = make_shared<USolver>(comm, Kuu, prm.usolver, bprm);
    ARCCORE_ALINA_TOC("usolver")
    ARCCORE_ALINA_TIC("psolver")
    P = make_shared<PSolver>(comm, Kpp, prm.psolver, bprm);
    ARCCORE_ALINA_TOC("psolver")

    // Work vectors sized to the local flow (nu) and pressure (np) parts.
    ARCCORE_ALINA_TIC("other");
    rhs_u = backend_type::create_vector(nu, bprm);
    rhs_p = backend_type::create_vector(np, bprm);

    u = backend_type::create_vector(nu, bprm);
    p = backend_type::create_vector(np, bprm);

    tmp = backend_type::create_vector(nu, bprm);

    if (prm.approx_schur) {
      // Diagonal approximation M of Kuu^-1, used by the matrix-free Schur
      // operator in spmv().
      std::shared_ptr<numa_vector<value_type>> Kuu_dia;
      ARCCORE_ALINA_TIC("Kuu diagonal");
      if (prm.simplec_dia) {
        // SIMPLEC-style: M_ii = 1 / sum_j |Kuu(i,j)| over both the local
        // and remote parts of row i.
        Kuu_dia = std::make_shared<numa_vector<value_type>>(nu, false);
        arccoreParallelFor(0, nu, ForLoopRunInfo{}, [&](Int32 begin, Int32 size) {
          for (ptrdiff_t i = begin; i < (begin + size); ++i) {
            value_type s = math::zero<value_type>();
            for (ptrdiff_t j = Kuu_loc->ptr[i], e = Kuu_loc->ptr[i + 1]; j < e; ++j) {
              s += math::norm(Kuu_loc->val[j]);
            }
            for (ptrdiff_t j = Kuu_rem->ptr[i], e = Kuu_rem->ptr[i + 1]; j < e; ++j) {
              s += math::norm(Kuu_rem->val[j]);
            }
            (*Kuu_dia)[i] = math::inverse(s);
          }
        });
      }
      else {
        Kuu_dia = diagonal(*Kuu_loc, /*invert = */ true);
      }

      M = backend_type::copy_vector(Kuu_dia, bprm);
      ARCCORE_ALINA_TOC("Kuu diagonal");
    }

    // Scatter/Gather matrices
    // x2u/x2p gather the flow/pressure parts out of a monolithic vector;
    // u2x/p2x scatter them back. All four are boolean selection matrices
    // with one identity entry per selected row.
    ARCCORE_ALINA_TIC("scatter/gather");
    auto x2u = std::make_shared<build_matrix>();
    auto x2p = std::make_shared<build_matrix>();
    auto u2x = std::make_shared<build_matrix>();
    auto p2x = std::make_shared<build_matrix>();

    x2u->set_size(nu, n, true);
    x2p->set_size(np, n, true);
    u2x->set_size(n, nu, true);
    p2x->set_size(n, np, true);

    {
      // Build the row-pointer arrays: gather matrices get one entry per
      // selected unknown; scatter matrices get an (empty or single-entry)
      // row for every unknown.
      ptrdiff_t x2u_head = 0, x2u_idx = 0;
      ptrdiff_t x2p_head = 0, x2p_idx = 0;
      ptrdiff_t u2x_head = 0, u2x_idx = 0;
      ptrdiff_t p2x_head = 0, p2x_idx = 0;

      for (ptrdiff_t i = 0; i < n; ++i) {
        if (prm.pmask[i]) {
          x2p->ptr[++x2p_idx] = ++x2p_head;
          ++p2x_head;
        }
        else {
          x2u->ptr[++x2u_idx] = ++x2u_head;
          ++u2x_head;
        }

        p2x->ptr[++p2x_idx] = p2x_head;
        u2x->ptr[++u2x_idx] = u2x_head;
      }
    }

    x2u->set_nonzeros();
    x2p->set_nonzeros();
    u2x->set_nonzeros();
    p2x->set_nonzeros();

    {
      // Fill the selection entries (identity values).
      ptrdiff_t x2u_head = 0;
      ptrdiff_t x2p_head = 0;
      ptrdiff_t u2x_head = 0;
      ptrdiff_t p2x_head = 0;

      for (ptrdiff_t i = 0; i < n; ++i) {
        ptrdiff_t j = idx[i];

        if (prm.pmask[i]) {
          x2p->col[x2p_head] = i;
          x2p->val[x2p_head] = math::identity<value_type>();
          ++x2p_head;

          p2x->col[p2x_head] = j;
          p2x->val[p2x_head] = math::identity<value_type>();
          ++p2x_head;
        }
        else {
          x2u->col[x2u_head] = i;
          x2u->val[x2u_head] = math::identity<value_type>();
          ++x2u_head;

          u2x->col[u2x_head] = j;
          u2x->val[u2x_head] = math::identity<value_type>();
          ++u2x_head;
        }
      }
    }

    this->x2u = backend_type::copy_matrix(x2u, bprm);
    this->x2p = backend_type::copy_matrix(x2p, bprm);
    this->u2x = backend_type::copy_matrix(u2x, bprm);
    this->p2x = backend_type::copy_matrix(p2x, bprm);
    ARCCORE_ALINA_TOC("scatter/gather");
  }
536
537 std::shared_ptr<matrix> system_matrix_ptr() const
538 {
539 return K;
540 }
541
542 const matrix& system_matrix() const
543 {
544 return *K;
545 }
546
  /// Applies one pass of the block preconditioner: x ~= K^-1 rhs.
  ///
  /// prm.type == 1 (Schur pressure correction):
  ///   solve Kuu u = fu; solve S p = fp - Kpu u; re-solve Kuu u = fu - Kup p.
  /// prm.type == 2 (block triangular):
  ///   solve S p = fp; solve Kuu u = fu - Kup p.
  /// The Schur complement S is applied matrix-free via this->spmv().
  template <class Vec1, class Vec2>
  void apply(const Vec1& rhs, Vec2&& x) const
  {
    const auto one = math::identity<scalar_type>();
    const auto zero = math::zero<scalar_type>();

    // Gather the flow/pressure parts of the monolithic rhs.
    ARCCORE_ALINA_TIC("split variables");
    backend::spmv(one, *x2u, rhs, zero, *rhs_u);
    backend::spmv(one, *x2p, rhs, zero, *rhs_p);
    ARCCORE_ALINA_TOC("split variables");

    if (prm.type == 1) {
      // Ai u = rhs_u
      ARCCORE_ALINA_TIC("solve U");
      backend::clear(*u);
      report("U1", (*U)(*rhs_u, *u));
      ARCCORE_ALINA_TOC("solve U");

      // rhs_p -= Kpu u
      ARCCORE_ALINA_TIC("solve P");
      backend::spmv(-one, *Kpu, *u, one, *rhs_p);

      // S p = rhs_p (passing *this gives P the matrix-free Schur operator)
      backend::clear(*p);
      report("P", (*P)(*this, *rhs_p, *p));
      ARCCORE_ALINA_TOC("solve P");

      // rhs_u -= Kup p
      ARCCORE_ALINA_TIC("Update U");
      backend::spmv(-one, *Kup, *p, one, *rhs_u);

      // Ai u = rhs_u
      backend::clear(*u);
      report("U2", (*U)(*rhs_u, *u));
      ARCCORE_ALINA_TOC("Update U");
    }
    else if (prm.type == 2) {
      // S p = rhs_p
      ARCCORE_ALINA_TIC("solve P");
      backend::clear(*p);
      report("P", (*P)(*this, *rhs_p, *p));
      ARCCORE_ALINA_TOC("solve P");

      // Ai u = fu - Kup p
      ARCCORE_ALINA_TIC("solve U");
      backend::spmv(-one, *Kup, *p, one, *rhs_u);
      backend::clear(*u);
      report("U", (*U)(*rhs_u, *u));
      ARCCORE_ALINA_TOC("solve U");
    }

    // Scatter the sub-solutions back: x = u2x u + p2x p
    // (the two scatter matrices select disjoint rows of x).
    ARCCORE_ALINA_TIC("merge variables");
    backend::spmv(one, *u2x, *u, zero, x);
    backend::spmv(one, *p2x, *p, one, x);
    ARCCORE_ALINA_TOC("merge variables");
  }
603
  /// Matrix-free product with the Schur complement:
  /// y = beta y + alpha S x, where S = Kpp - Kpu Kuu^-1 Kup.
  ///
  /// Kuu^-1 is applied either as the precomputed diagonal M
  /// (prm.approx_schur) or through the flow solver U.
  template <class Alpha, class Vec1, class Beta, class Vec2>
  void spmv(Alpha alpha, const Vec1& x, Beta beta, Vec2& y) const
  {
    const auto one = math::identity<scalar_type>();
    const auto zero = math::zero<scalar_type>();

    // y = beta y + alpha S x, where S = Kpp - Kpu Kuu^-1 Kup
    ARCCORE_ALINA_TIC("matrix-free spmv");
    // P->system_matrix() is Kpp.
    backend::spmv(alpha, P->system_matrix(), x, beta, y);

    // tmp = Kup x
    backend::spmv(one, *Kup, x, zero, *tmp);

    if (prm.approx_schur) {
      // u = M tmp (diagonal approximation of Kuu^-1, see init()).
      backend::vmul(one, *M, *tmp, zero, *u);
    }
    else {
      // u = Kuu^-1 tmp via the flow solver.
      backend::clear(*u);
      (*U)(*tmp, *u);
    }
    // y -= alpha Kpu u
    backend::spmv(-alpha, *Kpu, *u, one, y);
    ARCCORE_ALINA_TOC("matrix-free spmv");
  }
626
627 public:
628
629 params prm;
630
631 private:
632
633 typedef CommunicationPattern<backend_type> CommPattern;
634 mpi_communicator comm;
635
636 std::shared_ptr<bmatrix> x2p, x2u, p2x, u2x;
637 std::shared_ptr<matrix> K, Kpu, Kup;
638 std::shared_ptr<vector> rhs_u, rhs_p, u, p, tmp;
639 std::shared_ptr<typename backend_type::matrix_diagonal> M;
640
641 std::shared_ptr<USolver> U;
642 std::shared_ptr<PSolver> P;
643
644#ifdef ARCCORE_ALINA_DEBUG
645 void report(const std::string& name, const SolverResult& sr) const
646 {
647 if (comm.rank == 0 && prm.report >= 1) {
648 std::cout << name << " (" << sr.nbIteration() << ", " << sr.residual() << ")\n";
649 }
650 }
651#else
652 void report(const std::string&, const SolverResult&) const
653 {
654 }
655#endif
656};
657
658/*---------------------------------------------------------------------------*/
659/*---------------------------------------------------------------------------*/
660
661} // namespace Arcane::Alina
662
663/*---------------------------------------------------------------------------*/
664/*---------------------------------------------------------------------------*/
665
666namespace Arcane::Alina::backend
667{
668
669/*---------------------------------------------------------------------------*/
670/*---------------------------------------------------------------------------*/
671
/// Implementation for matrix-vector product.
///
/// Routes backend::spmv() on a DistributedSchurPressureCorrection to its
/// matrix-free Schur-complement product: y = beta y + alpha S x.
template <class US, class PS, class Alpha, class Beta, class Vec1, class Vec2>
struct spmv_impl<Alpha, DistributedSchurPressureCorrection<US, PS>, Vec1, Beta, Vec2>
{
  static void apply(Alpha alpha, const DistributedSchurPressureCorrection<US, PS>& A, const Vec1& x, Beta beta, Vec2& y)
  {
    A.spmv(alpha, x, beta, y);
  }
};
680
/// Implementation for residual error computation.
///
/// Computes r = rhs - S x using the preconditioner's matrix-free Schur
/// product.
template <class US, class PS, class Vec1, class Vec2, class Vec3>
struct residual_impl<DistributedSchurPressureCorrection<US, PS>, Vec1, Vec2, Vec3>
{
  static void apply(const Vec1& rhs, const DistributedSchurPressureCorrection<US, PS>& A, const Vec2& x, Vec3& r)
  {
    backend::copy(rhs, r);
    // r += -1 * S x
    A.spmv(-1, x, 1, r);
  }
};
690
691/*---------------------------------------------------------------------------*/
692/*---------------------------------------------------------------------------*/
693
694} // namespace Arcane::Alina::backend
695
696/*---------------------------------------------------------------------------*/
697/*---------------------------------------------------------------------------*/
698
699#endif
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
Distributed Matrix using message passing.
Distributed Schur complement pressure correction preconditioner.
Matrix class, to be used by user.
void arccoreParallelFor(const ComplexForLoopRanges< RankValue > &loop_ranges, const ForLoopRunInfo &run_info, const LambdaType &lambda_function, const ReducerArgs &... reducer_args)
Applique en concurrence la fonction lambda lambda_function sur l'intervalle d'itération donné par loop_ranges.
Definition ParallelFor.h:85
std::int32_t Int32
Type entier signé sur 32 bits.
Implementation for residual error computation.
Implementation for matrix-vector product.
Convenience wrapper around MPI_Comm.
std::vector< T > exclusive_sum(T n) const
Exclusive sum over mpi communicator.