// NOTE(review): this chunk is documentation-extraction residue -- the original
// file's line numbers are fused into the code text (e.g. "79struct") and
// several lines (braces, constructor/method bodies) are missing.  Comments
// below describe only what is visible; do not treat this text as compilable.
//
// Distributed wrapper over a serial Gauss-Seidel smoother: the base
// relaxation (Base) is constructed from, and applied to, the *local*
// diagonal block of a distributed matrix (A.local()).
79struct DistributedGaussSeidelRelaxation
// Re-export the backend type and its parameter struct from the template
// parameter so outer code can refer to them uniformly.
82 typedef Backend backend_type;
84 typedef typename Backend::params backend_params;
// Constructor fragment: forwards the local part of the distributed matrix,
// the relaxation parameters, and the backend parameters to the base smoother.
88 const params& prm = params(),
89 const backend_params& bprm = backend_params())
90 : Base(*A.local(), prm, bprm)
// Pre-smoothing step of one relaxation sweep.  Body is not visible in this
// fragment; presumably it delegates to Base on the local data -- TODO confirm
// against the original source.
93 template <
class Matrix,
class VectorRHS,
class VectorX,
class VectorTMP>
94 void apply_pre(
const Matrix& A,
const VectorRHS& rhs, VectorX& x, VectorTMP& t)
const
// Post-smoothing step of one relaxation sweep (body not visible here).
99 template <
class Matrix,
class VectorRHS,
class VectorX,
class VectorTMP>
100 void apply_post(
const Matrix& A,
const VectorRHS& rhs, VectorX& x, VectorTMP& t)
const
// Standalone application as a preconditioner: x ~= A^{-1} rhs, computed by
// the base smoother on the backend representation of the local matrix.
105 template <
class Matrix,
class VectorRHS,
class VectorX>
106 void apply(
const Matrix& A,
const VectorRHS& rhs, VectorX& x)
const
108 Base::apply(*A.local_backend(), rhs, x);
// Distributed SPAI(0) relaxation (fragmentary extraction -- several lines are
// missing, see the visible gaps noted below).  SPAI(0) approximates A^{-1} by
// a diagonal matrix M chosen to minimize ||I - MA|| in the Frobenius norm,
// which for row i gives  m_i = a_ii / sum_j |a_ij|^2  (here accumulated over
// both the local and the remote part of the distributed row).
192struct DistributedSPAI0Relaxation
194 typedef Backend backend_type;
195 typedef typename Backend::value_type value_type;
196 typedef typename Backend::matrix_diagonal matrix_diagonal;
// scalar_type is the real scalar underlying value_type (relevant for
// complex-valued backends, where math::norm returns a real number).
197 typedef typename math::scalar_of<value_type>::type scalar_type;
199 typedef typename Backend::params backend_params;
// Constructor fragment: builds the diagonal M from the distributed matrix A.
202 const params&,
const backend_params& bprm = backend_params())
// n = number of rows owned by this process; A_loc/A_rem are the local and
// remote (off-process columns) parts of the distributed matrix.
206 const ptrdiff_t n = A.loc_rows();
207 const build_matrix& A_loc = *A.local();
208 const build_matrix& A_rem = *A.remote();
// Host-side vector that will hold the SPAI(0) diagonal before it is copied
// to the backend.
210 auto m = std::make_shared<numa_vector<value_type>>(n,
false);
// Per-row accumulation: num collects the diagonal entry, den the squared
// Frobenius norm of the full (local + remote) row.
214 for (ptrdiff_t i = begin; i < (begin + size); ++i) {
215 value_type num = math::zero<value_type>();
216 scalar_type den = math::zero<scalar_type>();
218 for (ptrdiff_t j = A_loc.ptr[i], e = A_loc.ptr[i + 1]; j < e; ++j) {
219 value_type v = A_loc.val[j];
220 scalar_type norm_v = math::norm(v);
221 den += norm_v * norm_v;
// Diagonal entry of the local part; the assignment that stores v (or its
// adjoint) into num is missing from this extraction -- TODO confirm against
// the original source.
222 if (A_loc.col[j] == i)
// Remote part contributes to the denominator only (no diagonal entries in
// the off-process columns).
226 for (ptrdiff_t j = A_rem.ptr[i], e = A_rem.ptr[i + 1]; j < e; ++j) {
227 value_type v = A_rem.val[j];
228 scalar_type norm_v = math::norm(v);
229 den += norm_v * norm_v;
// m_i = num / den  (math::inverse handles the scalar/matrix value types).
232 (*m)[i] = math::inverse(den) * num;
// Move the assembled diagonal to the backend's storage.
236 M = Backend::copy_vector(m, bprm);
// Pre-smoothing: one damped-Jacobi-like step  x += M .* (rhs - A x),
// with tmp holding the residual.
239 template <
class Matrix,
class VectorRHS,
class VectorX,
class VectorTMP>
240 void apply_pre(
const Matrix& A,
const VectorRHS& rhs, VectorX& x, VectorTMP& tmp)
const
242 static const scalar_type one = math::identity<scalar_type>();
243 backend::residual(rhs, A, x, tmp);
244 backend::vmul(one, *M, tmp, one, x);
// Post-smoothing: identical update to apply_pre (SPAI(0) is symmetric in
// its pre/post roles).
247 template <
class Matrix,
class VectorRHS,
class VectorX,
class VectorTMP>
248 void apply_post(
const Matrix& A,
const VectorRHS& rhs, VectorX& x, VectorTMP& tmp)
const
250 static const scalar_type one = math::identity<scalar_type>();
251 backend::residual(rhs, A, x, tmp);
252 backend::vmul(one, *M, tmp, one, x);
// Standalone application as a preconditioner: x = M .* rhs (the matrix
// argument is unused, hence unnamed).
255 template <
class Matrix,
class VectorRHS,
class VectorX>
256 void apply(
const Matrix&,
const VectorRHS& rhs, VectorX& x)
const
258 backend::vmul(math::identity<scalar_type>(), *M, rhs, math::zero<scalar_type>(), x);
// The SPAI(0) diagonal, stored in backend format.
263 std::shared_ptr<matrix_diagonal> M;
// Adapter that lets a distributed relaxation be used on its own as a full
// preconditioner (fragmentary extraction -- constructor initializer lists and
// the apply body are partially missing from this view).
291struct AsDistributedPreconditioner
// Forward the wrapped relaxation's parameter and backend types so the
// adapter exposes the interface solvers expect from a preconditioner.
293 typedef typename Relaxation::params params;
294 typedef typename Relaxation::BackendType backend_type;
295 using BackendType = backend_type;
296 typedef typename backend_type::params backend_params;
297 typedef typename backend_type::value_type value_type;
298 typedef typename math::scalar_of<value_type>::type scalar_type;
300 typedef typename backend_type::vector vector;
// Constructor fragment: builds the distributed system matrix from a
// user-supplied matrix, then moves it to backend storage.
302 template <
class Matrix>
305 const params& prm = params(),
306 const backend_params& bprm = backend_params())
307 : A(std::make_shared<matrix>(comm, A, backend::nbRow(A)))
310 this->A->move_to_backend(bprm);
// Constructor fragment taking an already-built distributed matrix; again
// moves it to the backend afterwards.
314 std::shared_ptr<matrix> A,
315 const params& prm = params(),
316 const backend_params& bprm = backend_params())
320 this->A->move_to_backend(bprm);
// Apply the preconditioner: x = P(rhs).  Body is not visible here;
// presumably it forwards to the wrapped relaxation -- TODO confirm.
323 template <
class Vec1,
class Vec2>
324 void apply(
const Vec1& rhs, Vec2&& x)
const
// Accessors for the stored system matrix (shared pointer and reference
// forms); solvers use these to reuse the matrix the preconditioner holds.
329 std::shared_ptr<matrix> system_matrix_ptr()
const
334 const matrix& system_matrix()
const
336 return *system_matrix_ptr();
// The distributed system matrix this preconditioner was built for.
341 std::shared_ptr<matrix> A;
// Human-readable summary: global problem size and nonzero count.
344 friend std::ostream& operator<<(std::ostream& os,
const AsDistributedPreconditioner& p)
346 os <<
"Relaxation as preconditioner" << std::endl;
347 os <<
"  unknowns: " << p.system_matrix().glob_rows() << std::endl;
348 os <<
"  nonzeros: " << p.system_matrix().glob_nonzeros() << std::endl;
// NOTE(review): the two lines below are unrelated extraction residue -- a
// fragment of Arccore/Arcane documentation for arccoreParallelFor that was
// pasted into this chunk.  Kept (commented out) for traceability; the
// truncated French sentence is completed.
// void arccoreParallelFor(const ComplexForLoopRanges< RankValue > &loop_ranges, const ForLoopRunInfo &run_info, const LambdaType &lambda_function, const ReducerArgs &... reducer_args)
// Applique en concurrence la fonction lambda lambda_function sur l'intervalle d'itération donné par loop_ranges.