#include <algorithm>
#include <cmath>
#include <cstddef>
#include <iostream>
#include <tuple>
#include <vector>

#include <mpi.h>

#include "arccore/alina/BuiltinBackend.h"
#include "arccore/alina/StaticMatrix.h"
#include "arccore/alina/Adapters.h"

#include "arccore/alina/DistributedMatrix.h"
#include "arccore/alina/DistributedPreconditionedSolver.h"
#include "arccore/alina/DistributedAMG.h"
#include "arccore/alina/DistributedCoarsening.h"
#include "arccore/alina/DistributedRelaxation.h"
#include "arccore/alina/DistributedSolver.h"

#include "arccore/alina/IO.h"
#include "arccore/alina/Profiler.h"

#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
#include "arccore/alina/ParmetisMatrixPartitioner.h"
#endif
using namespace Arcane::Alina;
namespace Alina = Arcane::Alina; // alias so the Alina::-qualified names below resolve
int main(int argc, char* argv[])
{
    // The matrix file name must be given on the command line:
    if (argc < 2) {
        std::cerr << "Usage: " << argv[0] << " <matrix.bin>" << std::endl;
        return 1;
    }

    // The listing omits the MPI setup. The two wrapper types below are
    // assumptions, named after the wrappers the library documents
    // (MPI_Init/MPI_Finalize and MPI_Comm convenience wrappers):
    Alina::MpiInit mpi(&argc, &argv);
    Alina::Communicator world(MPI_COMM_WORLD);

    auto& prof = Alina::Profiler::globalProfiler();
    // Determine the global size of the matrix without reading its contents:
    prof.tic("read");
    ptrdiff_t rows = Alina::IO::crs_size<ptrdiff_t>(argv[1]);

    // Split the matrix into approximately equal chunks of rows, and make sure
    // each chunk size is divisible by the block size B (the value of B is not
    // shown in the listing; 3 is assumed here):
    constexpr int B = 3;
    ptrdiff_t chunk = (rows + world.size - 1) / world.size;
    if (chunk % B)
        chunk += B - chunk % B;

    ptrdiff_t row_beg = std::min(rows, chunk * world.rank);
    ptrdiff_t row_end = std::min(rows, row_beg + chunk);
    chunk = row_end - row_beg;
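    // Worked example with illustrative numbers (not from the listing): for
    // rows = 999999, world.size = 4, and B = 3, the naive chunk is 250000;
    // since 250000 % 3 == 1 it is rounded up to 250002, and the std::min
    // clamping leaves the last rank with the (still B-aligned) remainder.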
    // Read our chunk of the system matrix:
    std::vector<ptrdiff_t> ptr, col;
    std::vector<double> val;
    Alina::IO::read_crs(argv[1], rows, ptr, col, val, row_beg, row_end);
    prof.toc("read");
    if (world.rank == 0)
        std::cout << "World size: " << world.size << std::endl
                  << "Matrix " << argv[1] << ": " << rows << "x" << rows << std::endl;
    // Declare the block value types, the backend, and the solver. These
    // definitions are omitted from the listing; the template signatures below
    // are reconstructed from the included headers and are assumptions:
    typedef Alina::StaticMatrix<double, B, B> dmat_type;
    typedef Alina::StaticMatrix<double, B, 1> dvec_type;
    typedef Alina::backend::builtin<dmat_type> DBackend;

    typedef Alina::DistributedPreconditionedSolver<
        Alina::DistributedAMG<
            DBackend,
            Alina::DistributedCoarsening<DBackend>, // smoothed aggregation
            Alina::DistributedRelaxation<DBackend>
            >,
        Alina::DistributedSolver<DBackend>
        > Solver;

    // Solver parameters; cap the iteration count:
    Solver::params prm;
    prm.solver.maxiter = 200;
    // Scale the system to a unit diagonal: A_hat = D^{-1/2} A D^{-1/2}.
    prof.tic("scale");

    // Find the local diagonal values, and fill the CRS arrays of the
    // distributed diagonal matrix D:
    std::vector<double> dia(chunk, 1.0);
    std::vector<ptrdiff_t> d_ptr(chunk + 1), d_col(chunk);
    for (ptrdiff_t i = 0, I = row_beg; i < chunk; ++i, ++I) {
        d_ptr[i] = i;
        d_col[i] = I;
        for (ptrdiff_t j = ptr[i], e = ptr[i + 1]; j < e; ++j) {
            if (col[j] == I) {
                dia[i] = 1 / std::sqrt(val[j]);
                break;
            }
        }
    }
    d_ptr.back() = chunk;

    // Create the distributed diagonal matrix, then form the scaled system
    // A = D * A0 * D with the local CRS chunk converted to the block format
    // on the fly. The construction of D and the products are reconstructed;
    // the listing only shows the block_matrix adapter call:
    Alina::DistributedMatrix<DBackend> D(world,
        Alina::adapter::block_matrix<dmat_type>(
            std::tie(chunk, d_ptr, d_col, dia)));

    auto A = product(D, *product(
        Alina::DistributedMatrix<DBackend>(world,
            Alina::adapter::block_matrix<dmat_type>(
                std::tie(chunk, ptr, col, val))),
        D));
    prof.toc("scale");
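    // Why two-sided scaling: with D = diag(1/sqrt(a_ii)), the product
    // D * A * D has a unit diagonal and stays symmetric whenever A is, which
    // typically helps the AMG hierarchy on systems whose coefficients span
    // many orders of magnitude. Doing it through distributed products also
    // avoids gathering remote diagonal entries by hand, since each rank only
    // owns its local rows.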
    // Assuming the RHS of the unscaled system is filled with ones, the scaled
    // RHS D * f equals dia; reinterpret its data in the block format:
    auto f_ptr = reinterpret_cast<dvec_type*>(dia.data());
    std::vector<dvec_type> rhs(f_ptr, f_ptr + chunk / B);
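    // The reinterpret_cast above is only valid because every chunk size is a
    // multiple of B: the split points were rounded to multiples of B, and a
    // block-structured matrix has rows % B == 0, so the last chunk is aligned
    // as well.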
#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
    // Repartition the system with ParMETIS to reduce the communication
    // volume. Without ParMETIS the naive row-wise partitioning is kept.
    if (world.size > 1) {
        prof.tic("partition");

        // Obtain the prolongation operator P mapping rows to their new owners
        // (the partitioner call is reconstructed; the listing omits it):
        Alina::ParmetisMatrixPartitioner<DBackend> part;
        auto P = part(*A);

        // The restriction operator is the transpose of P:
        auto R = transpose(*P);

        // Reorder the system matrix: A <- R * A * P.
        A = product(*R, *product(*A, *P));

        // Reorder the RHS vector: rhs <- R * rhs.
        std::vector<dvec_type> new_rhs(R->loc_rows());
        R->move_to_backend();
        Alina::backend::spmv(1, *R, rhs, 0, new_rhs);
        rhs.swap(new_rhs);

        // The number of local rows may have changed with the new partition:
        chunk = A->loc_rows();
        prof.toc("partition");
    }
#endif
    // Initialize the solver:
    prof.tic("setup");
    Solver solve(world, A, prm);
    prof.toc("setup");

    // Show a mini-report on the constructed solver:
    if (world.rank == 0)
        std::cout << solve << std::endl;
    // Solve the system with the zero initial approximation (the call
    // returning the result object r is reconstructed; the listing omits it):
    std::vector<dvec_type> x(chunk, Alina::math::zero<dvec_type>());

    prof.tic("solve");
    auto r = solve(*A, rhs, x);
    prof.toc("solve");

    // Report the iteration count, the achieved residual, and the profile:
    if (world.rank == 0)
        std::cout << "Iters: " << r.nbIteration() << std::endl
                  << "Error: " << r.residual() << std::endl
                  << prof << std::endl;

    return 0;
}
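// Building and running (illustrative; the target name and link line are not
// part of the listing):
//
//   mpicxx -O3 -o alina_mpi_solve alina_mpi_solve.cpp -lparmetis -lmetis
//   mpirun -np 8 ./alina_mpi_solve matrix.bin
//
// Given the std::cout statements above, the console output has the form:
//
//   World size: 8
//   Matrix matrix.bin: NxN
//   Iters: <iteration count>
//   Error: <residual>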