35#include "arccore/alina/BuiltinBackend.h"
36#include "arccore/alina/Adapters.h"
38#include "arccore/alina/DistributedMatrix.h"
39#include "arccore/alina/DistributedPreconditionedSolver.h"
40#include "arccore/alina/DistributedAMG.h"
41#include "arccore/alina/DistributedCoarsening.h"
42#include "arccore/alina/DistributedRelaxation.h"
43#include "arccore/alina/DistributedSolver.h"
45#include "arccore/alina/IO.h"
46#include "arccore/alina/Profiler.h"
48#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
49#include "arccore/alina/ParmetisMatrixPartitioner.h"
53using namespace Arcane::Alina;
// NOTE(review): this block is a corrupted extraction. The original file's line
// numbers are fused into the text (e.g. "56int main("), and the jumps in that
// numbering (56 -> 60 -> 67 -> 72 ...) show many source lines are missing:
// the argc-count guard before the usage message, the declarations of `world`
// (MPI communicator wrapper), `cols`, the partitioner result `P`, the `Solver`
// type alias / `DBackend` definition, the solve call producing `r`, the
// closing braces and `return`, and the `#endif` matching the PARMETIS guard.
// Code below is kept byte-identical; comments only. Recover the original file
// before attempting any functional change.
56int main(
int argc,
char* argv[])
// Usage message — presumably emitted when fewer than 3 argv entries are
// supplied; the guarding `if` is among the missing lines. TODO confirm.
60 std::cerr <<
"Usage: " << argv[0] <<
" <matrix.bin> <rhs.bin>" << std::endl;
// Global profiler instance used for the tic/toc timing calls further down.
67 auto& prof = Alina::Profiler::globalProfiler();
// Total number of matrix rows, read from the CRS file header (argv[1]).
72 ptrdiff_t rows = Alina::IO::crs_size<ptrdiff_t>(argv[1]);
// Even row partitioning across MPI ranks: ceil(rows / world.size) per rank,
// clamped so the last rank's range does not run past `rows`.
// `world` is declared in a missing line — presumably an MPI_Comm wrapper
// exposing .size and .rank (see the stray header notes at end of file).
76 ptrdiff_t chunk = (rows + world.size - 1) / world.size;
77 ptrdiff_t row_beg = std::min(rows, chunk * world.rank);
78 ptrdiff_t row_end = std::min(rows, row_beg + chunk);
79 chunk = row_end - row_beg;
// Local CRS slice [row_beg, row_end) of the matrix, plus the matching slice
// of the dense right-hand side. `cols` is declared in a missing line.
82 std::vector<ptrdiff_t> ptr, col;
83 std::vector<double> val, rhs;
84 Alina::IO::read_crs(argv[1], rows, ptr, col, val, row_beg, row_end);
85 Alina::IO::read_dense(argv[2], rows, cols, rhs, row_beg, row_end);
// Banner printed (presumably on rank 0 only — the guarding `if` and the
// stream opener are among the missing lines; TODO confirm).
90 <<
"World size: " << world.size << std::endl
91 <<
"Matrix " << argv[1] <<
": " << rows <<
"x" << rows << std::endl
92 <<
"RHS " << argv[2] <<
": " << rows <<
"x" << cols << std::endl;
// Assemble the distributed matrix from the local CRS slice. `DBackend` is
// defined in a missing line (backend type parameter of DistributedMatrix).
104 auto A = std::make_shared<DistributedMatrix<DBackend>>(world, std::tie(chunk, ptr, col, val));
// Optional ParMETIS repartitioning: renumber rows to improve locality when
// running on more than one rank. Matching #endif is outside the visible text.
109#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
112 if (world.size > 1) {
113 prof.tic(
"partition");
// `P` (the permutation/prolongation built by the partitioner) comes from a
// missing line; R = P^T, and A is replaced by R*A*P (renumbered system).
118 auto R = transpose(*P);
121 A = product(*R, *product(*A, *P));
// Redistribute the RHS with the same permutation: new_rhs = R * rhs.
124 std::vector<double> new_rhs(R->loc_rows());
125 R->move_to_backend(
typename DBackend::params());
126 Alina::backend::spmv(1, *R, rhs, 0, new_rhs);
// Local row count changed by the repartitioning; refresh it.
131 chunk = A->loc_rows();
132 prof.toc(
"partition");
// Build the distributed solver over A (Solver alias declared in a missing
// line) and print its configuration.
138 Solver solve(world, A);
143 std::cout << solve << std::endl;
// Zero initial guess sized to this rank's local rows. The solve call that
// produces `r` (iteration count / residual) is among the missing lines.
146 std::vector<double> x(chunk, 0.0);
155 std::cout <<
"Iters: " << r.nbIteration() << std::endl
156 <<
"Error: " << r.residual() << std::endl
157 << prof << std::endl;
// Stray one-line header descriptions from sibling headers, concatenated here
// by the extraction; restored as comments so the text is no longer bare code:
// Iterative solver wrapper for distributed linear systems.
// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
// Distributed smoothed aggregation coarsening scheme.
// Convenience wrapper around MPI_Comm.
// Convenience wrapper around MPI_Init/MPI_Finalize.