35#include "arccore/alina/BuiltinBackend.h"
36#include "arccore/alina/Adapters.h"
37#include "arccore/alina/Coarsening.h"
39#include "arccore/alina/DistributedMatrix.h"
40#include "arccore/alina/DistributedPreconditionedSolver.h"
41#include "arccore/alina/DistributedAMG.h"
42#include "arccore/alina/DistributedCoarsening.h"
43#include "arccore/alina/DistributedRelaxation.h"
44#include "arccore/alina/DistributedSolver.h"
46#include "arccore/alina/IO.h"
47#include "arccore/alina/Profiler.h"
49#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
50#include "arccore/alina/ParmetisMatrixPartitioner.h"
54using namespace Arcane::Alina;
// NOTE(review): extraction-damaged fragment. The numeric prefixes fused into
// several lines ("60 ", "68 ", ...) are remnants of the original file's line
// numbers; the jumps between them show that many statements are missing here
// (the function's braces, MPI/world initialization, declarations of `world`,
// `n`, `m`, `SBackend`, `Solver`, `prm`, `r`, and the `#endif` matching the
// ARCCORE_ALINA_HAVE_PARMETIS guard). Comments below describe only what the
// visible lines demonstrate; restore the full source before editing logic.
//
// Example driver: solves a distributed linear system A*x = b read from three
// binary files given on the command line (matrix, RHS, node coordinates).
56int main(
int argc,
char* argv[])
// Usage message — presumably printed when argc is checked by a missing
// `if (argc < 4)` guard; the condition itself is not visible here.
60 std::cerr <<
"Usage: " << argv[0] <<
" <A.bin> <b.bin> <coo.bin>" << std::endl;
// Global profiler singleton used to time phases (see "partition" tic below).
68 auto& prof = Alina::Profiler::globalProfiler();
// Total number of matrix rows, read from the CRS file header.
73 ptrdiff_t rows = Alina::IO::crs_size<ptrdiff_t>(argv[1]);
// Even row split across MPI ranks (ceil division)...
77 ptrdiff_t chunk = (rows + world.size - 1) / world.size;
// ...rounded up to a multiple of 3 so a rank never splits one 3-DOF node
// (coordinates are Nx3 and the nullspace uses 3 rigid-body DOFs per node).
79 chunk += 3 - chunk % 3;
// This rank's half-open row range [row_beg, row_end), clamped at `rows`;
// `chunk` becomes the actual local row count (may be short on the last rank).
81 ptrdiff_t row_beg = std::min(rows, chunk * world.rank);
82 ptrdiff_t row_end = std::min(rows, row_beg + chunk);
83 chunk = row_end - row_beg;
// Local CRS slice of the matrix, local RHS slice, and local coordinate slice.
86 std::vector<ptrdiff_t> ptr, col;
87 std::vector<double> val, rhs, coo;
88 Alina::IO::read_crs(argv[1], rows, ptr, col, val, row_beg, row_end);
// `n`/`m` receive the dense file's dimensions (declared in missing lines).
91 Alina::IO::read_dense(argv[2], n, m, rhs, row_beg, row_end);
// RHS must be a rows-by-1 vector.
92 Alina::precondition(n == rows && m == 1,
"The RHS file has wrong dimensions");
// Coordinates are stored per node (rows/3 nodes, 3 columns), hence the /3
// on the row range.
94 Alina::IO::read_dense(argv[3], n, m, coo, row_beg / 3, row_end / 3);
95 Alina::precondition(n * 3 == rows && m == 3,
"The coordinate file has wrong dimensions");
// Rank 0 reports problem sizes (the stream object on the missing line 99/100
// is presumably std::cout — confirm against the full source).
98 if (world.rank == 0) {
100 <<
"Matrix " << argv[1] <<
": " << rows <<
"x" << rows << std::endl
101 <<
"RHS " << argv[2] <<
": " << rows <<
"x1" << std::endl
102 <<
"Coords " << argv[3] <<
": " << rows / 3 <<
"x3" << std::endl;
// Assemble the distributed matrix from the local CRS slice.
// (`SBackend` is defined in lines not visible here.)
116 auto A = std::make_shared<Alina::DistributedMatrix<SBackend>>(
117 world, std::tie(chunk, ptr, col, val));
// Optional repartitioning for better load balance / reduced communication;
// only meaningful with more than one rank. Matching #endif is outside this
// fragment.
122#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
125 if (world.size > 1) {
// Scoped timer: measures the whole partitioning phase.
126 auto t = prof.scoped_tic(
"partition");
// P: permutation/prolongation of the new ownership (block size 3, keeping
// the 3 DOFs of a node together); R = P^T; A <- R*A*P re-distributes A.
132 auto P = part(*A, 3);
133 auto R = transpose(*P);
136 A = product(*R, *product(*A, *P));
// Re-distribute RHS and coordinates with the same R via SpMV:
// new = 1*R*old + 0*new.
139 R->move_to_backend();
140 std::vector<double> new_rhs(R->loc_rows());
141 std::vector<double> new_coo(R->loc_rows());
142 Alina::backend::spmv(1, *R, rhs, 0, new_rhs);
143 Alina::backend::spmv(1, *R, coo, 0, new_coo);
// Local size may have changed after repartitioning.
149 chunk = A->loc_rows();
// Solver/preconditioner parameters (`prm` declared in missing lines):
// cap iterations; eps_strong = 0 makes aggregation treat all connections
// as strong.
155 prm.solver.maxiter = 500;
156 prm.precond.coarsening.aggr.eps_strong = 0;
// Near-nullspace for elasticity-type problems: rigid body modes built from
// the node coordinates (3D -> 6 modes; count returned into .cols, vectors
// into .B).
162 prm.precond.coarsening.aggr.nullspace.cols = Alina::rigid_body_modes(3, coo, prm.precond.coarsening.aggr.nullspace.B);
// Build the solver (setup happens here) and print its structure.
166 Solver solve(world, A, prm);
171 std::cout << solve << std::endl;
// Zero initial guess; the solve call that produces `r` is in missing lines.
174 std::vector<double> x(chunk, 0.0);
// Rank 0 reports convergence: iteration count, final residual, and the
// accumulated profiler timings.
182 if (world.rank == 0) {
183 std::cout <<
"Iters: " << r.nbIteration() << std::endl
184 <<
"Error: " << r.residual() << std::endl
185 << prof << std::endl;
Iterative solver wrapper for distributed linear systems.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Distributed smoothed aggregation coarsening scheme.
Convenience wrapper around MPI_Comm.
Convenience wrapper around MPI_Init/MPI_Finalize.