22#include <boost/scope_exit.hpp>
24#include "arccore/alina/BuiltinBackend.h"
25#include "arccore/alina/StaticMatrix.h"
26#include "arccore/alina/Adapters.h"
27#include "arccore/alina/DistributedMatrix.h"
28#include "arccore/alina/IO.h"
30#include "arccore/alina/Profiler.h"
34namespace math = Alina::math;
// Assemble this rank's slab (rows [beg, end) of an n-by-n system) in
// CSR (Compressed Sparse Row) form into ptr/col/val.
//
// NOTE(review): this extract appears garbled -- the opening brace, the
// initial ptr.push_back(0) that CSR layout requires, and the
// col.push_back(...) calls that must pair with each val.push_back(...)
// are not visible here. Comments below describe only the visible lines.
//
// @param n    global matrix dimension (unused in the visible lines --
//             presumably used by the missing column-index code; confirm)
// @param beg  first (inclusive) global row owned by this rank
// @param end  last (exclusive) global row owned by this rank
// @param ptr  out: CSR row-pointer array
// @param col  out: CSR column-index array
// @param val  out: CSR value array (block values of type Val)
37void assemble(
int n,
int beg,
int end,
38 std::vector<int>& ptr,
39 std::vector<int>& col,
40 std::vector<Val>& val)
// Number of rows owned by this rank.
42 int chunk = end - beg;
// Reserve assuming at most four nonzeros per row; ptr holds rows+1 entries.
45 ptr.reserve(chunk + 1);
48 col.reserve(chunk * 4);
50 val.reserve(chunk * 4);
// j is the global row index, i the local (slab-relative) one.
52 for (
int j = beg, i = 0; j < end; ++j, ++i) {
// Stencil values pushed per row: -1, 2, -1 (1D Poisson-like tridiagonal
// of identity blocks) plus a -0.1 off-band coupling.
// NOTE(review): the matching column indices are missing from this view.
55 val.push_back(-math::identity<Val>());
59 val.push_back(2 * math::identity<Val>());
63 val.push_back(-math::identity<Val>());
68 val.push_back(-0.1 * math::identity<Val>());
// Close the current row: record the end offset into col/val.
71 ptr.push_back(col.size());
// Body of a test driver; its signature/header lies outside this extract.
// NOTE(review): `comm`, `n`, `ptr`, `col`, `val`, `Matrix`, `Val` and the
// error accumulator `s` are presumably declared in the missing part --
// confirm against the full file.
//
// Overall plan (as far as visible): assemble a distributed slab, form
// B = A*A, apply it to a random vector, gather the pieces on rank 0,
// recompute the product serially there, and report the difference.
78 typedef typename math::rhs_of<Val>::type Rhs;
// Split the n rows evenly across ranks; the last rank may own fewer
// (both bounds are clamped to n).
83 int chunk_len = (n + comm.size - 1) / comm.size;
84 int chunk_beg = std::min(n, chunk_len * comm.rank);
85 int chunk_end = std::min(n, chunk_len * (comm.rank + 1));
86 int chunk = chunk_end - chunk_beg;
// Exchange every rank's chunk size and derive gather displacements
// (exclusive prefix sum of chunk sizes).
88 std::vector<int> chunks(comm.size);
89 MPI_Allgather(&chunk, 1, MPI_INT, &chunks[0], 1, MPI_INT, comm);
90 std::vector<int> displ(comm.size, 0);
91 for (
int i = 1; i < comm.size; ++i)
92 displ[i] = displ[i - 1] + chunks[i - 1];
// Local input/output vectors and the local CSR slab.
97 std::vector<Rhs> x(chunk);
98 std::vector<Rhs> y(chunk);
100 assemble(n, chunk_beg, chunk_end, ptr, col, val);
// Random input. NOTE(review): drand48() is never seeded here, so every
// rank draws the same default-seeded sequence -- confirm this is intended.
102 for (
int i = 0; i < chunk; ++i)
103 x[i] = math::constant<Rhs>(drand48());
// Build the distributed matrix from the local slab, form B = A*A on the
// backend, then compute y = 1*B*x + 0*y.
108 Matrix A(comm, std::tie(chunk, ptr, col, val), chunk);
110 auto B = Alina::product(A, A);
111 B->move_to_backend();
113 Alina::backend::spmv(1, *B, x, 0, y);
// Gather the full input X and the distributed result R on rank 0.
115 std::vector<Rhs> X(n), R(n);
116 MPI_Gatherv(&x[0], chunk, Alina::mpi_datatype<Rhs>(), &X[0], &chunks[0], &displ[0], Alina::mpi_datatype<Rhs>(), 0, comm);
117 MPI_Gatherv(&y[0], chunk, Alina::mpi_datatype<Rhs>(), &R[0], &chunks[0], &displ[0], Alina::mpi_datatype<Rhs>(), 0, comm);
// Rank 0 re-assembles the WHOLE matrix and recomputes Y = (A*A)*X for
// comparison. NOTE(review): a serial Matrix rebuilt from the re-assembled
// ptr/col/val appears intended between lines 121 and 124 (A as declared
// above is the distributed slab) -- confirm against the full file.
119 if (comm.rank == 0) {
120 std::vector<Rhs> Y(n);
121 assemble(n, 0, n, ptr, col, val);
124 Alina::backend::spmv(1, *product(A, A), X, 0, Y);
// Compare the gathered distributed result against the serial one.
// NOTE(review): the accumulation of d into `s` is missing from this view.
127 for (
int i = 0; i < n; ++i) {
128 double d = math::norm(R[i] - Y[i]);
131 std::cout <<
"Error: " << s << std::endl;
// Entry point: initialize MPI, register a scope-exit cleanup
// (Boost.ScopeExit -- body not visible; presumably MPI_Finalize, TODO
// confirm), then run the test with a 2x2 static-matrix value type.
// NOTE(review): braces and the scope-exit body are missing from this
// extract.
135int main(
int argc,
char* argv[])
137 MPI_Init(&argc, &argv);
138 BOOST_SCOPE_EXIT(
void)
145 test<Alina::StaticMatrix<double, 2, 2>>();
Distributed Matrix using message passing.
Matrix class, to be used by user.
Namespace for mathematical functions.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Sparse matrix stored in CSR (Compressed Sparse Row) format.
Convenience wrapper around MPI_Comm.