8#include <gtest/gtest.h>
10#include "arccore/alina/BuiltinBackend.h"
11#include "arccore/alina/StaticMatrix.h"
12#include "arccore/alina/Adapters.h"
13#include "arccore/alina/MessagePassingUtils.h"
14#include "arccore/alina/DistributedPreconditionedSolver.h"
15#include "arccore/alina/DistributedPreconditioner.h"
16#include "arccore/alina/DistributedSolverRuntime.h"
18#include "TestMainMpi.h"
22namespace math = Alina::math;
 // NOTE(review): incomplete extraction — the function header (return type and
 // name; the call site suggests `assemble_poisson3d`) and several interior
 // lines are missing (closing braces, the val.push_back calls paired with the
 // off-diagonal col.push_back calls, rhs sizing, and the return statement).
 // The leading "NN " tokens are fused original line numbers from the
 // extraction, not code. Comments below state only what the visible lines
 // establish; everything else is hedged.
 //
 // Visible intent: assemble this rank's row strip of a 7-point 3D Poisson
 // finite-difference matrix in CRS form (ptr/col/val) with a unit RHS.
 27 ptrdiff_t n,
 int block_size,
 28 std::vector<ptrdiff_t>& ptr,
 29 std::vector<ptrdiff_t>& col,
 30 std::vector<double>& val,
 31 std::vector<double>& rhs)
 // Total number of unknowns on the n x n x n grid.
 33 ptrdiff_t n3 = n * n * n;
 // Even per-rank split of rows, rounded up.
 35 ptrdiff_t chunk = (n3 + comm.size - 1) / comm.size;
 // Round chunk up to a multiple of block_size so a block never straddles
 // two ranks.
 36 if (chunk % block_size != 0) {
 37 chunk += block_size - chunk % block_size;
 // Clamp this rank's half-open row range [row_beg, row_end) to valid rows;
 // trailing ranks may get a short (or empty) strip.
 39 ptrdiff_t row_beg = std::min(n3, chunk * comm.rank);
 40 ptrdiff_t row_end = std::min(n3, row_beg + chunk);
 41 chunk = row_end - row_beg;
 // CRS storage: at most 7 nonzeros per row (7-point stencil).
 44 ptr.reserve(chunk + 1);
 46 col.reserve(chunk * 7);
 48 val.reserve(chunk * 7);
 // Unit right-hand side (rhs is assumed to be pre-sized by a missing line —
 // TODO confirm against the full source).
 51 std::fill(rhs.begin(), rhs.end(), 1.0);
 // 1/h^2 for grid spacing h = 1/(n-1).
 53 const double h2i = (n - 1) * (n - 1);
 // Assemble one CRS row per local grid point.
 56 for (ptrdiff_t idx = row_beg; idx < row_end; ++idx) {
 // Decompose the linear row index into (i, j, k) grid coordinates.
 57 ptrdiff_t k = idx / (n * n);
 58 ptrdiff_t j = (idx / n) % n;
 59 ptrdiff_t i = idx % n;
 // Lower neighbours (z-, y-, x-); the boundary guards (presumably
 // `if (k > 0)` etc.) and matching val.push_back(-h2i) lines are missing
 // from this extraction — verify against the full source.
 62 col.push_back(idx - n * n);
 67 col.push_back(idx - n);
 72 col.push_back(idx - 1);
 // Diagonal entry of the 7-point Laplacian.
 77 val.push_back(6 * h2i);
 // Upper neighbours (x+, y+, z+); guards/values missing as above.
 80 col.push_back(idx + 1);
 85 col.push_back(idx + n);
 90 col.push_back(idx + n * n);
 // Close the current row in the CRS row-pointer array.
 94 ptr.push_back(col.size());
 // NOTE(review): incomplete extraction — the function header (the call site
 // suggests `solve_scalar`), the using-declarations defining Backend, DMatrix,
 // Solver, prm2 and bprm, the solve call that produces `r`, and several
 // closing braces are missing from this view. Comments are limited to what the
 // visible lines establish.
 104 const std::vector<ptrdiff_t>& ptr,
 105 const std::vector<ptrdiff_t>& col,
 106 const std::vector<double>& val,
 108 const std::vector<double>& f)
 // Global profiler used for the scoped "setup" timing and the final report.
 110 auto& prof = Alina::Profiler::globalProfiler();
 113 using BackendValueType = double;
 // Log the sizes of the backend's pointer/column/value types; useful to
 // diagnose 32- vs 64-bit index mismatches between the test data and backend.
 116 std::cout <<
 "Using scalar solve ptr_size=" <<
 sizeof(ptrdiff_t)
 117 <<
 " ptr_type_size=" <<
 sizeof(Backend::ptr_type)
 118 <<
 " col_type_size=" <<
 sizeof(Backend::col_type)
 119 <<
 " value_type_size=" <<
 sizeof(Backend::value_type)
 // Distributed matrix and solver, built under the "setup" profiler scope.
 140 std::shared_ptr<DMatrix> A;
 141 std::shared_ptr<Solver> solve;
 144 auto t = prof.scoped_tic(
 "setup");
 // Build the distributed matrix from this rank's CRS strip, then the solver.
 145 A = std::make_shared<DMatrix>(comm, std::tie(chunk, ptr, col, val));
 146 solve = std::make_shared<Solver>(comm, A, prm, bprm);
 // Echo the effective solver parameters (prm2 is declared on a missing line —
 // TODO confirm its type against the full source).
 148 solve->prm.get(prm2);
 149 std::cout <<
 "SOLVER parameters=" << prm2 <<
 "\n";
 // Only rank 0 prints, to avoid duplicated output across MPI ranks.
 152 if (comm.rank == 0) {
 153 std::cout <<
 "SolverInfo:\n";
 154 std::cout << *solve << std::endl;
 // Convergence report; `r` comes from the solve call missing from this view.
 163 if (comm.rank == 0) {
 164 std::cout <<
 "Iterations: " << r.nbIteration() << std::endl
 165 <<
 "Error: " << r.residual() << std::endl
 166 << prof << std::endl;
// End-to-end MPI test: assemble a 3D Poisson problem and solve it with the
// distributed scalar solver.
// NOTE(review): incomplete extraction — the opening brace and the
// declarations of `comm`, `n` and `prm` (and the closing brace) are missing
// from this view.
173TEST(alina_test_mpi, BasicSolver)
 177 std::cout <<
 "World size: " << comm.size <<
 "\n";
 // CRS arrays and right-hand side, filled by assemble_poisson3d below.
 182 std::vector<ptrdiff_t> ptr;
 183 std::vector<ptrdiff_t> col;
 184 std::vector<double> val;
 185 std::vector<double> rhs;
 // Grid points per dimension: the system has matrix_size^3 unknowns.
 187 Int64 matrix_size = 32;
 188 std::cout <<
 "Matrix size=" << matrix_size <<
 "\n";
 // block_size = 1 selects scalar (non-block) assembly.
 189 n = assemble_poisson3d(comm, matrix_size, 1, ptr, col, val, rhs);
 191 solve_scalar(comm, n, ptr, col, val, prm, rhs);
Runtime wrapper for distributed direct solvers.
Distributed Matrix using message passing.
Iterative solver wrapper for distributed linear systems.
NUMA-aware vector container.
Namespace for mathematical functions.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Alina::detail::empty_params params
Distributed smoothed aggregation coarsening scheme.
Runtime-configurable wrapper around matrix partitioner.
Convenience wrapper around MPI_Comm.