Arcane  v4.1.10.0
Documentation développeur
Chargement...
Recherche...
Aucune correspondance
nullspace_mpi.cc
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2026 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7
8/*
9The MIT License
10
11Copyright (c) 2012-2022 Denis Demidov <dennis.demidov@gmail.com>
12
13Permission is hereby granted, free of charge, to any person obtaining a copy
14of this software and associated documentation files (the "Software"), to deal
15in the Software without restriction, including without limitation the rights
16to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
17copies of the Software, and to permit persons to whom the Software is
18furnished to do so, subject to the following conditions:
19
20The above copyright notice and this permission notice shall be included in
21all copies or substantial portions of the Software.
22
23THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
24IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
26AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
28OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
29THE SOFTWARE.
30*/
31
32#include <vector>
33#include <iostream>
34
35#include "arccore/alina/BuiltinBackend.h"
36#include "arccore/alina/Adapters.h"
37#include "arccore/alina/Coarsening.h"
38
39#include "arccore/alina/DistributedMatrix.h"
40#include "arccore/alina/DistributedPreconditionedSolver.h"
41#include "arccore/alina/DistributedAMG.h"
42#include "arccore/alina/DistributedCoarsening.h"
43#include "arccore/alina/DistributedRelaxation.h"
44#include "arccore/alina/DistributedSolver.h"
45
46#include "arccore/alina/IO.h"
47#include "arccore/alina/Profiler.h"
48
49#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
50#include "arccore/alina/ParmetisMatrixPartitioner.h"
51#endif
52
53using namespace Arcane;
54using namespace Arcane::Alina;
55
// -----------------------------------------------------------------------------
// NOTE(review): this listing was extracted from generated documentation; the
// numeric prefix fused onto each line and the truncated `Solver` typedef below
// (original lines 108-114) are extraction artifacts, not compilable C++.
// Recover the pristine file before building.
// -----------------------------------------------------------------------------
// Example driver: reads a distributed sparse system (matrix, RHS, and node
// coordinates) whose unknowns come in groups of 3 (three DOFs per grid node),
// builds rigid-body-mode near null-space vectors from the coordinates, and
// solves the system with a distributed AMG-preconditioned iterative solver.
// Usage: <exe> <A.bin> <b.bin> <coo.bin>; returns 1 on bad arguments.
56int main(int argc, char* argv[])
57{
58 // The command line should contain the matrix, the RHS, and the coordinate files:
59 if (argc < 4) {
60 std::cerr << "Usage: " << argv[0] << " <A.bin> <b.bin> <coo.bin>" << std::endl;
61 return 1;
62 }
63
 // RAII wrappers: mpi_init wraps MPI_Init/MPI_Finalize, mpi_communicator
 // wraps the MPI_COMM_WORLD communicator.
64 Alina::mpi_init mpi(&argc, &argv);
65 Alina::mpi_communicator world(MPI_COMM_WORLD);
66
67 // The profiler:
68 auto& prof = Alina::Profiler::globalProfiler();
69
70 // Read the system matrix, the RHS, and the coordinates:
71 prof.tic("read");
72 // Get the global size of the matrix:
73 ptrdiff_t rows = Alina::IO::crs_size<ptrdiff_t>(argv[1]);
74
75 // Split the matrix into approximately equal chunks of rows, and
76 // make sure each chunk size is divisible by 3.
 // (3 unknowns per grid node, so a node's DOFs are never split across ranks.)
77 ptrdiff_t chunk = (rows + world.size - 1) / world.size;
78 if (chunk % 3)
79 chunk += 3 - chunk % 3;
80
 // Half-open row range [row_beg, row_end) owned by this rank; both are
 // clamped to the global size so the last ranks may get an empty range.
81 ptrdiff_t row_beg = std::min(rows, chunk * world.rank);
82 ptrdiff_t row_end = std::min(rows, row_beg + chunk);
83 chunk = row_end - row_beg;
84
85 // Read our part of the system matrix, the RHS and the coordinates.
86 std::vector<ptrdiff_t> ptr, col;
87 std::vector<double> val, rhs, coo;
88 Alina::IO::read_crs(argv[1], rows, ptr, col, val, row_beg, row_end);
89
90 ptrdiff_t n, m;
91 Alina::IO::read_dense(argv[2], n, m, rhs, row_beg, row_end);
92 Alina::precondition(n == rows && m == 1, "The RHS file has wrong dimensions");
93
 // The coordinate file stores one (x,y,z) row per grid node, hence the
 // division by 3 when converting DOF rows to node rows.
94 Alina::IO::read_dense(argv[3], n, m, coo, row_beg / 3, row_end / 3);
95 Alina::precondition(n * 3 == rows && m == 3, "The coordinate file has wrong dimensions");
96 prof.toc("read");
97
98 if (world.rank == 0) {
99 std::cout
100 << "Matrix " << argv[1] << ": " << rows << "x" << rows << std::endl
101 << "RHS " << argv[2] << ": " << rows << "x1" << std::endl
102 << "Coords " << argv[3] << ": " << rows / 3 << "x3" << std::endl;
103 }
104
105 // Declare the backends and the solver type
106 typedef Alina::BuiltinBackend<double> SBackend; // the solver backend
107 typedef Alina::BuiltinBackend<float> PBackend; // the preconditioner backend
108
 // NOTE(review): the body of the `Solver` typedef (original lines 109-114)
 // was lost during extraction; only a fragment survives below. Judging from
 // the includes above, it should define `Solver` as an iterative solver over
 // SBackend preconditioned by distributed AMG over PBackend -- recover the
 // exact form from the original source file before compiling.
110 PBackend,
114
115 // The distributed matrix
 // Wraps this rank's local CRS chunk (chunk rows, ptr/col/val arrays) into a
 // distributed matrix over the world communicator.
116 auto A = std::make_shared<Alina::DistributedMatrix<SBackend>>(
117 world, std::tie(chunk, ptr, col, val));
118
119 // Partition the matrix, the RHS vector, and the coordinates.
120 // If ParMETIS is not available,
121 // just keep the current naive partitioning.
122#if defined(ARCCORE_ALINA_HAVE_PARMETIS)
 // NOTE(review): the `Partition` alias definition (original line 123,
 // presumably based on ParmetisMatrixPartitioner.h included above) was
 // dropped by the extraction.
124
125 if (world.size > 1) {
126 auto t = prof.scoped_tic("partition");
127 Partition part;
128
129 // part(A) returns the distributed permutation matrix.
130 // Keep the DOFs belonging to the same grid nodes together
131 // (use block-wise partitioning with block size 3).
132 auto P = part(*A, 3);
133 auto R = transpose(*P);
134
135 // Reorder the matrix:
 // A := R * A * P, i.e. the symmetric permutation of the system.
136 A = product(*R, *product(*A, *P));
137
138 // Reorder the RHS vector and the coordinates:
 // Applying R (the transposed permutation) via spmv moves each rank's
 // entries to their new owners.
139 R->move_to_backend();
140 std::vector<double> new_rhs(R->loc_rows());
141 std::vector<double> new_coo(R->loc_rows());
142 Alina::backend::spmv(1, *R, rhs, 0, new_rhs);
143 Alina::backend::spmv(1, *R, coo, 0, new_coo);
144 rhs.swap(new_rhs);
145 coo.swap(new_coo);
146
147 // Update the number of the local rows
148 // (it may have changed as a result of permutation).
149 chunk = A->loc_rows();
150 }
151#endif
152
153 // Solver parameters:
154 Solver::params prm;
155 prm.solver.maxiter = 500;
156 prm.precond.coarsening.aggr.eps_strong = 0; // presumably disables the strong-connection threshold for aggregation -- confirm against Alina docs
157
158 // Convert the coordinates to the rigid body modes.
159 // The function returns the number of near null-space vectors
160 // (3 in 2D case, 6 in 3D case) and writes the vectors to the
161 // std::vector<double> specified as the last argument:
162 prm.precond.coarsening.aggr.nullspace.cols = Alina::rigid_body_modes(3, coo, prm.precond.coarsening.aggr.nullspace.B);
163
164 // Initialize the solver with the system matrix.
165 prof.tic("setup");
166 Solver solve(world, A, prm);
167 prof.toc("setup");
168
169 // Show the mini-report on the constructed solver:
170 if (world.rank == 0)
171 std::cout << solve << std::endl;
172
173 // Solve the system with the zero initial approximation:
174 std::vector<double> x(chunk, 0.0);
175
 // The solver returns a SolverResult (AlinaUtils.h) carrying the iteration
 // count and the achieved residual.
176 prof.tic("solve");
177 Alina::SolverResult r = solve(*A, rhs, x);
178 prof.toc("solve");
179
180 // Output the number of iterations, the relative error,
181 // and the profiling data:
182 if (world.rank == 0) {
183 std::cout << "Iters: " << r.nbIteration() << std::endl
184 << "Error: " << r.residual() << std::endl
185 << prof << std::endl;
186 }
187}
Iterative solver wrapper for distributed linear systems.
Result of a solve operation.
Definition AlinaUtils.h:52
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Distributed smoothed aggregation coarsening scheme.
Convenience wrapper around MPI_Comm.
Convenience wrapper around MPI_Init/MPI_Finalize.