22#include <boost/scope_exit.hpp>
23#include <boost/program_options.hpp>
25#include "arccore/alina/BuiltinBackend.h"
26#include "arccore/alina/Adapters.h"
27#include "arccore/alina/DistributedMatrix.h"
28#include "arccore/alina/Profiler.h"
30#include "DomainPartition.h"
// NOTE(review): extraction artifact — the struct header, constructor body and
// closing braces are missing from this view, and the leading integers on many
// lines are original line numbers fused into the text. Code kept byte-identical;
// comments only.
//
// Functor mapping a global 3D grid coordinate (i, j, k) to a contiguous global
// row index of the distributed matrix.
36 const DomainPartition<3>& part; // partitioning of the global grid across subdomains
// Per-subdomain global row offsets — presumably the exclusive prefix sum of
// subdomain sizes (see world.exclusive_sum(chunk) in main); verify against caller.
37 const std::vector<ptrdiff_t>& dom;
39 renumbering(
const DomainPartition<3>& p,
40 const std::vector<ptrdiff_t>& d)
// Returns the global row index of grid node (i, j, k).
45 ptrdiff_t operator()(ptrdiff_t i, ptrdiff_t j, ptrdiff_t k)
const
47 boost::array<ptrdiff_t, 3> p = { { i, j, k } };
// part.index() appears to yield (subdomain id, local index within that
// subdomain) — TODO confirm against DomainPartition.h.
48 std::pair<int, ptrdiff_t> v = part.index(p);
49 return dom[v.first] + v.second; // owning subdomain's offset + local index
// NOTE(review): extraction artifact — many interior lines are missing from this
// view (declarations of `world`, `n`, `vm`; the boundary guards and off-diagonal
// val.push_back calls in the assembly loop; the initial ptr.push_back(0); all
// closing braces; MPI_Finalize / return). Code kept byte-identical; comments only.
//
// Assembles a 7-point-stencil CSR matrix over an n^3 grid, distributed across
// MPI ranks, and profiles a distributed matrix-matrix product B = A*A.
53int main(
int argc,
char* argv[])
55 auto& prof = Alina::Profiler::globalProfiler(); // global profiler singleton
56 MPI_Init(&argc, &argv);
// Scope-exit cleanup — body not visible; presumably calls MPI_Finalize. Verify.
57 BOOST_SCOPE_EXIT(
void)
// `world` is declared in a missing line — presumably the MPI_Comm wrapper
// mentioned elsewhere in this project; exposes .size and .rank.
66 std::cout <<
"World size: " << world.size << std::endl;
// Command-line options: --help, and a grid-size option binding `n`
// (declaration of `n` not visible; its prior value is the default).
71 namespace po = boost::program_options;
72 po::options_description desc(
"Options");
74 desc.add_options()(
"help,h",
"show help")(
76 po::value<ptrdiff_t>(&n)->default_value(n),
80 po::store(po::parse_command_line(argc, argv, desc), vm);
83 if (vm.count(
"help")) {
84 std::cout << desc << std::endl;
// Global grid bounding box: [0, n-1]^3.
88 boost::array<ptrdiff_t, 3> lo = { { 0, 0, 0 } };
89 boost::array<ptrdiff_t, 3> hi = { { n - 1, n - 1, n - 1 } };
91 prof.tic(
"partition");
// Split the grid into world.size subdomains; chunk = rows owned locally.
92 DomainPartition<3> part(lo, hi, world.size);
93 ptrdiff_t chunk = part.size(world.rank);
// Exclusive prefix sum of chunk sizes -> each rank's first global row index.
95 std::vector<ptrdiff_t> domain = world.exclusive_sum(chunk);
// Reuse lo/hi for the corners of this rank's own subdomain.
97 lo = part.domain(world.rank).min_corner();
98 hi = part.domain(world.rank).max_corner();
101 prof.toc(
"partition");
103 prof.tic(
"assemble");
// Local CSR arrays (ptr/col/val) and right-hand side for the owned rows.
104 std::vector<ptrdiff_t> ptr;
105 std::vector<ptrdiff_t> col;
106 std::vector<double> val;
107 std::vector<double> rhs;
// 7-point stencil: at most 7 nonzeros per row.
109 ptr.reserve(chunk + 1);
110 col.reserve(chunk * 7);
111 val.reserve(chunk * 7);
// h2i = 1/h^2 for grid spacing h = 1/(n-1).
115 const double h2i = (n - 1) * (n - 1);
// Assemble one row per owned grid node. Neighbor insertions are presumably
// wrapped in boundary checks on missing lines; off-diagonal val.push_back
// calls are also not visible here — only the diagonal entry 6/h^2 is.
117 for (ptrdiff_t k = lo[2]; k <= hi[2]; ++k) {
118 for (ptrdiff_t j = lo[1]; j <= hi[1]; ++j) {
119 for (ptrdiff_t i = lo[0]; i <= hi[0]; ++i) {
121 col.push_back(renum(i, j, k - 1));
126 col.push_back(renum(i, j - 1, k));
131 col.push_back(renum(i - 1, j, k));
135 col.push_back(renum(i, j, k));
136 val.push_back(6 * h2i); // diagonal of the 3D 7-point stencil
139 col.push_back(renum(i + 1, j, k));
144 col.push_back(renum(i, j + 1, k));
149 col.push_back(renum(i, j, k + 1));
153 ptr.push_back(col.size()); // close the current CSR row
157 prof.toc(
"assemble");
// Wrap the local CSR chunk into a distributed matrix and profile A*A.
162 prof.tic(
"create distributed version");
163 Matrix A(world, std::tie(chunk, ptr, col, val), chunk);
164 prof.toc(
"create distributed version");
166 prof.tic(
"distributed product");
167 auto B = product(A, A);
168 prof.toc(
"distributed product");
// Single-rank runs also time the local (shared-memory) product for
// comparison — the "openmp" label suggests an OpenMP backend; confirm.
170 if (world.rank == 0) {
171 if (world.size == 1) {
173 matrix A(std::tie(chunk, ptr, col, val));
174 prof.tic(
"openmp product");
175 auto B = product(A, A);
176 prof.toc(
"openmp product");
179 std::cout << prof << std::endl; // dump all collected timings
Distributed Matrix using message passing.
Matrix class, to be used by the user.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Sparse matrix stored in CSR (Compressed Sparse Row) format.
Convenience wrapper around MPI_Comm.