15#include "arccore/message_passing_mpi/internal/MpiDynamicMultiMachineMemoryWindowBaseInternal.h"
17#include "arccore/base/FatalErrorException.h"
22namespace Arcane::MessagePassing::Mpi
32, m_win_actual_sizeof()
33, m_win_target_segments()
34, m_comm_machine(comm_machine)
35, m_comm_machine_size(comm_machine_size)
36, m_comm_machine_rank(comm_machine_rank)
37, m_sizeof_type(sizeof_type)
38, m_nb_segments_per_proc(nb_segments_per_proc)
39, m_machine_ranks(machine_ranks)
40, m_add_requests(nb_segments_per_proc)
41, m_resize_requests(nb_segments_per_proc)
43 if (m_sizeof_type <= 0) {
44 ARCCORE_FATAL(
"Invalid sizeof_type");
46 for (
Integer i = 0; i < m_nb_segments_per_proc; ++i) {
47 if (sizeof_segments[i] < 0 || sizeof_segments[i] % m_sizeof_type != 0) {
48 ARCCORE_FATAL(
"Invalid initial sizeof_segment");
51 if (m_nb_segments_per_proc <= 0) {
52 ARCCORE_FATAL(
"Invalid nb_segments_per_proc");
54 m_all_mpi_win.resize(m_comm_machine_size * m_nb_segments_per_proc);
55 m_reserved_part_span.resize(m_nb_segments_per_proc);
57 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
59 m_resize_requests[num_seg] = -1;
62 MPI_Info win_info_true;
63 MPI_Info_create(&win_info_true);
64 MPI_Info_set(win_info_true,
"alloc_shared_noncontig",
"true");
66 MPI_Info win_info_false;
67 MPI_Info_create(&win_info_false);
68 MPI_Info_set(win_info_false,
"alloc_shared_noncontig",
"false");
70 const Int32 pos_my_wins = m_comm_machine_rank * m_nb_segments_per_proc;
74 for (
Integer i = 0; i < m_comm_machine_size; ++i) {
75 for (
Integer j = 0; j < m_nb_segments_per_proc; ++j) {
77 if (m_comm_machine_rank == i) {
78 if (sizeof_segments[j] == 0)
79 size_seg = m_sizeof_type;
81 size_seg = sizeof_segments[j];
83 std::byte* ptr_seg =
nullptr;
84 int error = MPI_Win_allocate_shared(size_seg, m_sizeof_type, win_info_true, m_comm_machine, &ptr_seg, &m_all_mpi_win[j + i * m_nb_segments_per_proc]);
86 if (error != MPI_SUCCESS) {
87 ARCCORE_FATAL(
"Error with MPI_Win_allocate_shared() call");
93 for (
Integer i = 0; i < m_nb_segments_per_proc; ++i) {
96 std::byte* ptr_seg =
nullptr;
97 int error = MPI_Win_shared_query(m_all_mpi_win[i + pos_my_wins], m_comm_machine_rank, &size_seg, &
size_type, &ptr_seg);
99 if (error != MPI_SUCCESS) {
100 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
111 Int64* ptr_seg =
nullptr;
112 Int64* ptr_win =
nullptr;
114 int error = MPI_Win_allocate_shared(
static_cast<Int64>(
sizeof(
Int64)) * m_nb_segments_per_proc,
sizeof(
Int64), win_info_false, m_comm_machine, &ptr_seg, &m_win_need_resize);
116 if (error != MPI_SUCCESS) {
117 ARCCORE_FATAL(
"Error with MPI_Win_allocate_shared() call");
123 int error = MPI_Win_shared_query(m_win_need_resize, 0, &size_seg, &
size_type, &ptr_win);
125 if (error != MPI_SUCCESS) {
126 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
129 m_need_resize =
Span<Int64>{ ptr_win, m_comm_machine_size * m_nb_segments_per_proc };
131 for (
Integer i = 0; i < m_nb_segments_per_proc; ++i) {
132 m_need_resize[i + pos_my_wins] = -1;
135 if (ptr_win + pos_my_wins != ptr_seg) {
136 ARCCORE_FATAL(
"m_win_need_resize is noncontig");
141 Int64* ptr_seg =
nullptr;
142 Int64* ptr_win =
nullptr;
144 int error = MPI_Win_allocate_shared(
static_cast<Int64>(
sizeof(
Int64)) * m_nb_segments_per_proc,
sizeof(
Int64), win_info_false, m_comm_machine, &ptr_seg, &m_win_actual_sizeof);
146 if (error != MPI_SUCCESS) {
147 ARCCORE_FATAL(
"Error with MPI_Win_allocate_shared() call");
153 int error = MPI_Win_shared_query(m_win_actual_sizeof, 0, &size_seg, &
size_type, &ptr_win);
155 if (error != MPI_SUCCESS) {
156 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
159 m_sizeof_used_part =
Span<Int64>{ ptr_win, m_comm_machine_size * m_nb_segments_per_proc };
161 for (
Integer i = 0; i < m_nb_segments_per_proc; ++i) {
162 m_sizeof_used_part[i + pos_my_wins] = sizeof_segments[i];
165 if (ptr_win + pos_my_wins != ptr_seg) {
166 ARCCORE_FATAL(
"m_win_actual_sizeof is noncontig");
171 Int32* ptr_seg =
nullptr;
172 Int32* ptr_win =
nullptr;
174 int error = MPI_Win_allocate_shared(
static_cast<Int64>(
sizeof(
Int32)) * m_nb_segments_per_proc,
sizeof(
Int32), win_info_false, m_comm_machine, &ptr_seg, &m_win_target_segments);
176 if (error != MPI_SUCCESS) {
177 ARCCORE_FATAL(
"Error with MPI_Win_allocate_shared() call");
183 int error = MPI_Win_shared_query(m_win_target_segments, 0, &size_seg, &
size_type, &ptr_win);
185 if (error != MPI_SUCCESS) {
186 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
189 m_target_segments =
Span<Int32>{ ptr_win, m_comm_machine_size * m_nb_segments_per_proc };
191 for (
Integer i = 0; i < m_nb_segments_per_proc; ++i) {
192 m_target_segments[i + pos_my_wins] = -1;
195 if (ptr_win + pos_my_wins != ptr_seg) {
196 ARCCORE_FATAL(
"m_win_owner_segments is noncontig");
200 MPI_Info_free(&win_info_false);
201 MPI_Info_free(&win_info_true);
203 MPI_Barrier(m_comm_machine);
209MpiDynamicMultiMachineMemoryWindowBaseInternal::
210~MpiDynamicMultiMachineMemoryWindowBaseInternal()
212 for (
Integer i = 0; i < m_comm_machine_size * m_nb_segments_per_proc; ++i) {
213 MPI_Win_free(&m_all_mpi_win[i]);
215 MPI_Win_free(&m_win_need_resize);
216 MPI_Win_free(&m_win_actual_sizeof);
217 MPI_Win_free(&m_win_target_segments);
226 return m_sizeof_type;
235 return m_machine_ranks;
244 MPI_Barrier(m_comm_machine);
253 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
254 return m_reserved_part_span[num_seg].subSpan(0, m_sizeof_used_part[segment_infos_pos]);
263 const Int32 segment_infos_pos = num_seg + _worldToMachine(rank) * m_nb_segments_per_proc;
267 std::byte* ptr_seg =
nullptr;
268 int error = MPI_Win_shared_query(m_all_mpi_win[segment_infos_pos], rank, &size_seg, &
size_type, &ptr_seg);
270 if (error != MPI_SUCCESS) {
271 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
274 return Span<std::byte>{ ptr_seg, m_sizeof_used_part[segment_infos_pos] };
283 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
284 return m_reserved_part_span[num_seg].subSpan(0, m_sizeof_used_part[segment_infos_pos]);
293 const Int32 segment_infos_pos = num_seg + _worldToMachine(rank) * m_nb_segments_per_proc;
297 std::byte* ptr_seg =
nullptr;
298 int error = MPI_Win_shared_query(m_all_mpi_win[segment_infos_pos], rank, &size_seg, &
size_type, &ptr_seg);
300 if (error != MPI_SUCCESS) {
301 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
313 if (elem.
size() % m_sizeof_type) {
314 ARCCORE_FATAL(
"Sizeof elem not valid");
316 if (elem.
empty() || elem.
data() ==
nullptr) {
320 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
322 const Int64 actual_sizeof_win = m_sizeof_used_part[segment_infos_pos];
323 const Int64 future_sizeof_win = actual_sizeof_win + elem.
size();
324 const Int64 old_reserved = m_reserved_part_span[num_seg].size();
326 if (future_sizeof_win > old_reserved) {
327 _requestRealloc(segment_infos_pos, future_sizeof_win);
330 _requestRealloc(segment_infos_pos);
333 m_add_requests[num_seg] = elem;
334 m_add_requested =
true;
345 if (!m_add_requested) {
348 m_add_requested =
false;
350 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
351 if (m_add_requests[num_seg].empty() || m_add_requests[num_seg].data() ==
nullptr) {
355 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
357 const Int64 actual_sizeof_win = m_sizeof_used_part[segment_infos_pos];
358 const Int64 future_sizeof_win = actual_sizeof_win + m_add_requests[num_seg].size();
360 if (m_reserved_part_span[num_seg].size() < future_sizeof_win) {
361 ARCCORE_FATAL(
"Bad realloc -- New size : {1} -- Needed size : {2}", m_reserved_part_span[num_seg].size(), future_sizeof_win);
364 for (
Int64 pos_win = actual_sizeof_win, pos_elem = 0; pos_win < future_sizeof_win; ++pos_win, ++pos_elem) {
365 m_reserved_part_span[num_seg][pos_win] = m_add_requests[num_seg][pos_elem];
367 m_sizeof_used_part[segment_infos_pos] = future_sizeof_win;
379 if (elem.
size() % m_sizeof_type) {
380 ARCCORE_FATAL(
"Sizeof elem not valid");
382 if (elem.
empty() || elem.
data() ==
nullptr) {
386 const Int32 machine_rank = _worldToMachine(rank);
387 const Int32 target_segment_infos_pos = num_seg + machine_rank * m_nb_segments_per_proc;
390 const Int32 segment_infos_pos = thread + m_comm_machine_rank * m_nb_segments_per_proc;
391 m_target_segments[segment_infos_pos] = target_segment_infos_pos;
397 std::byte* ptr_seg =
nullptr;
399 int error = MPI_Win_shared_query(m_all_mpi_win[target_segment_infos_pos], machine_rank, &size_seg, &
size_type, &ptr_seg);
401 if (error != MPI_SUCCESS) {
402 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
407 const Int64 actual_sizeof_win = m_sizeof_used_part[target_segment_infos_pos];
408 const Int64 future_sizeof_win = actual_sizeof_win + elem.
size();
409 const Int64 old_reserved = rank_reserved_part_span.
size();
411 if (future_sizeof_win > old_reserved) {
412 _requestRealloc(target_segment_infos_pos, future_sizeof_win);
415 _requestRealloc(target_segment_infos_pos);
418 m_add_requests[thread] = elem;
419 m_add_requested =
true;
428 MPI_Barrier(m_comm_machine);
430 auto is_my_seg_edited = std::make_unique<bool[]>(m_comm_machine_size);
431 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
432 for (
const Int32 rank_asked : m_target_segments) {
433 if (rank_asked == m_comm_machine_rank) {
434 is_my_seg_edited[num_seg] =
true;
440 if (!m_add_requested) {
445 m_add_requested =
false;
446 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
447 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
448 const Int32 seg_needs_to_edit = m_target_segments[segment_infos_pos];
449 if (seg_needs_to_edit == -1)
452 bool is_found =
false;
453 for (
const Int32 rank_asked : m_target_segments) {
454 if (rank_asked == seg_needs_to_edit) {
459 ARCCORE_FATAL(
"Two subdomains ask same rank for addToAnotherSegment()");
467 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
468 if (m_add_requests[num_seg].empty() || m_add_requests[num_seg].data() ==
nullptr) {
472 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
473 const Int32 target_segment_infos_pos = m_target_segments[segment_infos_pos];
474 if (target_segment_infos_pos == -1) {
475 ARCCORE_FATAL(
"Ne devrait pas aller ici");
478 const Int64 actual_sizeof_win = m_sizeof_used_part[target_segment_infos_pos];
479 const Int64 future_sizeof_win = actual_sizeof_win + m_add_requests[num_seg].size();
484 std::byte* ptr_seg =
nullptr;
486 int error = MPI_Win_shared_query(m_all_mpi_win[target_segment_infos_pos], target_segment_infos_pos / m_nb_segments_per_proc, &size_seg, &
size_type, &ptr_seg);
488 if (error != MPI_SUCCESS) {
489 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
494 if (rank_reserved_part_span.
size() < future_sizeof_win) {
495 ARCCORE_FATAL(
"Bad realloc -- New size : {1} -- Needed size : {2}", rank_reserved_part_span.
size(), future_sizeof_win);
498 for (
Int64 pos_win = actual_sizeof_win, pos_elem = 0; pos_win < future_sizeof_win; ++pos_win, ++pos_elem) {
499 rank_reserved_part_span[pos_win] = m_add_requests[num_seg][pos_elem];
501 m_sizeof_used_part[target_segment_infos_pos] = future_sizeof_win;
504 m_target_segments[segment_infos_pos] = -1;
507 MPI_Barrier(m_comm_machine);
509 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
510 if (is_my_seg_edited[num_seg]) {
511 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
514 std::byte* ptr_seg =
nullptr;
516 int error = MPI_Win_shared_query(m_all_mpi_win[segment_infos_pos], m_comm_machine_rank, &size_seg, &
size_type, &ptr_seg);
518 if (error != MPI_SUCCESS) {
519 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
532 if (new_capacity % m_sizeof_type) {
533 ARCCORE_FATAL(
"new_capacity not valid");
536 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
538 if (new_capacity <= m_reserved_part_span[num_seg].size()) {
539 _requestRealloc(segment_infos_pos);
542 _requestRealloc(segment_infos_pos, new_capacity);
560 if (new_size == -1) {
563 if (new_size < 0 || new_size % m_sizeof_type) {
564 ARCCORE_FATAL(
"new_size not valid");
567 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
569 if (new_size > m_reserved_part_span[num_seg].size()) {
570 _requestRealloc(segment_infos_pos, new_size);
573 _requestRealloc(segment_infos_pos);
576 m_resize_requests[num_seg] = new_size;
577 m_resize_requested =
true;
588 if (!m_resize_requested) {
591 m_resize_requested =
false;
593 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
594 if (m_resize_requests[num_seg] == -1) {
598 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
600 if (m_reserved_part_span[num_seg].size() < m_resize_requests[num_seg]) {
601 ARCCORE_FATAL(
"Bad realloc -- New size : {0} -- Needed size : {1}", m_reserved_part_span[num_seg].size(), m_resize_requests[num_seg]);
604 m_sizeof_used_part[segment_infos_pos] = m_resize_requests[num_seg];
605 m_resize_requests[num_seg] = -1;
615 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
616 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
618 if (m_reserved_part_span[num_seg].size() == m_sizeof_used_part[segment_infos_pos]) {
619 _requestRealloc(segment_infos_pos);
622 _requestRealloc(segment_infos_pos, m_sizeof_used_part[segment_infos_pos]);
631void MpiDynamicMultiMachineMemoryWindowBaseInternal::
632_requestRealloc(
Int32 owner_pos_segment,
Int64 new_capacity)
const
634 m_need_resize[owner_pos_segment] = new_capacity;
640void MpiDynamicMultiMachineMemoryWindowBaseInternal::
641_requestRealloc(
Int32 owner_pos_segment)
const
643 m_need_resize[owner_pos_segment] = -1;
649void MpiDynamicMultiMachineMemoryWindowBaseInternal::
654 MPI_Barrier(m_comm_machine);
660 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
661 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
662 m_need_resize[segment_infos_pos] = -1;
670 MPI_Barrier(m_comm_machine);
676void MpiDynamicMultiMachineMemoryWindowBaseInternal::
680 MPI_Info_create(&win_info);
681 MPI_Info_set(win_info,
"alloc_shared_noncontig",
"true");
684 for (
Integer rank = 0; rank < m_comm_machine_size; ++rank) {
685 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
687 const Int32 local_segment_infos_pos = num_seg + rank * m_nb_segments_per_proc;
689 if (m_need_resize[local_segment_infos_pos] == -1)
692 ARCCORE_ASSERT(m_need_resize[local_segment_infos_pos] >= 0, (
"New size must be >= 0"));
693 ARCCORE_ASSERT(m_need_resize[local_segment_infos_pos] % m_sizeof_type == 0, (
"New size must be % sizeof type"));
697 const Int64 size_seg = (m_comm_machine_rank == rank ? (m_need_resize[local_segment_infos_pos] == 0 ? m_sizeof_type : m_need_resize[local_segment_infos_pos]) : 0);
700 MPI_Win old_win = m_all_mpi_win[local_segment_infos_pos];
701 std::byte* ptr_new_seg =
nullptr;
704 int error = MPI_Win_allocate_shared(size_seg, m_sizeof_type, win_info, m_comm_machine, &ptr_new_seg, &m_all_mpi_win[local_segment_infos_pos]);
705 if (error != MPI_SUCCESS) {
706 MPI_Win_free(&old_win);
707 ARCCORE_FATAL(
"Error with MPI_Win_allocate_shared() call");
711 if (m_comm_machine_rank == rank) {
717 std::byte* ptr_old_seg =
nullptr;
718 MPI_Aint mpi_reserved_size_new_seg;
722 MPI_Aint size_old_seg;
726 error = MPI_Win_shared_query(old_win, m_comm_machine_rank, &size_old_seg, &size_type, &ptr_old_seg);
727 if (error != MPI_SUCCESS || ptr_old_seg ==
nullptr) {
728 MPI_Win_free(&old_win);
729 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
735 std::byte* ptr_seg =
nullptr;
739 error = MPI_Win_shared_query(m_all_mpi_win[local_segment_infos_pos], m_comm_machine_rank, &mpi_reserved_size_new_seg, &size_type, &ptr_seg);
740 if (error != MPI_SUCCESS || ptr_seg ==
nullptr || ptr_seg != ptr_new_seg) {
741 MPI_Win_free(&old_win);
742 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
749 const Int64 min_size = std::min(m_need_resize[local_segment_infos_pos], m_sizeof_used_part[local_segment_infos_pos]);
751 memcpy(ptr_new_seg, ptr_old_seg, min_size);
754 MPI_Win_free(&old_win);
757 MPI_Info_free(&win_info);
760 for (
Integer num_seg = 0; num_seg < m_nb_segments_per_proc; ++num_seg) {
761 const Int32 segment_infos_pos = num_seg + m_comm_machine_rank * m_nb_segments_per_proc;
765 std::byte* ptr_seg =
nullptr;
766 int error = MPI_Win_shared_query(m_all_mpi_win[segment_infos_pos], m_comm_machine_rank, &size_seg, &size_type, &ptr_seg);
768 if (error != MPI_SUCCESS) {
769 ARCCORE_FATAL(
"Error with MPI_Win_shared_query() call");
772 m_reserved_part_span[num_seg] = Span<std::byte>{ ptr_seg, size_seg };
779Int32 MpiDynamicMultiMachineMemoryWindowBaseInternal::
780_worldToMachine(
Int32 world)
const
782 for (
Int32 i = 0; i < m_comm_machine_size; ++i) {
783 if (m_machine_ranks[i] == world) {
787 ARCCORE_FATAL(
"Rank is not in machine");
793Int32 MpiDynamicMultiMachineMemoryWindowBaseInternal::
794_machineToWorld(
Int32 machine)
const
796 return m_machine_ranks[machine];
Vue constante d'un tableau de type T.
void executeResize()
Méthode permettant d'exécuter les requêtes de redimensionnement.
Span< const std::byte > segmentConstView(Int32 num_seg) const
Méthode permettant d'obtenir une vue sur l'un de nos segments.
Int32 sizeofOneElem() const
Méthode permettant d'obtenir la taille d'un élement de la fenêtre.
void requestReserve(Int32 num_seg, Int64 new_capacity)
Méthode permettant de demander la réservation d'espace mémoire pour un de nos segments.
ConstArrayView< Int32 > machineRanks() const
Méthode permettant d'obtenir les rangs qui possèdent un segment dans la fenêtre.
void requestResize(Int32 num_seg, Int64 new_size)
Méthode permettant de demander le redimensionnement d'un de nos segments.
void requestAdd(Int32 num_seg, Span< const std::byte > elem)
Méthode permettant de demander l'ajout d'éléments dans l'un de nos segments.
void executeShrink()
Méthode permettant de réduire l'espace mémoire réservé pour les segments au minimum nécessaire.
void executeAdd()
Méthode permettant d'exécuter les requêtes d'ajout.
void executeAddToAnotherSegment()
Méthode permettant d'exécuter les requêtes d'ajout dans les segments d'autres processus.
void requestAddToAnotherSegment(Int32 thread, Int32 rank, Int32 num_seg, Span< const std::byte > elem)
Méthode permettant de demander l'ajout d'éléments dans un des segments de la fenêtre.
MpiDynamicMultiMachineMemoryWindowBaseInternal(SmallSpan< Int64 > sizeof_segments, Int32 nb_segments_per_proc, Int32 sizeof_type, const MPI_Comm &comm_machine, Int32 comm_machine_rank, Int32 comm_machine_size, ConstArrayView< Int32 > machine_ranks)
Le sizeof_segments ne doit pas être conservé !
Span< std::byte > segmentView(Int32 num_seg)
Méthode permettant d'obtenir une vue sur l'un de nos segments.
void barrier() const
Méthode permettant d'attendre que tous les processus du noeud appellent cette méthode avant de continuer l'exécution.
void executeReserve()
Méthode permettant d'exécuter les requêtes de réservation.
Vue d'un tableau d'éléments de type T.
constexpr __host__ __device__ bool empty() const noexcept
Retourne true si le tableau est vide (dimension nulle)
constexpr __host__ __device__ SizeType size() const noexcept
Retourne la taille du tableau.
constexpr __host__ __device__ pointer data() const noexcept
Pointeur sur le début de la vue.
Vue d'un tableau d'éléments de type T.
std::int64_t Int64
Type entier signé sur 64 bits.
Int32 Integer
Type représentant un entier.
std::int32_t Int32
Type entier signé sur 32 bits.