Arcane  v3.15.0.0
Documentation développeur
Chargement...
Recherche...
Aucune correspondance
CudaAcceleratorRuntime.cc
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2025 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7/*---------------------------------------------------------------------------*/
8/* CudaAcceleratorRuntime.cc (C) 2000-2025 */
9/* */
10/* Runtime pour 'Cuda'. */
11/*---------------------------------------------------------------------------*/
12/*---------------------------------------------------------------------------*/
13
14#include "arcane/accelerator/cuda/CudaAccelerator.h"
15
16#include "arcane/utils/PlatformUtils.h"
17#include "arcane/utils/Array.h"
18#include "arcane/utils/TraceInfo.h"
19#include "arcane/utils/NotSupportedException.h"
20#include "arcane/utils/FatalErrorException.h"
21#include "arcane/utils/NotImplementedException.h"
22#include "arcane/utils/IMemoryRessourceMng.h"
23#include "arcane/utils/MemoryView.h"
24#include "arcane/utils/OStringStream.h"
25#include "arcane/utils/ValueConvert.h"
26#include "arcane/utils/internal/IMemoryRessourceMngInternal.h"
27
28#include "arcane/accelerator/core/RunQueueBuildInfo.h"
29#include "arcane/accelerator/core/Memory.h"
30#include "arcane/accelerator/core/DeviceInfoList.h"
31
32#include "arcane/accelerator/core/internal/IRunnerRuntime.h"
33#include "arcane/accelerator/core/internal/RegisterRuntimeInfo.h"
34#include "arcane/accelerator/core/internal/RunCommandImpl.h"
35#include "arcane/accelerator/core/internal/IRunQueueStream.h"
36#include "arcane/accelerator/core/internal/IRunQueueEventImpl.h"
37#include "arcane/accelerator/core/PointerAttribute.h"
38#include "arcane/accelerator/core/RunQueue.h"
39#include "arcane/accelerator/core/DeviceMemoryInfo.h"
40#include "arcane/accelerator/core/NativeStream.h"
41
42#include "arcane/accelerator/cuda/runtime/internal/Cupti.h"
43
44#include <iostream>
45
46#include <cuda.h>
47
48#ifdef ARCANE_HAS_CUDA_NVTOOLSEXT
49#include <nvtx3/nvToolsExt.h>
50#endif
51
52using namespace Arccore;
53
54namespace Arcane::Accelerator::Cuda
55{
// File-local CUPTI state shared by the stream/runtime classes below:
// 'global_cupti_flush' (> 0 forces a CUPTI flush at each stream barrier, set from
// the ARCANE_CUPTI_FLUSH environment variable in fillDevices()) and the single
// CuptiInfo instance driving start()/stop()/flush()/isActive().
56namespace
57{
 Int32 global_cupti_flush = 0;
 CuptiInfo global_cupti_info;
} // namespace
61
62/*---------------------------------------------------------------------------*/
63/*---------------------------------------------------------------------------*/
64
65void arcaneCheckCudaErrors(const TraceInfo& ti, CUresult e)
66{
67 if (e == CUDA_SUCCESS)
68 return;
69 const char* error_name = nullptr;
70 CUresult e2 = cuGetErrorName(e, &error_name);
71 if (e2 != CUDA_SUCCESS)
72 error_name = "Unknown";
73
74 const char* error_message = nullptr;
75 CUresult e3 = cuGetErrorString(e, &error_message);
76 if (e3 != CUDA_SUCCESS)
77 error_message = "Unknown";
78
79 ARCANE_FATAL("CUDA Error trace={0} e={1} name={2} message={3}",
80 ti, e, error_name, error_message);
81}
82
83/*---------------------------------------------------------------------------*/
84/*---------------------------------------------------------------------------*/
85
// NOTE(review): Doxygen extraction — the class declaration line (presumably
// 'class CudaRunQueueStream : public impl::IRunQueueStream', per the overridden
// methods listed below) and the constructor/notify/nativeStream signatures were
// stripped with their hyperlinks. Confirm against the original file before editing.
//
// Wraps a cudaStream_t as an Arcane run-queue stream: creation honors the
// priority from RunQueueBuildInfo, kernel launches are bracketed with optional
// NVTX ranges, and barrier() can trigger a CUPTI flush.
88{
 89 public:
 90
// Constructor (signature stripped): creates the CUDA stream, with the
// requested priority when the build info is not default.
 92 : m_runtime(runtime)
 93 {
 94 if (bi.isDefault())
 95 ARCANE_CHECK_CUDA(cudaStreamCreate(&m_cuda_stream));
 96 else {
 97 int priority = bi.priority();
 98 ARCANE_CHECK_CUDA(cudaStreamCreateWithPriority(&m_cuda_stream, cudaStreamDefault, priority));
 99 }
 100 }
// Destructor must not throw, hence the NOTHROW variant of the check.
 101 ~CudaRunQueueStream() override
 102 {
 103 ARCANE_CHECK_CUDA_NOTHROW(cudaStreamDestroy(m_cuda_stream));
 104 }
 105
 106 public:
 107
// notifyBeginLaunchKernel (signature stripped): pushes an NVTX range named
// after the kernel (or its trace info when no kernel name is set), then
// forwards to the runtime.
 109 {
 110#ifdef ARCANE_HAS_CUDA_NVTOOLSEXT
 111 auto kname = c.kernelName();
 112 if (kname.empty())
 113 nvtxRangePush(c.traceInfo().name());
 114 else
 115 nvtxRangePush(kname.localstr());
 116#endif
 117 return m_runtime->notifyBeginLaunchKernel();
 118 }
// notifyEndLaunchKernel (signature stripped): pops the NVTX range and
// forwards to the runtime (which checks cudaGetLastError()).
 120 {
 121#ifdef ARCANE_HAS_CUDA_NVTOOLSEXT
 122 nvtxRangePop();
 123#endif
 124 return m_runtime->notifyEndLaunchKernel();
 125 }
// Blocks until all work queued on this stream has completed; optionally
// flushes CUPTI records (ARCANE_CUPTI_FLUSH environment variable).
 126 void barrier() override
 127 {
 128 ARCANE_CHECK_CUDA(cudaStreamSynchronize(m_cuda_stream));
 129 if (global_cupti_flush > 0)
 130 global_cupti_info.flush();
 131 }
// Exception-free barrier: returns true on error (non-cudaSuccess).
 132 bool _barrierNoException() override
 133 {
 134 return (cudaStreamSynchronize(m_cuda_stream) != cudaSuccess);
 135 }
// Asynchronous copy on this stream; cudaMemcpyDefault lets the driver infer
// the direction from the pointers. Synchronizes when the args are not async.
 136 void copyMemory(const MemoryCopyArgs& args) override
 137 {
 138 auto source_bytes = args.source().bytes();
 139 auto r = cudaMemcpyAsync(args.destination().data(), source_bytes.data(),
 140 source_bytes.size(), cudaMemcpyDefault, m_cuda_stream);
 141 ARCANE_CHECK_CUDA(r);
 142 if (!args.isAsync())
 143 barrier();
 144 }
// Prefetches managed memory to the target device (or to the host via
// cudaCpuDeviceId). No-op on an empty view.
 145 void prefetchMemory(const MemoryPrefetchArgs& args) override
 146 {
 147 auto src = args.source().bytes();
 148 if (src.size() == 0)
 149 return;
 150 DeviceId d = args.deviceId();
 151 int device = cudaCpuDeviceId;
 152 if (!d.isHost())
 153 device = d.asInt32();
 154 //std::cout << "PREFETCH device=" << device << " host(id)=" << cudaCpuDeviceId
 155 // << " size=" << args.source().size() << " data=" << src.data() << "\n";
 156 auto r = cudaMemPrefetchAsync(src.data(), src.size(), device, m_cuda_stream);
 157 ARCANE_CHECK_CUDA(r);
 158 if (!args.isAsync())
 159 barrier();
 160 }
// nativeStream (signature stripped, presumably 'impl::NativeStream
// nativeStream() override'): exposes the underlying cudaStream_t opaquely.
 162 {
 163 return impl::NativeStream(&m_cuda_stream);
 164 }
 165
 166 public:
 167
// Direct accessor used by CudaRunQueueEvent for record/wait operations.
 168 cudaStream_t trueStream() const
 169 {
 170 return m_cuda_stream;
 171 }
 172
 173 private:
 174
 175 impl::IRunnerRuntime* m_runtime;
 176 cudaStream_t m_cuda_stream;
 177};
178
179/*---------------------------------------------------------------------------*/
180/*---------------------------------------------------------------------------*/
181
// NOTE(review): Doxygen extraction — the class declaration line (presumably
// 'class CudaRunQueueEvent : public impl::IRunQueueEventImpl') was stripped
// with its hyperlink. Confirm against the original file before editing.
//
// Wraps a cudaEvent_t: timing is only enabled when requested, since timed
// events are more expensive (cudaEventDisableTiming otherwise).
184{
 185 public:
 186
 187 explicit CudaRunQueueEvent(bool has_timer)
 188 {
 189 if (has_timer)
 190 ARCANE_CHECK_CUDA(cudaEventCreate(&m_cuda_event));
 191 else
 192 ARCANE_CHECK_CUDA(cudaEventCreateWithFlags(&m_cuda_event, cudaEventDisableTiming));
 193 }
 194 ~CudaRunQueueEvent() override
 195 {
 196 ARCANE_CHECK_CUDA_NOTHROW(cudaEventDestroy(m_cuda_event));
 197 }
 198
 199 public:
 200
 201 // Records the event on the given queue's CUDA stream.
 202 void recordQueue(impl::IRunQueueStream* stream) final
 203 {
 204 auto* rq = static_cast<CudaRunQueueStream*>(stream);
 205 ARCANE_CHECK_CUDA(cudaEventRecord(m_cuda_event, rq->trueStream()));
 206 }
 207
// Blocks the host until the event has completed.
 208 void wait() final
 209 {
 210 ARCANE_CHECK_CUDA(cudaEventSynchronize(m_cuda_event));
 211 }
 212
// Makes the given stream wait (device-side) on this event.
 213 void waitForEvent(impl::IRunQueueStream* stream) final
 214 {
 215 auto* rq = static_cast<CudaRunQueueStream*>(stream);
 216 ARCANE_CHECK_CUDA(cudaStreamWaitEvent(rq->trueStream(), m_cuda_event, cudaEventWaitDefault));
 217 }
 218
// Elapsed time in nanoseconds between 'start_event' and this event.
 219 Int64 elapsedTime(IRunQueueEventImpl* start_event) final
 220 {
 221 // NOTE: Both events must have been created with the timer enabled.
// NOTE(review): original line 222 was stripped here — presumably a null check
// such as 'ARCANE_CHECK_POINTER(start_event);'. Verify against the original file.
 223 auto* true_start_event = static_cast<CudaRunQueueEvent*>(start_event);
 224 float time_in_ms = 0.0;
 225
 226 // TODO: check whether this synchronization is necessary
 227 // ARCANE_CHECK_CUDA(cudaEventSynchronize(m_cuda_event));
 228
 229 ARCANE_CHECK_CUDA(cudaEventElapsedTime(&time_in_ms, true_start_event->m_cuda_event, m_cuda_event));
// Convert milliseconds (float) to nanoseconds (Int64).
 230 double x = time_in_ms * 1.0e6;
 231 Int64 nano_time = static_cast<Int64>(x);
 232 return nano_time;
 233 }
 234
 235 private:
 236
 237 cudaEvent_t m_cuda_event;
 238};
239
240/*---------------------------------------------------------------------------*/
241/*---------------------------------------------------------------------------*/
242
// NOTE(review): Doxygen extraction — the class declaration line (presumably
// 'class CudaRunnerRuntime : public impl::IRunnerRuntime') was stripped, as
// were several interior lines flagged below. Confirm against the original file.
//
// CUDA implementation of the runner runtime: stream/event factories, memory
// advice, device selection, CUPTI profiling control and device introspection.
245{
 246 public:
 247
 248 ~CudaRunnerRuntime() override = default;
 249
 250 public:
 251
// Counts launches and optionally traces them (m_is_verbose is never set to
// true in the visible code).
 252 void notifyBeginLaunchKernel() override
 253 {
 254 ++m_nb_kernel_launched;
 255 if (m_is_verbose)
 256 std::cout << "BEGIN CUDA KERNEL!\n";
 257 }
// Surfaces launch-configuration errors right after the kernel launch.
 258 void notifyEndLaunchKernel() override
 259 {
 260 ARCANE_CHECK_CUDA(cudaGetLastError());
 261 if (m_is_verbose)
 262 std::cout << "END CUDA KERNEL!\n";
 263 }
// Device-wide synchronization (all streams).
 264 void barrier() override
 265 {
 266 ARCANE_CHECK_CUDA(cudaDeviceSynchronize());
 267 }
 268 eExecutionPolicy executionPolicy() const override
 269 {
// NOTE(review): original line 270 was stripped — presumably
// 'return eExecutionPolicy::CUDA;'. Verify against the original file.
 271 }
 272 impl::IRunQueueStream* createStream(const RunQueueBuildInfo& bi) override
 273 {
 274 return new CudaRunQueueStream(this, bi);
 275 }
// Event without timing support (cheaper).
 276 impl::IRunQueueEventImpl* createEventImpl() override
 277 {
 278 return new CudaRunQueueEvent(false);
 279 }
// Event with timing support, required by elapsedTime().
 280 impl::IRunQueueEventImpl* createEventImplWithTimer() override
 281 {
 282 return new CudaRunQueueEvent(true);
 283 }
// Applies a managed-memory advice to the buffer via cudaMemAdvise.
 284 void setMemoryAdvice(ConstMemoryView buffer, eMemoryAdvice advice, DeviceId device_id) override
 285 {
 286 auto v = buffer.bytes();
 287 const void* ptr = v.data();
 288 size_t count = v.size();
 289 int device = device_id.asInt32();
// NOTE(review): original lines 290-299 were stripped — presumably the
// declaration of 'cudaMemoryAdvise cuda_advise;' and the if/else mapping
// from eMemoryAdvice values to cudaMemAdvise* constants, with the host-
// targeted advice branches ending in the fragments kept below. Restore
// from the original file.
 291
 300 device = cudaCpuDeviceId;
 301 }
 304 device = cudaCpuDeviceId;
 305 }
 306 else
 307 return;
 308 //std::cout << "MEMADVISE p=" << ptr << " size=" << count << " advise = " << cuda_advise << " id = " << device << "\n";
 309 ARCANE_CHECK_CUDA(cudaMemAdvise(ptr, count, cuda_advise, device));
 310 }
// Removes a previously applied advice (cudaMemAdviseUnset* family).
 311 void unsetMemoryAdvice(ConstMemoryView buffer, eMemoryAdvice advice, DeviceId device_id) override
 312 {
 313 auto v = buffer.bytes();
 314 const void* ptr = v.data();
 315 size_t count = v.size();
 316 int device = device_id.asInt32();
// NOTE(review): original lines 317-326 and 329-330 were stripped — presumably
// the 'cudaMemoryAdvise cuda_advise;' declaration and the unset-advice
// mapping, mirroring setMemoryAdvice above. Restore from the original file.
 318
 327 device = cudaCpuDeviceId;
 328 }
 331 device = cudaCpuDeviceId;
 332 }
 333 else
 334 return;
 335 ARCANE_CHECK_CUDA(cudaMemAdvise(ptr, count, cuda_advise, device));
 336 }
 337
// Selects the active CUDA device; only accelerator ids are accepted.
 338 void setCurrentDevice(DeviceId device_id) final
 339 {
 340 Int32 id = device_id.asInt32();
 341 if (!device_id.isAccelerator())
 342 ARCANE_FATAL("Device {0} is not an accelerator device", id);
 343 ARCANE_CHECK_CUDA(cudaSetDevice(id));
 344 }
 345
 346 const IDeviceInfoList* deviceInfoList() final { return &m_device_info_list; }
 347
 348 void startProfiling() override
 349 {
 350 global_cupti_info.start();
 351 }
 352
 353 void stopProfiling() override
 354 {
 355 global_cupti_info.stop();
 356 }
 357
 358 bool isProfilingActive() override
 359 {
 360 return global_cupti_info.isActive();
 361 }
 362
// Queries CUDA for the memory type/device/host+device pointers of 'ptr'.
 363 void getPointerAttribute(PointerAttribute& attribute, const void* ptr) override
 364 {
// NOTE(review): original line 365 was stripped — presumably
// 'cudaPointerAttributes ca;'. Verify against the original file.
 366 ARCANE_CHECK_CUDA(cudaPointerGetAttributes(&ca, ptr));
 367 // NOTE: the Arcane type 'ePointerMemoryType' normally has the same values
 368 // as the corresponding CUDA type, so a simple cast is enough.
 369 auto mem_type = static_cast<ePointerMemoryType>(ca.type);
 370 _fillPointerAttribute(attribute, mem_type, ca.device,
 371 ptr, ca.devicePointer, ca.hostPointer);
 372 }
 373
// Reads free/total memory of the requested device, temporarily switching the
// current device when it differs, and restoring it afterwards.
 374 DeviceMemoryInfo getDeviceMemoryInfo(DeviceId device_id) override
 375 {
 376 int d = 0;
 377 int wanted_d = device_id.asInt32();
 378 ARCANE_CHECK_CUDA(cudaGetDevice(&d));
 379 if (d != wanted_d)
 380 ARCANE_CHECK_CUDA(cudaSetDevice(wanted_d));
 381 size_t free_mem = 0;
 382 size_t total_mem = 0;
 383 ARCANE_CHECK_CUDA(cudaMemGetInfo(&free_mem, &total_mem));
 384 if (d != wanted_d)
 385 ARCANE_CHECK_CUDA(cudaSetDevice(d));
// NOTE(review): original line 386 was stripped — presumably
// 'DeviceMemoryInfo dmi;'. Verify against the original file.
 387 dmi.setFreeMemory(free_mem);
 388 dmi.setTotalMemory(total_mem);
 389 return dmi;
 390 }
 391
// Opens an NVTX range, colored (ARGB) when color_rgb >= 0.
 392 void pushProfilerRange(const String& name, Int32 color_rgb) override
 393 {
 394#ifdef ARCANE_HAS_CUDA_NVTOOLSEXT
 395 if (color_rgb >= 0) {
 396 // NOTE: We should write: nvtxEventAttributes_t eventAttrib = { 0 };
 397 // but that triggers lots of 'missing initializer for member' warnings.
// NOTE(review): original lines 398, 401, 404 and 406 were stripped —
// presumably the 'nvtxEventAttributes_t eventAttrib;' declaration, the
// eventAttrib.size / messageType assignments and the nvtxRangePushEx()
// call. Restore from the original file.
 399 std::memset(&eventAttrib, 0, sizeof(nvtxEventAttributes_t));
 400 eventAttrib.version = NVTX_VERSION;
 402 eventAttrib.colorType = NVTX_COLOR_ARGB;
 403 eventAttrib.color = color_rgb;
 405 eventAttrib.message.ascii = name.localstr();
 407 }
 408 else
 409 nvtxRangePush(name.localstr());
 410#endif
 411 }
 412 void popProfilerRange() override
 413 {
 414#ifdef ARCANE_HAS_CUDA_NVTOOLSEXT
 415 nvtxRangePop();
 416#endif
 417 }
 418
 419 void finalize(ITraceMng* tm) override
 420 {
 421 finalizeCudaMemoryAllocators(tm);
 422 }
 423
 424 public:
 425
 426 void fillDevices(bool is_verbose);
 427
 428 private:
 429
 430 Int64 m_nb_kernel_launched = 0;
 431 bool m_is_verbose = false;
 432 impl::DeviceInfoList m_device_info_list;
 433};
434
435/*---------------------------------------------------------------------------*/
436/*---------------------------------------------------------------------------*/
437
// Enumerates the CUDA devices, builds a human-readable description for each,
// registers them in m_device_info_list, and reads the ARCANE_CUPTI_* environment
// variables to configure/enable CUPTI profiling.
// NOTE(review): Doxygen extraction — several declaration lines inside this
// function were stripped with their hyperlinks (flagged below). Confirm
// against the original file before editing.
438void CudaRunnerRuntime::
439fillDevices(bool is_verbose)
440{
 441 int nb_device = 0;
 442 ARCANE_CHECK_CUDA(cudaGetDeviceCount(&nb_device));
 443 std::ostream& omain = std::cout;
 444 if (is_verbose)
 445 omain << "ArcaneCUDA: Initialize Arcane CUDA runtime nb_available_device=" << nb_device << "\n";
 446 for (int i = 0; i < nb_device; ++i) {
// NOTE(review): original lines 447-449 were stripped — presumably
// 'cudaDeviceProp dp;', the cudaGetDeviceProperties(&dp, i) call, and
// 'OStringStream ostr;'. Restore from the original file.
 450 std::ostream& o = ostr.stream();
 451 o << "Device " << i << " name=" << dp.name << "\n";
 452 o << " computeCapability = " << dp.major << "." << dp.minor << "\n";
 453 o << " totalGlobalMem = " << dp.totalGlobalMem << "\n";
 454 o << " sharedMemPerBlock = " << dp.sharedMemPerBlock << "\n";
 455 o << " regsPerBlock = " << dp.regsPerBlock << "\n";
 456 o << " warpSize = " << dp.warpSize << "\n";
 457 o << " memPitch = " << dp.memPitch << "\n";
 458 o << " maxThreadsPerBlock = " << dp.maxThreadsPerBlock << "\n";
 459 o << " totalConstMem = " << dp.totalConstMem << "\n";
 460 o << " clockRate = " << dp.clockRate << "\n";
 461 o << " deviceOverlap = " << dp.deviceOverlap << "\n";
 462 o << " cooperativeLaunch = " << dp.cooperativeLaunch << "\n";
 463 o << " multiProcessorCount = " << dp.multiProcessorCount << "\n";
 464 o << " kernelExecTimeoutEnabled = " << dp.kernelExecTimeoutEnabled << "\n";
 465 o << " integrated = " << dp.integrated << "\n";
 466 o << " canMapHostMemory = " << dp.canMapHostMemory << "\n";
 467 o << " computeMode = " << dp.computeMode << "\n";
 468 o << " directManagedMemAccessFromHost = " << dp.directManagedMemAccessFromHost << "\n";
 469 o << " hostNativeAtomicSupported = " << dp.hostNativeAtomicSupported << "\n";
 470 o << " pageableMemoryAccess = " << dp.pageableMemoryAccess << "\n";
 471 o << " concurrentManagedAccess = " << dp.concurrentManagedAccess << "\n";
 472 o << " pageableMemoryAccessUsesHostPageTables = " << dp.pageableMemoryAccessUsesHostPageTables << "\n";
 473 o << " hostNativeAtomicSupported = " << dp.hostNativeAtomicSupported << "\n";
 474 o << " maxThreadsDim = " << dp.maxThreadsDim[0] << " " << dp.maxThreadsDim[1]
 475 << " " << dp.maxThreadsDim[2] << "\n";
 476 o << " maxGridSize = " << dp.maxGridSize[0] << " " << dp.maxGridSize[1]
 477 << " " << dp.maxGridSize[2] << "\n";
 478 {
 479 int least_val = 0;
 480 int greatest_val = 0;
// NOTE(review): original line 481 was stripped — presumably the
// cudaDeviceGetStreamPriorityRange(&least_val, &greatest_val) call.
 482 o << " leastPriority = " << least_val << " greatestPriority = " << greatest_val << "\n";
 483 }
 484 {
 485 CUdevice device;
 486 ARCANE_CHECK_CUDA(cuDeviceGet(&device, i));
// NOTE(review): original line 487 was stripped — presumably
// 'CUuuid device_uuid;'. Restore from the original file.
 488 ARCANE_CHECK_CUDA(cuDeviceGetUuid(&device_uuid, device));
 489 o << " deviceUuid=";
 490 impl::printUUID(o, device_uuid.bytes);
 491 o << "\n";
 492 }
 493 String description(ostr.str());
 494 if (is_verbose)
 495 omain << description;
 496
// NOTE(review): original line 497 was stripped — presumably
// 'DeviceInfo device_info;'. Restore from the original file.
 498 device_info.setDescription(description);
 499 device_info.setDeviceId(DeviceId(i));
 500 device_info.setName(dp.name);
 501 m_device_info_list.addDevice(device_info);
 502 }
 503
 504 Int32 global_cupti_level = 0;
 505
 506 // Check whether CUPTI should be enabled
 507 if (auto v = Convert::Type<Int32>::tryParseFromEnvironment("ARCANE_CUPTI_LEVEL", true))
 508 global_cupti_level = v.value();
 509 if (auto v = Convert::Type<Int32>::tryParseFromEnvironment("ARCANE_CUPTI_FLUSH", true))
 510 global_cupti_flush = v.value();
 511 bool do_print_cupti = true;
 512 if (auto v = Convert::Type<Int32>::tryParseFromEnvironment("ARCANE_CUPTI_PRINT", true))
 513 do_print_cupti = (v.value() != 0);
 514
 515 if (global_cupti_level > 0) {
 516#ifndef ARCANE_HAS_CUDA_CUPTI
 517 ARCANE_FATAL("Trying to enable CUPTI but Arcane is not compiled with cupti support");
 518#endif
 519 global_cupti_info.init(global_cupti_level, do_print_cupti);
 520 global_cupti_info.start();
 521 }
522}
523
524/*---------------------------------------------------------------------------*/
525/*---------------------------------------------------------------------------*/
526
// NOTE(review): Doxygen extraction — original line 527 (presumably
// 'class CudaMemoryCopier') and line 531 (presumably the
// 'MutableMemoryView to, [[maybe_unused]] eMemoryRessource to_mem,'
// part of the copy() signature) were stripped with their hyperlinks.
// Confirm against the original file before editing.
//
// Memory copier: routes the copy through the given RunQueue when one is
// provided (honoring its async mode), otherwise performs a blocking
// cudaMemcpy with automatic direction detection.
528: public IMemoryCopier
529{
 530 void copy(ConstMemoryView from, [[maybe_unused]] eMemoryRessource from_mem,
 532 const RunQueue* queue) override
 533 {
 534 if (queue) {
 535 queue->copyMemory(MemoryCopyArgs(to.bytes(), from.bytes()).addAsync(queue->isAsync()));
 536 return;
 537 }
 538 // 'cudaMemcpyDefault' automatically figures out what to do based solely
 539 // on the pointer values. It would be worth checking whether using
 540 // \a from_mem and \a to_mem could improve performance.
 541 ARCANE_CHECK_CUDA(cudaMemcpy(to.data(), from.data(), from.bytes().size(), cudaMemcpyDefault));
 542 }
543};
544
545/*---------------------------------------------------------------------------*/
546/*---------------------------------------------------------------------------*/
547
548} // End namespace Arcane::Accelerator::Cuda
549
550namespace
551{
// NOTE(review): Doxygen extraction — original lines 552-553 were stripped;
// they presumably define 'global_cuda_runtime' (a CudaRunnerRuntime) and
// 'global_cuda_memory_copier' (a CudaMemoryCopier), both referenced by
// arcaneRegisterAcceleratorRuntimecuda below. Restore from the original file.
} // namespace
555
556/*---------------------------------------------------------------------------*/
557/*---------------------------------------------------------------------------*/
558
// This function is the entry point used when this library is loaded
// dynamically: it registers the CUDA runtime and wires the CUDA memory
// allocators/copier into Arcane's memory-resource manager.
// NOTE(review): the calls below are order-sensitive (allocators must be
// initialized before they are handed to the manager, and fillDevices()
// runs last); do not reorder.
561extern "C" ARCANE_EXPORT void
562arcaneRegisterAcceleratorRuntimecuda(Arcane::Accelerator::RegisterRuntimeInfo& init_info)
563{
 564 using namespace Arcane;
 565 using namespace Arcane::Accelerator::Cuda;
 566 Arcane::Accelerator::impl::setUsingCUDARuntime(true);
 567 Arcane::Accelerator::impl::setCUDARunQueueRuntime(&global_cuda_runtime);
 568 initializeCudaMemoryAllocators();
 569 Arcane::platform::setAcceleratorHostMemoryAllocator(getCudaMemoryAllocator());
 570 IMemoryRessourceMngInternal* mrm = platform::getDataMemoryRessourceMng()->_internal();
 571 mrm->setIsAccelerator(true);
// Route each Arcane memory resource to its dedicated CUDA allocator.
 572 mrm->setAllocator(eMemoryRessource::UnifiedMemory, getCudaUnifiedMemoryAllocator());
 573 mrm->setAllocator(eMemoryRessource::HostPinned, getCudaHostPinnedMemoryAllocator());
 574 mrm->setAllocator(eMemoryRessource::Device, getCudaDeviceMemoryAllocator());
 575 mrm->setCopier(&global_cuda_memory_copier);
 576 global_cuda_runtime.fillDevices(init_info.isVerbose());
577}
578
579/*---------------------------------------------------------------------------*/
580/*---------------------------------------------------------------------------*/
#define ARCANE_CHECK_POINTER(ptr)
Macro retournant le pointeur ptr s'il est non nul ou lancant une exception s'il est nul.
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
void copy(ConstMemoryView from, eMemoryRessource from_mem, MutableMemoryView to, eMemoryRessource to_mem, const RunQueue *queue) override
Copie les données de from vers to avec la queue queue.
void notifyBeginLaunchKernel(impl::RunCommandImpl &c) override
Notification avant le lancement de la commande.
void barrier() override
Bloque jusqu'à ce que toutes les actions associées à cette file soient terminées.
bool _barrierNoException() override
Barrière sans exception. Retourne true en cas d'erreur.
void prefetchMemory(const MemoryPrefetchArgs &args) override
Effectue un pré-chargement d'une zone mémoire.
void notifyEndLaunchKernel(impl::RunCommandImpl &) override
Notification de fin de lancement de la commande.
impl::NativeStream nativeStream() override
Pointeur sur la structure interne dépendante de l'implémentation.
void copyMemory(const MemoryCopyArgs &args) override
Effectue une copie entre deux zones mémoire.
Identifiant d'un composant du système.
Definition DeviceId.h:33
bool isHost() const
Indique si l'instance est associée à l'hôte.
Definition DeviceId.h:60
Int32 asInt32() const
Valeur numérique du device.
Definition DeviceId.h:69
Information sur un device.
Definition DeviceInfo.h:32
Information mémoire d'un accélérateur.
Interface d'une liste de devices.
Arguments pour la copie mémoire.
Definition Memory.h:63
Arguments pour le préfetching mémoire.
Definition Memory.h:125
Informations sur une adresse mémoire.
Informations pour initialiser le runtime accélérateur.
Informations pour créer une RunQueue.
File d'exécution pour un accélérateur.
bool isAsync() const
Indique si la file d'exécution est asynchrone.
Definition RunQueue.cc:320
void copyMemory(const MemoryCopyArgs &args) const
Copie des informations entre deux zones mémoires.
Definition RunQueue.cc:237
Interface d'une liste de devices.
Interface de l'implémentation d'un évènement.
Interface d'un flux d'exécution pour une RunQueue.
Interface du runtime associé à un accélérateur.
Type opaque pour encapsuler une 'stream' native.
Implémentation d'une commande pour accélérateur.
Interface pour les copies mémoire avec support des accélérateurs.
Partie interne à Arcane de 'IMemoryRessourceMng'.
Lecteur des fichiers de maillage via la bibliothèque LIMA.
Definition Lima.cc:149
Flot de sortie lié à une String.
Interface du gestionnaire de traces.
Chaîne de caractères unicode.
eMemoryAdvice
Conseils pour la gestion mémoire.
Definition Memory.h:36
@ AccessedByHost
Indique que la zone mémoire est accédée par l'hôte.
@ PreferredLocationDevice
Privilégié le positionnement de la mémoire sur l'accélérateur.
@ MostlyRead
Indique que la zone mémoire est principalement en lecture seule.
@ PreferredLocationHost
Privilégié le positionnement de la mémoire sur l'hôte.
@ AccessedByDevice
Indique que la zone mémoire est accédée par l'accélérateur.
ePointerMemoryType
Type de mémoire pour un pointeur.
eExecutionPolicy
Politique d'exécution pour un Runner.
@ CUDA
Politique d'exécution utilisant l'environnement CUDA.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Espace de nom de Arccore.
Definition ArcaneTypes.h:24
std::int64_t Int64
Type entier signé sur 64 bits.
eMemoryResource
Liste des ressources mémoire disponibles.
std::int32_t Int32
Type entier signé sur 32 bits.