Arcane  v3.14.10.0
Documentation développeur
Chargement...
Recherche...
Aucune correspondance
Reduce.h
Aller à la documentation de ce fichier.
1// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
2//-----------------------------------------------------------------------------
3// Copyright 2000-2024 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
4// See the top-level COPYRIGHT file for details.
5// SPDX-License-Identifier: Apache-2.0
6//-----------------------------------------------------------------------------
7/*---------------------------------------------------------------------------*/
8/* Reduce.h (C) 2000-2024 */
9/* */
10/* Gestion des réductions pour les accélérateurs. */
11/*---------------------------------------------------------------------------*/
12#ifndef ARCANE_ACCELERATOR_REDUCE_H
13#define ARCANE_ACCELERATOR_REDUCE_H
14/*---------------------------------------------------------------------------*/
15/*---------------------------------------------------------------------------*/
16
17#include "arcane/utils/ArrayView.h"
18#include "arcane/utils/String.h"
19
20#include "arcane/accelerator/core/IReduceMemoryImpl.h"
21#include "arcane/accelerator/AcceleratorGlobal.h"
22#include "arcane/accelerator/CommonUtils.h"
23#include "arcane/accelerator/RunCommandLaunchInfo.h"
25
26#include <limits.h>
27#include <float.h>
28#include <atomic>
29#include <iostream>
30
31/*---------------------------------------------------------------------------*/
32/*---------------------------------------------------------------------------*/
33
34namespace Arcane::impl
35{
36class HostReducerHelper;
37}
38
39namespace Arcane::Accelerator::impl
40{
41class KernelReducerHelper;
42
43/*---------------------------------------------------------------------------*/
44/*---------------------------------------------------------------------------*/
45
46using namespace Arccore;
47
48/*---------------------------------------------------------------------------*/
49/*---------------------------------------------------------------------------*/
50
// Returns (creating it on first use) the IReduceMemoryImpl instance that
// manages the device-side scratch buffers used by the reduction attached
// to 'command'. May return nullptr when no accelerator backend is active
// (callers below test the returned pointer before using it).
51extern "C++" ARCANE_ACCELERATOR_CORE_EXPORT IReduceMemoryImpl*
52internalGetOrCreateReduceMemoryImpl(RunCommand* command);
53
// Identity elements for the Sum/Min/Max reductions, specialized per data type.
// sumValue() is the neutral element of '+', minValue()/maxValue() are the
// starting values for min/max scans.
54template <typename DataType>
56template <>
57// TODO: use std::numeric_limits instead of the C limit macros.
59{
60 public:
61
62 ARCCORE_HOST_DEVICE static constexpr double sumValue() { return 0.0; }
63 ARCCORE_HOST_DEVICE static constexpr double minValue() { return DBL_MAX; }
// -DBL_MAX is the lowest finite double (equivalent to std::numeric_limits<double>::lowest()).
64 ARCCORE_HOST_DEVICE static constexpr double maxValue() { return -DBL_MAX; }
65};
66template <>
68{
69 public:
70
71 ARCCORE_HOST_DEVICE static constexpr Int32 sumValue() { return 0; }
72 ARCCORE_HOST_DEVICE static constexpr Int32 minValue() { return INT32_MAX; }
// NOTE(review): -INT32_MAX rather than INT32_MIN — presumably chosen to keep
// the min/max identities symmetric; confirm this is intentional.
73 ARCCORE_HOST_DEVICE static constexpr Int32 maxValue() { return -INT32_MAX; }
74};
75template <>
77{
78 public:
79
80 ARCCORE_HOST_DEVICE static constexpr Int64 sumValue() { return 0; }
81 ARCCORE_HOST_DEVICE static constexpr Int64 minValue() { return INT64_MAX; }
// NOTE(review): -INT64_MAX rather than INT64_MIN — same symmetry remark as above.
82 ARCCORE_HOST_DEVICE static constexpr Int64 maxValue() { return -INT64_MAX; }
83};
84
85/*---------------------------------------------------------------------------*/
86/*---------------------------------------------------------------------------*/
87// L'implémentation utilisée est définie dans 'CommonCudaHipReduceImpl.h'
88
89/*---------------------------------------------------------------------------*/
90/*---------------------------------------------------------------------------*/
// Plain data holder passed to the device-side reduction kernels
// (consumed by ReduceFunctor*::applyDevice(), implemented in
// 'CommonCudaHipReduceImpl.h').
95template <typename DataType>
97{
98 public:
99
// Identity element of the reduction (see ReduceIdentity).
103 DataType m_identity;
// Device memory receiving the final reduced value.
105 DataType* m_device_final_ptr = nullptr;
// Host memory mirroring the final reduced value.
107 void* m_host_final_ptr = nullptr;
// Device counter — presumably used to detect the last block during the
// grid-level reduction (see the SYCL path below for the analogous usage).
114 unsigned int* m_device_count = nullptr;
115
// True when the grid-based reduction is used; false when the reduce
// policy is eDeviceReducePolicy::Atomic (set in _finalize() below).
117 bool m_use_grid_reduce = true;
118};
119
120/*---------------------------------------------------------------------------*/
121/*---------------------------------------------------------------------------*/
122
// Host-side atomic accumulation helpers, specialized per data type.
// Each apply() atomically adds 'v' into '*vptr' and returns the updated value.
123template <typename DataType>
125
126template <>
128{
129 public:
130
131 static double apply(double* vptr, double v)
132 {
// No integer-style fetch_add is used for double: a compare-exchange loop
// performs the atomic addition instead. On CAS failure,
// compare_exchange_weak reloads 'old' with the current value, so only
// 'wanted' needs recomputing before retrying.
133 std::atomic_ref<double> aref(*vptr);
134 double old = aref.load(std::memory_order_consume);
135 double wanted = old + v;
136 while (!aref.compare_exchange_weak(old, wanted, std::memory_order_release, std::memory_order_consume))
137 wanted = old + v;
138 return wanted;
139 }
140};
141template <>
143{
144 public:
145
146 static Int64 apply(Int64* vptr, Int64 v)
147 {
// fetch_add returns the previous value; return previous + v = new value.
148 std::atomic_ref<Int64> aref(*vptr);
149 Int64 x = aref.fetch_add(v);
150 return x + v;
151 }
152};
153template <>
155{
156 public:
157
158 static Int32 apply(Int32* vptr, Int32 v)
159 {
160 std::atomic_ref<Int32> aref(*vptr);
161 Int32 x = aref.fetch_add(v);
162 return x + v;
163 }
164};
165
166/*---------------------------------------------------------------------------*/
167/*---------------------------------------------------------------------------*/
168
// 'Sum' reduction functor: device entry point, host combine, SYCL functor
// and identity element. The device implementation (_applyDevice) is defined
// in 'CommonCudaHipReduceImpl.h'.
169template <typename DataType>
171{
 public:
173
// Runs the device-side grid reduction described by 'dev_info' and returns
// the reduced value read back from device memory.
174 static ARCCORE_DEVICE DataType
175 applyDevice(const ReduceDeviceInfo<DataType>& dev_info)
176 {
177 _applyDevice(dev_info);
178 return *(dev_info.m_device_final_ptr);
179 }
// Host-side combine of 'v' into '*vptr' — NOTE(review): body not visible in
// this view; presumably delegates to the atomic-sum helper above. Verify
// against the original source.
180 static DataType apply(DataType* vptr, DataType v)
181 {
183 }
184#if defined(ARCANE_COMPILING_SYCL)
// Binary operation handed to sycl::reduce_over_group().
185 static sycl::plus<DataType> syclFunctor() { return {}; }
186#endif
187
188 public:
189
190 ARCCORE_HOST_DEVICE static constexpr DataType identity() { return impl::ReduceIdentity<DataType>::sumValue(); }
191
192 private:
193
194 static ARCCORE_DEVICE void _applyDevice(const ReduceDeviceInfo<DataType>& dev_info);
195};
196
197/*---------------------------------------------------------------------------*/
198/*---------------------------------------------------------------------------*/
199
// 'Max' reduction functor (same structure as the sum functor above).
200template <typename DataType>
202{
 public:
204
205 static ARCCORE_DEVICE DataType
206 applyDevice(const ReduceDeviceInfo<DataType>& dev_info)
207 {
208 _applyDevice(dev_info);
209 return *(dev_info.m_device_final_ptr);
210 }
// Host-side atomic max: raise '*ptr' to at least 'v'.
// compare_exchange_weak refreshes 'prev_value' on failure, so the loop
// exits as soon as the stored value is already >= v or the CAS succeeds.
211 static DataType apply(DataType* ptr, DataType v)
212 {
213 std::atomic_ref<DataType> aref(*ptr);
214 DataType prev_value = aref.load();
215 while (prev_value < v && !aref.compare_exchange_weak(prev_value, v)) {
216 }
// Reload: a concurrent writer may have raised the value further.
217 return aref.load();
218 }
219#if defined(ARCANE_COMPILING_SYCL)
220 static sycl::maximum<DataType> syclFunctor() { return {}; }
221#endif
222
223 public:
224
225 ARCCORE_HOST_DEVICE static constexpr DataType identity() { return impl::ReduceIdentity<DataType>::maxValue(); }
226
227 private:
228
229 static ARCCORE_DEVICE void _applyDevice(const ReduceDeviceInfo<DataType>& dev_info);
230};
231
232/*---------------------------------------------------------------------------*/
233/*---------------------------------------------------------------------------*/
234
// 'Min' reduction functor (mirror image of the max functor above).
235template <typename DataType>
237{
 public:
239
240 static ARCCORE_DEVICE DataType
241 applyDevice(const ReduceDeviceInfo<DataType>& dev_info)
242 {
243 _applyDevice(dev_info);
244 return *(dev_info.m_device_final_ptr);
245 }
// Host-side atomic min: lower '*vptr' to at most 'v' via a CAS loop.
246 static DataType apply(DataType* vptr, DataType v)
247 {
248 std::atomic_ref<DataType> aref(*vptr);
249 DataType prev_value = aref.load();
250 while (prev_value > v && !aref.compare_exchange_weak(prev_value, v)) {
251 }
252 return aref.load();
253 }
254#if defined(ARCANE_COMPILING_SYCL)
255 static sycl::minimum<DataType> syclFunctor() { return {}; }
256#endif
257
258 public:
259
260 ARCCORE_HOST_DEVICE static constexpr DataType identity() { return impl::ReduceIdentity<DataType>::minValue(); }
261
262 private:
263
264 static ARCCORE_DEVICE void _applyDevice(const ReduceDeviceInfo<DataType>& dev_info);
265};
266
267/*---------------------------------------------------------------------------*/
268/*---------------------------------------------------------------------------*/
269
270} // namespace Arcane::Accelerator::impl
271
272namespace Arcane::Accelerator
273{
274
275/*---------------------------------------------------------------------------*/
276/*---------------------------------------------------------------------------*/
277
// Common base of the host/device reducers.
// One 'master' instance is created on the host (first constructor); lambda
// captures then copy-construct per-work-item instances. Each copy combines
// its local value into its parent on destruction/finalization; the master
// instance performs the final host<->device gather in _reduce().
301template <typename DataType, typename ReduceFunctor>
303{
 public:
305
// Master-instance constructor: initializes every field to the functor's
// identity and acquires the device-side reduction buffers when an
// accelerator backend is active.
308 , m_command(&command)
309 {
310 //std::cout << String::format("Reduce main host this={0}\n",this); std::cout.flush();
311 m_is_master_instance = true;
312 m_identity = ReduceFunctor::identity();
313 m_local_value = m_identity;
314 m_atomic_value = m_identity;
315 m_atomic_parent_value = &m_atomic_value;
316 //printf("Create null host parent_value=%p this=%p\n",(void*)m_parent_value,(void*)this);
317 m_memory_impl = impl::internalGetOrCreateReduceMemoryImpl(&command);
318 if (m_memory_impl) {
319 m_host_or_device_memory_for_reduced_value = impl::allocateReduceDataMemory<DataType>(m_memory_impl, m_identity);
320 m_grid_memory_info = m_memory_impl->gridMemoryInfo();
321 }
322 }
323
// Copy constructor: this is how per-work-item instances are created when a
// reducer is captured by a kernel lambda. On the device only the grid
// memory info is copied; on the host the copy also resets its local value
// to the identity and remembers the parent's atomic accumulator.
324#if defined(__INTEL_LLVM_COMPILER) && defined(__SYCL_DEVICE_ONLY__)
326#else
327 ARCCORE_HOST_DEVICE HostDeviceReducerBase(const HostDeviceReducerBase& rhs)
328 : m_host_or_device_memory_for_reduced_value(rhs.m_host_or_device_memory_for_reduced_value)
329 , m_local_value(rhs.m_local_value)
330 , m_identity(rhs.m_identity)
331 {
332#ifdef ARCCORE_DEVICE_CODE
333 m_grid_memory_info = rhs.m_grid_memory_info;
334 //int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
335 //if (threadId==0)
336 //printf("Create ref device Id=%d parent=%p\n",threadId,&rhs);
337#else
338 m_memory_impl = rhs.m_memory_impl;
339 if (m_memory_impl) {
340 m_grid_memory_info = m_memory_impl->gridMemoryInfo();
341 }
342 //std::cout << String::format("Reduce: host copy this={0} rhs={1} mem={2} device_count={3}\n",this,&rhs,m_memory_impl,(void*)m_grid_device_count);
343 m_atomic_parent_value = rhs.m_atomic_parent_value;
344 m_local_value = rhs.m_identity;
345 m_atomic_value = m_identity;
346 //std::cout << String::format("Reduce copy host this={0} parent_value={1} rhs={2}\n",this,(void*)m_parent_value,&rhs); std::cout.flush();
347 //if (!rhs.m_is_master_instance)
348 //ARCANE_FATAL("Only copy from master instance is allowed");
349 //printf("Create ref host parent_value=%p this=%p rhs=%p\n",(void*)m_parent_value,(void*)this,(void*)&rhs);
350#endif
351 }
352#endif
353
// Move/assignment are forbidden: the parent/child accumulator links rely on
// stable addresses.
354 ARCCORE_HOST_DEVICE HostDeviceReducerBase(HostDeviceReducerBase&& rhs) = delete;
355 HostDeviceReducerBase& operator=(const HostDeviceReducerBase& rhs) = delete;
356
357 public:
358
// Sets the work-item-local partial value.
359 ARCCORE_HOST_DEVICE void setValue(DataType v)
360 {
361 m_local_value = v;
362 }
// Returns the work-item-local partial value (not the global result).
363 ARCCORE_HOST_DEVICE DataType localValue() const
364 {
365 return m_local_value;
366 }
367
368 protected:
369
370 impl::IReduceMemoryImpl* m_memory_impl = nullptr;
379
380 private:
381
382 RunCommand* m_command = nullptr;
383
384 protected:
385
// Per-instance partial value; 'mutable' so const work-item lambdas can combine.
386 mutable DataType m_local_value;
// Host path: pointer to the master instance's atomic accumulator.
387 DataType* m_atomic_parent_value = nullptr;
388 mutable DataType m_atomic_value;
389
390 private:
391
392 DataType m_identity;
393 //bool m_is_allocated = false;
394 bool m_is_master_instance = false;
395
396 protected:
397
// Performs the final reduction and returns the result. Master instance
// only, and only once (releases the device memory).
399 DataType _reduce()
400 {
401 if (!m_is_master_instance)
402 ARCANE_FATAL("Final reduce operation is only valid on master instance");
403 // If the reduction was done on the accelerator, the value has to be copied back from the device to the host.
405 if (m_memory_impl) {
406 m_memory_impl->copyReduceValueFromDevice();
407 final_ptr = reinterpret_cast<DataType*>(m_grid_memory_info.m_host_memory_for_reduced_value);
408 m_memory_impl->release();
409 m_memory_impl = nullptr;
410 }
411
// Merge the device-side result with the host-side atomic accumulator.
412 if (m_atomic_parent_value) {
413 //std::cout << String::format("Reduce host has parent this={0} local_value={1} parent_value={2}\n",
414 // this,m_local_value,*m_parent_value);
415 //std::cout.flush();
416 ReduceFunctor::apply(m_atomic_parent_value, *final_ptr);
417 *final_ptr = *m_atomic_parent_value;
418 }
419 else {
420 //std::cout << String::format("Reduce host no parent this={0} local_value={1} managed={2}\n",
421 // this,m_local_value,*m_host_or_device_memory_for_reduced_value);
422 //std::cout.flush();
423 }
424 return *final_ptr;
425 }
426
427 // NOTE: Once the V1 reduction is removed, this method will only be
428 // called from the device.
// Combines this instance's local value into the global result:
// device path runs the grid/atomic reduction, host path folds the local
// value into the parent's atomic accumulator.
429 ARCCORE_HOST_DEVICE void
430 _finalize()
431 {
432#ifdef ARCCORE_DEVICE_CODE
433 //int threadId = threadIdx.x + blockDim.x * threadIdx.y + (blockDim.x * blockDim.y) * threadIdx.z;
434 //if ((threadId%16)==0)
435 //printf("Destroy device Id=%d\n",threadId);
436 auto buf_span = m_grid_memory_info.m_grid_memory_values.bytes();
437 DataType* buf = reinterpret_cast<DataType*>(buf_span.data());
438 SmallSpan<DataType> grid_buffer(buf, static_cast<Int32>(buf_span.size()));
439
441 dvi.m_grid_buffer = grid_buffer;
442 dvi.m_device_count = m_grid_memory_info.m_grid_device_count;
444 dvi.m_host_final_ptr = m_grid_memory_info.m_host_memory_for_reduced_value;
445 dvi.m_current_value = m_local_value;
446 dvi.m_identity = m_identity;
447 dvi.m_use_grid_reduce = m_grid_memory_info.m_reduce_policy != eDeviceReducePolicy::Atomic;
448 ReduceFunctor::applyDevice(dvi); //grid_buffer,m_grid_device_count,m_host_or_device_memory_for_reduced_value,m_local_value,m_identity);
449#else
450 // printf("Destroy host parent_value=%p this=%p\n",(void*)m_parent_value,(void*)this);
451 // Host code
452 //std::cout << String::format("Reduce destructor this={0} parent_value={1} v={2} memory_impl={3}\n",this,(void*)m_parent_value,m_local_value,m_memory_impl);
453 //std::cout << String::format("Reduce destructor this={0} grid_data={1} grid_size={2}\n",
454 // this,(void*)m_grid_memory_value_as_bytes,m_grid_memory_size);
455 //std::cout.flush();
456 if (!m_is_master_instance)
457 ReduceFunctor::apply(m_atomic_parent_value, m_local_value);
458
459 //printf("Destroy host %p %p\n",m_host_or_device_memory_for_reduced_value,this);
460#endif
461 }
462};
463
464/*---------------------------------------------------------------------------*/
465/*---------------------------------------------------------------------------*/
// Version 1 of the reduction: the per-instance combine happens in the
// destructor (every lambda-captured copy calls _finalize() when it dies).
472template <typename DataType, typename ReduceFunctor>
474: public HostDeviceReducerBase<DataType, ReduceFunctor>
475{
 public:
477
479
480 public:
481
482 explicit HostDeviceReducer(RunCommand& command)
483 : BaseClass(command)
484 {}
485 HostDeviceReducer(const HostDeviceReducer& rhs) = default;
// Destructor folds this instance's local value into the global result.
486 ARCCORE_HOST_DEVICE ~HostDeviceReducer()
487 {
488 this->_finalize();
489 }
490
491 public:
492
// Returns the final reduced value. Master instance only, call once
// (see HostDeviceReducerBase::_reduce()).
493 DataType reduce()
494 {
495 return this->_reduce();
496 }
497
// Same as reduce(); kept so V1 and V2 reducers share the same API.
498 DataType reducedValue()
499 {
500 return this->_reduce();
501 }
502};
503
504/*---------------------------------------------------------------------------*/
505/*---------------------------------------------------------------------------*/
// Version 2 of the reduction: finalization is driven explicitly by the
// Arcane runtime through the private _internalReduce.../_internalExecWorkItem
// hooks instead of relying on destructor order.
509template <typename DataType, typename ReduceFunctor>
511: public HostDeviceReducerBase<DataType, ReduceFunctor>
512{
514 friend ::Arcane::impl::HostReducerHelper;
515
 public:
517
519 using BaseClass::m_grid_memory_info;
521 using BaseClass::m_local_value;
522
523 public:
524
525 explicit HostDeviceReducer2(RunCommand& command)
526 : BaseClass(command)
527 {}
528
529 public:
530
// Returns the final reduced value (master instance, once).
531 DataType reducedValue()
532 {
533 return this->_reduce();
534 }
535
536 private:
537
538 // Note: the _internalReduce...() methods are
539 // internal to Arcane.
540
// Host backend: fold this instance's local value into the parent accumulator.
541 void _internalReduceHost()
542 {
543 this->_finalize();
544 }
545
// CUDA/HIP backends: delegate to the common device-side finalization.
546#if defined(ARCANE_COMPILING_CUDA) || defined(ARCANE_COMPILING_HIP)
547 ARCCORE_HOST_DEVICE void _internalExecWorkItem(Int32)
548 {
549 this->_finalize();
550 };
551#endif
552
// SYCL backend: two-level reduction — reduce within the work-group, then
// the last group to finish combines the per-group partials sequentially.
553#if defined(ARCANE_COMPILING_SYCL)
554 void _internalExecWorkItem(sycl::nd_item<1> id)
555 {
556 unsigned int* atomic_counter_ptr = m_grid_memory_info.m_grid_device_count;
557 const Int32 local_id = static_cast<Int32>(id.get_local_id(0));
558 const Int32 group_id = static_cast<Int32>(id.get_group_linear_id());
559 const Int32 nb_block = static_cast<Int32>(id.get_group_range(0));
560
561 auto buf_span = m_grid_memory_info.m_grid_memory_values.bytes();
562 DataType* buf = reinterpret_cast<DataType*>(buf_span.data());
563 SmallSpan<DataType> grid_buffer(buf, static_cast<Int32>(buf_span.size()));
564
565 DataType v = m_local_value;
566 bool is_last = false;
567 auto sycl_functor = ReduceFunctor::syclFunctor();
568 DataType local_sum = sycl::reduce_over_group(id.get_group(), v, sycl_functor);
569 if (local_id == 0) {
570 grid_buffer[group_id] = local_sum;
571
572 // TODO: In theory the equivalent of a __threadfence() is needed here to
573 // guarantee the other work-items see the update of 'grid_buffer'.
574 // But no such mechanism exists in SYCL 2020.
575
576 // AdaptiveCpp 2024.2 does not support atomic operations on 'unsigned int'.
577 // They are supported for 'int'. Since we are certain not to exceed 2^31,
578 // the pointer is converted to an 'int*'.
579#if defined(__ADAPTIVECPP__)
580 int* atomic_counter_ptr_as_int = reinterpret_cast<int*>(atomic_counter_ptr);
581 sycl::atomic_ref<int, sycl::memory_order::relaxed, sycl::memory_scope::device> a(*atomic_counter_ptr_as_int);
582#else
583 sycl::atomic_ref<unsigned int, sycl::memory_order::relaxed, sycl::memory_scope::device> a(*atomic_counter_ptr);
584#endif
585 Int32 cx = a.fetch_add(1);
586 if (cx == (nb_block - 1))
587 is_last = true;
588 }
589
590 // I am the last one to do the reduction.
591 // Compute the final reduction.
592 if (is_last) {
593 DataType my_total = grid_buffer[0];
594 for (int x = 1; x < nb_block; ++x)
595 my_total = sycl_functor(my_total, grid_buffer[x]);
596 // Store the final result in the first element of the array.
597 grid_buffer[0] = my_total;
// Reset the counter so the buffers can be reused by the next kernel.
599 *atomic_counter_ptr = 0;
600 }
601 }
602#endif
};
604
605/*---------------------------------------------------------------------------*/
606/*---------------------------------------------------------------------------*/
// Reduction implementation for the SYCL backend: a plain value holder —
// the actual combining is performed by the SYCL runtime reduction, this
// class only stores and returns the local value.
613template <typename DataType, typename ReduceFunctor>
615{
 public:
617
618 explicit SyclReducer(RunCommand&) {}
619
620 public:
621
// Returns the stored value (already reduced by the SYCL runtime).
622 DataType reduce()
623 {
624 return m_local_value;
625 }
626 void setValue(DataType v) { m_local_value = v; }
627
628 protected:
629
// 'mutable' so const kernel lambdas can update it, matching the
// HostDeviceReducerBase convention.
630 mutable DataType m_local_value = {};
631};
632
633/*---------------------------------------------------------------------------*/
634/*---------------------------------------------------------------------------*/
635
636#if defined(ARCANE_COMPILING_SYCL)
637template <typename DataType, typename ReduceFunctor> using Reducer = SyclReducer<DataType, ReduceFunctor>;
638#else
639template <typename DataType, typename ReduceFunctor> using Reducer = HostDeviceReducer<DataType, ReduceFunctor>;
640#endif
641
642/*---------------------------------------------------------------------------*/
643/*---------------------------------------------------------------------------*/
644
645/*---------------------------------------------------------------------------*/
646/*---------------------------------------------------------------------------*/
// V1 'sum' reducer: combine() accumulates into the work-item-local value;
// the final result is obtained via reduce() on the master instance.
650template <typename DataType>
652: public Reducer<DataType, impl::ReduceFunctorSum<DataType>>
653{
655 using BaseClass::m_local_value;
656
 public:
658
659 explicit ReducerSum(RunCommand& command)
660 : BaseClass(command)
661 {}
662
663 public:
664
// Adds 'v' to the local partial sum and returns the updated partial sum.
665 ARCCORE_HOST_DEVICE DataType combine(DataType v) const
666 {
667 m_local_value += v;
668 return m_local_value;
669 }
670
// Historical alias of combine().
671 ARCCORE_HOST_DEVICE DataType add(DataType v) const
672 {
673 return combine(v);
674 }
675};
676
677/*---------------------------------------------------------------------------*/
678/*---------------------------------------------------------------------------*/
// V1 'max' reducer.
682template <typename DataType>
684: public Reducer<DataType, impl::ReduceFunctorMax<DataType>>
685{
687 using BaseClass::m_local_value;
688
 public:
690
691 explicit ReducerMax(RunCommand& command)
692 : BaseClass(command)
693 {}
694
695 public:
696
// Raises the local partial maximum to at least 'v' and returns it.
697 ARCCORE_HOST_DEVICE DataType combine(DataType v) const
698 {
699 m_local_value = v > m_local_value ? v : m_local_value;
700 return m_local_value;
701 }
702
// Historical alias of combine().
703 ARCCORE_HOST_DEVICE DataType max(DataType v) const
704 {
705 return combine(v);
706 }
707};
708
709/*---------------------------------------------------------------------------*/
710/*---------------------------------------------------------------------------*/
711
712/*---------------------------------------------------------------------------*/
713/*---------------------------------------------------------------------------*/
// V1 'min' reducer.
717template <typename DataType>
719: public Reducer<DataType, impl::ReduceFunctorMin<DataType>>
720{
722 using BaseClass::m_local_value;
723
 public:
725
726 explicit ReducerMin(RunCommand& command)
727 : BaseClass(command)
728 {}
729
730 public:
731
// Lowers the local partial minimum to at most 'v' and returns it.
732 ARCCORE_HOST_DEVICE DataType combine(DataType v) const
733 {
734 m_local_value = v < m_local_value ? v : m_local_value;
735 return m_local_value;
736 }
737
// Historical alias of combine().
738 ARCCORE_HOST_DEVICE DataType min(DataType v) const
739 {
740 return combine(v);
741 }
742};
743
744/*---------------------------------------------------------------------------*/
745/*---------------------------------------------------------------------------*/
746
747/*---------------------------------------------------------------------------*/
748/*---------------------------------------------------------------------------*/
// V2 'sum' reducer: combine() only updates the local partial value;
// finalization is orchestrated by the runtime (see HostDeviceReducer2).
752template <typename DataType>
754: public HostDeviceReducer2<DataType, impl::ReduceFunctorSum<DataType>>
755{
757
 public:
759
760 explicit ReducerSum2(RunCommand& command)
761 : BaseClass(command)
762 {}
763
764 public:
765
// Adds 'v' to the work-item-local partial sum.
766 ARCCORE_HOST_DEVICE void combine(DataType v)
767 {
768 this->m_local_value += v;
769 }
770};
771
772/*---------------------------------------------------------------------------*/
773/*---------------------------------------------------------------------------*/
// V2 'max' reducer.
777template <typename DataType>
779: public HostDeviceReducer2<DataType, impl::ReduceFunctorMax<DataType>>
780{
782
 public:
784
785 explicit ReducerMax2(RunCommand& command)
786 : BaseClass(command)
787 {}
788
789 public:
790
// Raises the work-item-local partial maximum to at least 'v'.
791 ARCCORE_HOST_DEVICE void combine(DataType v)
792 {
793 DataType& lv = this->m_local_value;
794 lv = v > lv ? v : lv;
795 }
796};
797
798/*---------------------------------------------------------------------------*/
799/*---------------------------------------------------------------------------*/
// V2 'min' reducer.
803template <typename DataType>
805: public HostDeviceReducer2<DataType, impl::ReduceFunctorMin<DataType>>
806{
808
 public:
810
811 explicit ReducerMin2(RunCommand& command)
812 : BaseClass(command)
813 {}
814
815 public:
816
// Lowers the work-item-local partial minimum to at most 'v'.
817 ARCCORE_HOST_DEVICE void combine(DataType v)
818 {
819 DataType& lv = this->m_local_value;
820 lv = v < lv ? v : lv;
821 }
822};
823
824/*---------------------------------------------------------------------------*/
825/*---------------------------------------------------------------------------*/
826
827} // End namespace Arcane::Accelerator
828
829/*---------------------------------------------------------------------------*/
830/*---------------------------------------------------------------------------*/
831// Cette macro est définie si on souhaite rendre inline l'implémentation.
832// Dans l'idéal il ne faut pas que ce soit le cas (ce qui permettrait de
833// changer l'implémentation sans tout recompiler) mais cela ne semble pas
834// bien fonctionner pour l'instant.
835
836#define ARCANE_INLINE_REDUCE_IMPL
837
838#ifdef ARCANE_INLINE_REDUCE_IMPL
839
840# ifndef ARCANE_INLINE_REDUCE
841# define ARCANE_INLINE_REDUCE inline
842# endif
843
844#if defined(__CUDACC__) || defined(__HIP__)
845# include "arcane/accelerator/CommonCudaHipReduceImpl.h"
846#else
847
848#endif
849
850#endif
851
852/*---------------------------------------------------------------------------*/
853/*---------------------------------------------------------------------------*/
854
855#include "arcane/utils/NumArray.h"
856#include "arcane/utils/FatalErrorException.h"
857#include "arcane/accelerator/core/RunQueue.h"
858
859/*---------------------------------------------------------------------------*/
860/*---------------------------------------------------------------------------*/
861
862namespace Arcane::Accelerator::impl
863{
864template <typename DataType>
865class GenericReducerIf;
866
867/*---------------------------------------------------------------------------*/
868/*---------------------------------------------------------------------------*/
869// Class that selects which 'Reducer2' instantiation to use for a given operator.
870// To be specialized (one specialization per operator below — presumably
871// Sum/Min/Max; the member typedefs are not visible in this view).
871template <typename DataType, typename Operator>
873
874template <typename DataType>
876{
 public:
878
880};
881template <typename DataType>
883{
 public:
885
887};
888template <typename DataType>
890{
 public:
892
894};
895
896/*---------------------------------------------------------------------------*/
897/*---------------------------------------------------------------------------*/
// Common state for the generic (type-erased) reduction algorithm:
// the run queue, the backend scratch storage, the device slot receiving the
// reduced value and its 1-element host mirror.
904template <typename DataType>
906{
907 friend class GenericReducerIf<DataType>;
908
 public:
910
911 GenericReducerBase(const RunQueue& queue)
912 : m_queue(queue)
913 {}
914
915 protected:
916
// Waits for the asynchronous device->host copy issued by apply() to
// complete, then returns the reduced value.
917 DataType _reducedValue() const
918 {
919 m_queue.barrier();
920 return m_host_reduce_storage[0];
921 }
922
// (Re)allocates the 1-element host-side result buffer, switching memory
// resource when needed.
923 void _allocate()
924 {
926 if (m_host_reduce_storage.memoryRessource() != r)
927 m_host_reduce_storage = NumArray<DataType, MDDim1>(r);
928 m_host_reduce_storage.resize(1);
929 }
930
931 protected:
932
933 RunQueue m_queue;
// Backend scratch buffer (e.g. cub/rocprim temporary storage).
934 GenericDeviceStorage m_algo_storage;
// Device slot receiving the reduced value.
935 DeviceStorage<DataType> m_device_reduce_storage;
// 1-element host mirror of the result, filled asynchronously.
936 NumArray<DataType, MDDim1> m_host_reduce_storage;
937};
938
939/*---------------------------------------------------------------------------*/
940/*---------------------------------------------------------------------------*/
// Dispatches the generic reduction to the backend selected by the queue's
// execution policy: cub (CUDA), rocprim (HIP), a Reducer2 kernel (SYCL) or a
// sequential host loop. The result is stored (possibly asynchronously) into
// s.m_host_reduce_storage[0]; callers synchronize via _reducedValue().
949template <typename DataType>
951{
952 // TODO: Do the malloc on the device associated with the queue,
953 // and also look into whether mallocAsync() can be used.
954
 public:
956
957 template <typename InputIterator, typename ReduceOperator>
958 void apply(GenericReducerBase<DataType>& s, Int32 nb_item, const DataType& init_value,
960 {
961 RunQueue& queue = s.m_queue;
962 RunCommand command = makeCommand(queue);
963 command << trace_info;
965 launch_info.beginExecute();
967 switch (exec_policy) {
968#if defined(ARCANE_COMPILING_CUDA)
970 size_t temp_storage_size = 0;
971 cudaStream_t stream = impl::CudaUtils::toNativeStream(queue);
972 DataType* reduced_value_ptr = nullptr;
973 // First call only queries the required temporary-storage size.
974 ARCANE_CHECK_CUDA(::cub::DeviceReduce::Reduce(nullptr, temp_storage_size, input_iter, reduced_value_ptr,
975 nb_item, reduce_op, init_value, stream));
976
977 s.m_algo_storage.allocate(temp_storage_size);
978 reduced_value_ptr = s.m_device_reduce_storage.allocate();
// Second call performs the actual reduction into device memory.
979 ARCANE_CHECK_CUDA(::cub::DeviceReduce::Reduce(s.m_algo_storage.address(), temp_storage_size,
981 reduce_op, init_value, stream));
982 s.m_device_reduce_storage.copyToAsync(s.m_host_reduce_storage, queue);
983 } break;
984#endif
985#if defined(ARCANE_COMPILING_HIP)
987 size_t temp_storage_size = 0;
988 hipStream_t stream = impl::HipUtils::toNativeStream(queue);
989 DataType* reduced_value_ptr = nullptr;
990 // First call only queries the required temporary-storage size.
991 ARCANE_CHECK_HIP(rocprim::reduce(nullptr, temp_storage_size, input_iter, reduced_value_ptr, init_value,
992 nb_item, reduce_op, stream));
993
994 s.m_algo_storage.allocate(temp_storage_size);
995 reduced_value_ptr = s.m_device_reduce_storage.allocate();
996
997 ARCANE_CHECK_HIP(rocprim::reduce(s.m_algo_storage.address(), temp_storage_size, input_iter, reduced_value_ptr, init_value,
998 nb_item, reduce_op, stream));
999 s.m_device_reduce_storage.copyToAsync(s.m_host_reduce_storage, queue);
1000 } break;
1001#endif
1002#if defined(ARCANE_COMPILING_SYCL)
// SYCL path: run a Reducer2-based kernel and read the result synchronously.
1004 {
1007 ReducerType reducer(command2);
1009 {
1010 auto [i] = iter();
1011 reducer.combine(input_iter[i]);
1012 };
1013 queue.barrier();
1014 s.m_host_reduce_storage[0] = reducer.reducedValue();
1015 }
1016 } break;
1017#endif
1019 // Not yet implemented in multi-thread mode
1020 [[fallthrough]];
// Sequential host fallback.
1022 DataType reduced_value = init_value;
1023 for (Int32 i = 0; i < nb_item; ++i) {
1025 ++input_iter;
1026 }
1027 s.m_host_reduce_storage[0] = reduced_value;
1028 } break;
1029 default:
1030 ARCANE_FATAL(getBadPolicyMessage(exec_policy));
1031 }
1032 launch_info.endExecute();
1033 }
};
1035
1036/*---------------------------------------------------------------------------*/
1037/*---------------------------------------------------------------------------*/
1038
1039} // namespace Arcane::Accelerator::impl
1040
1041/*---------------------------------------------------------------------------*/
1042/*---------------------------------------------------------------------------*/
1043
1044namespace Arcane::Accelerator
1045{
1046
1047/*---------------------------------------------------------------------------*/
1048/*---------------------------------------------------------------------------*/
// Public facade of the generic reduction algorithm. Usage protocol:
// call exactly one applyMin/applyMax/applySum(...) (or a ...WithIndex
// variant), then fetch the result with reducedValue(). _setCalled() enforces
// the one-apply-per-fetch protocol, and reducedValue() re-arms it.
1083template <typename DataType>
1085: private impl::GenericReducerBase<DataType>
1086{
 public:
1088
1089 explicit GenericReducer(const RunQueue& queue)
1091 {
1092 this->_allocate();
1093 }
1094
1095 public:
1096
// Applies a 'Min' reduction over 'values'.
1099 {
1100 _apply(values.size(), values.data(), impl::MinOperator<DataType>{}, trace_info);
1101 }
1102
// Applies a 'Max' reduction over 'values'.
1105 {
1106 _apply(values.size(), values.data(), impl::MaxOperator<DataType>{}, trace_info);
1107 }
1108
// Applies a 'Sum' reduction over 'values'.
1111 {
1112 _apply(values.size(), values.data(), impl::SumOperator<DataType>{}, trace_info);
1113 }
1114
// Index-based variants: reduce over nb_value items produced by a lambda.
1116 template <typename SelectLambda>
1121
1123 template <typename SelectLambda>
1128
1130 template <typename SelectLambda>
1135
// Returns the reduced value (blocks until the device copy completes) and
// re-arms the instance for the next apply...() call.
1137 DataType reducedValue()
1138 {
1139 m_is_already_called = false;
1140 return this->_reducedValue();
1141 }
1142
 private:
1144
1145 bool m_is_already_called = false;
1146
 private:
1148
1149 template <typename InputIterator, typename ReduceOperator>
1151 {
1152 _setCalled();
1155 DataType init_value = reduce_op.defaultValue();
1157 }
1158
1159 template <typename GetterLambda, typename ReduceOperator>
1160 void _applyWithIndex(Int32 nb_value, const GetterLambda& getter_lambda,
1162 {
1163 _setCalled();
1164 impl::GenericReducerBase<DataType>* base_ptr = this;
1165 impl::GenericReducerIf<DataType> gf;
1166 impl::GetterLambdaIterator<DataType, GetterLambda> input_iter(getter_lambda);
1167 DataType init_value = reduce_op.defaultValue();
1169 }
1170
// Guards against two apply...() calls without an intervening reducedValue().
1171 void _setCalled()
1172 {
1173 if (m_is_already_called)
1174 ARCANE_FATAL("apply() has already been called for this instance");
1175 m_is_already_called = true;
1176 }
};
1178
1179/*---------------------------------------------------------------------------*/
1180/*---------------------------------------------------------------------------*/
1181
1182} // namespace Arcane::Accelerator
1183
1184/*---------------------------------------------------------------------------*/
1185/*---------------------------------------------------------------------------*/
1186
1187#endif
1188
1189/*---------------------------------------------------------------------------*/
1190/*---------------------------------------------------------------------------*/
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
Types et macros pour gérer les boucles sur les accélérateurs.
#define RUNCOMMAND_LOOP1(iter_name, x1,...)
Boucle sur accélérateur avec arguments supplémentaires pour les réductions.
Algorithme générique de réduction sur accélérateur.
Definition Reduce.h:1086
DataType reducedValue()
Valeur de la réduction.
Definition Reduce.h:1137
void applyMinWithIndex(Int32 nb_value, const SelectLambda &select_lambda, const TraceInfo &trace_info=TraceInfo())
Applique une réduction 'Min' sur les valeurs sélectionnées par select_lambda.
Definition Reduce.h:1117
void applyMaxWithIndex(Int32 nb_value, const SelectLambda &select_lambda, const TraceInfo &trace_info=TraceInfo())
Applique une réduction 'Max' sur les valeurs sélectionnées par select_lambda.
Definition Reduce.h:1124
void applyMax(SmallSpan< const DataType > values, const TraceInfo &trace_info=TraceInfo())
Applique une réduction 'Max' sur les valeurs values.
Definition Reduce.h:1104
void applyMin(SmallSpan< const DataType > values, const TraceInfo &trace_info=TraceInfo())
Applique une réduction 'Min' sur les valeurs values.
Definition Reduce.h:1098
void applySum(SmallSpan< const DataType > values, const TraceInfo &trace_info=TraceInfo())
Applique une réduction 'Somme' sur les valeurs values.
Definition Reduce.h:1110
void applySumWithIndex(Int32 nb_value, const SelectLambda &select_lambda, const TraceInfo &trace_info=TraceInfo())
Applique une réduction 'Somme' sur les valeurs sélectionnées par select_lambda.
Definition Reduce.h:1131
Version 2 de la réduction.
Definition Reduce.h:512
DataType * m_host_or_device_memory_for_reduced_value
Pointeur vers la donnée qui contiendra la valeur réduite.
Definition Reduce.h:377
Opérateur de réduction.
Definition Reduce.h:303
DataType * m_host_or_device_memory_for_reduced_value
Pointeur vers la donnée qui contiendra la valeur réduite.
Definition Reduce.h:377
DataType _reduce()
Effectue la réduction et récupère la valeur. ATTENTION: ne faire qu'une seule fois.
Definition Reduce.h:399
Version 1 de la réduction.
Definition Reduce.h:475
Classe pour effectuer une réduction 'max'.
Definition Reduce.h:780
Classe pour effectuer une réduction 'max'.
Definition Reduce.h:685
Classe pour effectuer une réduction 'min'.
Definition Reduce.h:806
Classe pour effectuer une réduction 'min'.
Definition Reduce.h:720
Classe pour effectuer une réduction 'somme'.
Definition Reduce.h:755
Classe pour effectuer une réduction 'somme'.
Definition Reduce.h:653
Gestion d'une commande sur accélérateur.
File d'exécution pour un accélérateur.
void barrier() const
Bloque tant que toutes les commandes associées à la file ne sont pas terminées.
Definition RunQueue.cc:158
eExecutionPolicy executionPolicy() const
Politique d'exécution de la file.
Definition RunQueue.cc:168
Implémentation de la réduction pour le backend SYCL.
Definition Reduce.h:615
Gère l'allocation interne sur le device.
Definition CommonUtils.h:50
Classe de base pour effectuer une réduction.
Definition Reduce.h:906
Classe pour effectuer un partitionnement d'une liste.
Definition Reduce.h:951
Interface de la gestion mémoire pour les réductions.
virtual GridMemoryInfo gridMemoryInfo()=0
Informations sur la mémoire utilisée par la réduction.
virtual void copyReduceValueFromDevice()=0
Copie la valeur réduite depuis le device vers l'hôte.
virtual void release()=0
Libère l'instance.
Classe pour appliquer la finalisation des réductions.
Opérateur de Scan/Reduce pour le maximum.
Opérateur de Scan/Reduce pour le minimum.
Informations pour effectuer une réduction sur un device.
Definition Reduce.h:97
DataType * m_device_final_ptr
Pointeur vers la donnée réduite (mémoire uniquement accessible depuis le device)
Definition Reduce.h:105
bool m_use_grid_reduce
Indique si on utilise la réduction par grille (sinon on utilise les atomiques)
Definition Reduce.h:117
void * m_host_final_ptr
Pointeur vers la donnée réduite (mémoire uniquement accessible depuis l'hôte)
Definition Reduce.h:107
DataType m_current_value
Valeur du thread courant à réduire.
Definition Reduce.h:101
SmallSpan< DataType > m_grid_buffer
Tableau avec une valeur par bloc pour la réduction.
Definition Reduce.h:109
DataType m_identity
Valeur de l'identité pour la réduction.
Definition Reduce.h:103
Objet temporaire pour conserver les informations d'exécution d'une commande et regrouper les tests.
Opérateur de Scan/Reduce pour les sommes.
Lecteur des fichiers de maillage via la bibliothèque LIMA.
Definition Lima.cc:120
Vue d'un tableau d'éléments de type T.
Definition Span.h:670
Espace de nom pour l'utilisation des accélérateurs.
RunCommand makeCommand(const RunQueue &run_queue)
Créé une commande associée à la file run_queue.
@ Atomic
Utilise des opérations atomiques entre les blocs.
eExecutionPolicy
Politique d'exécution pour un Runner.
@ SYCL
Politique d'exécution utilisant l'environnement SYCL.
@ HIP
Politique d'exécution utilisant l'environnement HIP.
@ CUDA
Politique d'exécution utilisant l'environnement CUDA.
@ Sequential
Politique d'exécution séquentielle.
@ Thread
Politique d'exécution multi-thread.
eMemoryRessource
Liste des ressources mémoire disponibles.
@ HostPinned
Alloue sur l'hôte.
Espace de nom de Arccore.
Definition ArcaneTypes.h:24
std::int64_t Int64
Type entier signé sur 64 bits.
std::int32_t Int32
Type entier signé sur 32 bits.
Informations mémoire pour la réduction sur les accélérateurs.
void * m_host_memory_for_reduced_value
Pointeur vers la mémoire sur l'hôte contenant la valeur réduite.
eDeviceReducePolicy m_reduce_policy
Politique de réduction.
MutableMemoryView m_grid_memory_values
Mémoire allouée pour la réduction sur une grille (de taille nb_bloc * sizeof(T))
unsigned int * m_grid_device_count
Entier utilisé pour compter le nombre de blocs ayant déjà fait leur partie de la réduction.