14#include "arcane/utils/Collection.h"
15#include "arcane/utils/Enumerator.h"
16#include "arcane/utils/Iostream.h"
17#include "arcane/utils/StringBuilder.h"
18#include "arcane/utils/IOException.h"
19#include "arcane/utils/FixedArray.h"
20#include "arcane/utils/MemoryView.h"
22#include "arcane/core/PostProcessorWriterBase.h"
23#include "arcane/core/Directory.h"
24#include "arcane/core/FactoryService.h"
25#include "arcane/core/IDataWriter.h"
26#include "arcane/core/IData.h"
27#include "arcane/core/IItemFamily.h"
28#include "arcane/core/VariableCollection.h"
29#include "arcane/core/IParallelMng.h"
30#include "arcane/core/IMesh.h"
31#include "arcane/core/internal/VtkCellTypes.h"
33#include "arcane/core/materials/IMeshMaterialMng.h"
34#include "arcane/core/materials/IMeshEnvironment.h"
36#include "arcane/hdf5/Hdf5Utils.h"
37#include "arcane/hdf5/VtkHdfV2PostProcessor_axl.h"
71using namespace Hdf5Utils;
76 template <
typename T> Span<const T>
77 asConstSpan(
const T* v)
79 return Span<const T>(v, 1);
134 bool isNull()
const {
return m_name.
null(); }
136 HGroup* group()
const {
return m_group; }
137 const String& name()
const {
return m_name; }
140 void setOffset(
Int64 v) { m_offset = v; }
141 friend bool operator<(
const DatasetInfo&
s1,
const DatasetInfo&
s2)
143 return (
s1.m_name <
s2.m_name);
148 HGroup* m_group =
nullptr;
220 DatasetInfo datasetInfo()
const {
return m_dataset_info; }
236 void endWrite()
override;
274 HGroup m_point_data_offsets_group;
275 HGroup m_cell_data_offsets_group;
276 HGroup m_field_data_offsets_group;
278 bool m_is_parallel =
false;
279 bool m_is_master_io =
false;
280 bool m_is_collective_io =
false;
281 bool m_is_first_call =
false;
282 bool m_is_writer =
false;
290 std::map<DatasetInfo, Int64> m_offset_info_list;
294 ItemGroupCollectiveInfo m_all_cells_info;
295 ItemGroupCollectiveInfo m_all_nodes_info;
309 void _addStringAttribute(
Hid&
hid,
const char* name,
const String& value);
311 template <
typename DataType>
void
313 template <
typename DataType>
void
315 template <
typename DataType>
void
317 template <
typename DataType>
void
319 template <
typename DataType>
void
321 template <
typename DataType>
void
323 template <
typename DataType>
void
332 return sb.toString();
334 template <
typename DataType>
void
341 void _addInt64Attribute(
Hid&
hid,
const char* name,
Int64 value);
342 Int64 _readInt64Attribute(
Hid&
hid,
const char* name);
343 void _openOrCreateGroups();
346 void _initializeOffsets();
347 void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo&
group_info);
349 void _writeConstituentsGroups();
357: TraceAccessor(mesh->traceMng())
360, m_is_collective_io(is_collective_io)
361, m_all_cells_info(mesh->allCells())
362, m_all_nodes_info(mesh->allNodes())
369void VtkHdfV2DataWriter::
370beginWrite(
const VariableCollection& vars)
379 m_is_parallel = nb_rank > 1;
383 const bool is_first_call = (time_index < 2);
384 m_is_first_call = is_first_call;
386 info() <<
"WARNING: L'implémentation au format 'VtkHdfV2' est expérimentale";
388 String filename = _getFileName();
402 m_is_collective_io =
false;
405 info() <<
"VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;
414 m_is_writer = m_is_master_io || m_is_collective_io;
418 if (m_is_collective_io)
419 plist_id.createFilePropertyMPIIO(pm);
421 if (is_first_call && m_is_master_io)
422 dir.createDirectory();
424 if (m_is_collective_io)
435 _openOrCreateGroups();
438 std::array<Int64, 2> version = { 2, 0 };
439 _addInt64ArrayAttribute(m_top_group,
"Version", version);
440 _addStringAttribute(m_top_group,
"Type",
"UnstructuredGrid");
445 _initializeItemGroupCollectiveInfos(m_all_cells_info);
446 _initializeItemGroupCollectiveInfos(m_all_nodes_info);
451 const Int32 nb_cell = all_cells.size();
452 const Int32 nb_node = all_nodes.size();
454 Int32 total_nb_connected_node = 0;
457 total_nb_connected_node += cell.nodeIds().size();
462 UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
463 UniqueArray<Int64> cells_offset(nb_cell + 1);
464 UniqueArray<unsigned char> cells_ghost_type(nb_cell);
465 UniqueArray<unsigned char> cells_type(nb_cell);
466 UniqueArray<Int64> cells_uid(nb_cell);
469 Int32 connected_node_index = 0;
471 Int32 index = icell.index();
474 cells_uid[index] = cell.uniqueId();
477 bool is_ghost = !cell.isOwn();
479 ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
480 cells_ghost_type[index] = ghost_type;
482 unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
483 cells_type[index] = vtk_type;
484 for (NodeLocalId node : cell.nodeIds()) {
485 cells_connectivity[connected_node_index] = node;
486 ++connected_node_index;
488 cells_offset[index + 1] = connected_node_index;
492 _initializeOffsets();
495 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Offsets" }, m_offset_for_cell_offset_info }, cells_offset);
497 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Connectivity" }, m_connectivity_offset_info },
499 _writeDataSet1DCollective<unsigned char>({ { m_top_group,
"Types" }, m_cell_offset_info }, cells_type);
502 Int64 nb_cell_int64 = nb_cell;
503 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfCells" }, m_part_offset_info },
504 asConstSpan(&nb_cell_int64));
505 Int64 nb_node_int64 = nb_node;
506 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfPoints" }, m_part_offset_info },
507 asConstSpan(&nb_node_int64));
508 Int64 number_of_connectivity_ids = cells_connectivity.size();
509 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfConnectivityIds" }, m_part_offset_info },
510 asConstSpan(&number_of_connectivity_ids));
515 UniqueArray<Int64> nodes_uid(nb_node);
516 UniqueArray<unsigned char> nodes_ghost_type(nb_node);
518 UniqueArray2<Real> points;
519 points.resize(nb_node, 3);
521 Int32 index = inode.index();
524 nodes_uid[index] = node.uniqueId();
527 bool is_ghost = !node.isOwn();
529 ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
530 nodes_ghost_type[index] = ghost_type;
532 Real3 pos = nodes_coordinates[inode];
533 points[index][0] = pos.x;
534 points[index][1] = pos.y;
535 points[index][2] = pos.z;
539 _writeDataSet1DCollective<Int64>({ { m_node_data_group,
"GlobalNodeId" }, m_cell_offset_info }, nodes_uid);
542 _writeDataSet1DCollective<unsigned char>({ { m_node_data_group,
"vtkGhostType" }, m_cell_offset_info }, nodes_ghost_type);
545 _writeDataSet2DCollective<Real>({ { m_top_group,
"Points" }, m_point_offset_info }, points);
549 _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group,
"vtkGhostType" }, m_cell_offset_info }, cells_ghost_type);
553 _writeDataSet1DCollective<Int64>({ { m_cell_data_group,
"GlobalCellId" }, m_cell_offset_info }, cells_uid);
558 _writeDataSet1D<Real>({ { m_steps_group,
"Values" }, m_time_offset_info }, asConstSpan(¤t_time));
562 _writeDataSet1D<Int64>({ { m_steps_group,
"PartOffsets" }, m_time_offset_info }, asConstSpan(&part_offset));
565 _addInt64Attribute(m_steps_group,
"NSteps", time_index);
568 _writeConstituentsGroups();
574void VtkHdfV2DataWriter::
575_writeConstituentsGroups()
584 Ref<ItemGroupCollectiveInfo> group_info_ref = createRef<ItemGroupCollectiveInfo>(cells);
585 m_materials_groups.add(group_info_ref);
586 ItemGroupCollectiveInfo& group_info = *group_info_ref.get();
587 _initializeItemGroupCollectiveInfos(group_info);
588 ConstArrayView<Int32> groups_ids = cells.view().localIds();
589 DatasetGroupAndName dataset_group_name(m_top_group, String(
"Constituent_") + cells.name());
591 info() <<
"Writing infos for group '" << cells.name() <<
"'";
592 _writeDataSet1DCollective<Int32>({ dataset_group_name, m_cell_offset_info }, groups_ids);
615 for (Integer i = 0; i < nb_rank; ++i)
619 for (Integer i = 0; i <
my_rank; ++i)
632void VtkHdfV2DataWriter::
633_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo&
group_info)
641 std::pair<Int64, Int64> _getInterval(
Int64 index,
Int64 nb_interval,
Int64 total_size)
643 Int64 n = total_size;
644 Int64 isize = n / nb_interval;
645 Int64 ibegin = index * isize;
647 if ((index + 1) == nb_interval)
649 return { ibegin, isize };
661void VtkHdfV2DataWriter::
677 static constexpr int MAX_DIM = 2;
728 if (m_is_first_call) {
760 dataset.open(group, name.localstr());
833template <
typename DataType>
void VtkHdfV2DataWriter::
838 const hid_t hdf_type = m_standard_types.nativeType(DataType{});
839 ConstMemoryView mem_view = makeConstMemoryView(values_data,
sizeof(DataType), dim1_size * dim2_size);
840 _writeDataSetGeneric(data_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
846template <
typename DataType>
void VtkHdfV2DataWriter::
847_writeDataSet1D(
const DataInfo& data_info, Span<const DataType> values)
849 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
false);
855template <
typename DataType>
void VtkHdfV2DataWriter::
856_writeDataSet1DUsingCollectiveIO(
const DataInfo& data_info, Span<const DataType> values)
858 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
true);
864template <
typename DataType>
void VtkHdfV2DataWriter::
865_writeDataSet1DCollective(
const DataInfo& data_info, Span<const DataType> values)
868 return _writeDataSet1D(data_info, values);
869 if (m_is_collective_io)
870 return _writeDataSet1DUsingCollectiveIO(data_info, values);
871 UniqueArray<DataType> all_values;
875 _writeDataSet1D<DataType>(data_info, all_values);
881template <
typename DataType>
void VtkHdfV2DataWriter::
882_writeDataSet2D(
const DataInfo& data_info, Span2<const DataType> values)
884 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
false);
890template <
typename DataType>
void VtkHdfV2DataWriter::
891_writeDataSet2DUsingCollectiveIO(
const DataInfo& data_info, Span2<const DataType> values)
893 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
true);
899template <
typename DataType>
void VtkHdfV2DataWriter::
900_writeDataSet2DCollective(
const DataInfo& data_info, Span2<const DataType> values)
903 return _writeDataSet2D(data_info, values);
904 if (m_is_collective_io)
905 return _writeDataSet2DUsingCollectiveIO(data_info, values);
907 Int64 dim2_size = values.dim2Size();
908 UniqueArray<DataType> all_values;
910 Span<const DataType> values_1d(values.data(), values.totalNbElement());
912 if (m_is_master_io) {
913 Int64 dim1_size = all_values.size();
915 dim1_size = dim1_size / dim2_size;
916 Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
917 return _writeDataSet2D<DataType>(data_info, span2);
924void VtkHdfV2DataWriter::
925_addInt64ArrayAttribute(Hid& hid,
const char* name, Span<const Int64> values)
927 hsize_t
len = values.size();
928 hid_t aid = H5Screate_simple(1, &len,
nullptr);
929 hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
932 int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
942void VtkHdfV2DataWriter::
943_addInt64Attribute(Hid& hid,
const char* name,
Int64 value)
945 HSpace aid(H5Screate(H5S_SCALAR));
948 attr.create(hid, name, H5T_NATIVE_INT64, aid);
950 attr.open(hid, name);
953 herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
961Int64 VtkHdfV2DataWriter::
962_readInt64Attribute(Hid& hid,
const char* name)
965 attr.open(hid, name);
969 herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
978void VtkHdfV2DataWriter::
979_addStringAttribute(Hid& hid,
const char* name,
const String& value)
981 hid_t aid = H5Screate(H5S_SCALAR);
982 hid_t attr_type = H5Tcopy(H5T_C_S1);
983 H5Tset_size(attr_type, value.length());
984 hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
987 int ret = H5Awrite(attr, attr_type, value.localstr());
988 ret = H5Tclose(attr_type);
998void VtkHdfV2DataWriter::
1004 for (
const auto& i : m_offset_info_list) {
1005 Int64 offset = i.second;
1006 const DatasetInfo& offset_info = i.first;
1007 HGroup* hdf_group = offset_info.group();
1010 _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, asConstSpan(&offset));
1020void VtkHdfV2DataWriter::
1021_openOrCreateGroups()
1024 m_top_group.openOrCreate(
m_file_id,
"VTKHDF");
1025 m_cell_data_group.openOrCreate(m_top_group,
"CellData");
1026 m_node_data_group.openOrCreate(m_top_group,
"PointData");
1027 m_steps_group.openOrCreate(m_top_group,
"Steps");
1028 m_point_data_offsets_group.openOrCreate(m_steps_group,
"PointDataOffsets");
1029 m_cell_data_offsets_group.openOrCreate(m_steps_group,
"CellDataOffsets");
1030 m_field_data_offsets_group.openOrCreate(m_steps_group,
"FieldDataOffsets");
1036void VtkHdfV2DataWriter::
1039 m_cell_data_group.close();
1040 m_node_data_group.close();
1041 m_point_data_offsets_group.close();
1042 m_cell_data_offsets_group.close();
1043 m_field_data_offsets_group.close();
1044 m_steps_group.close();
1045 m_top_group.close();
1063 info(4) <<
"Write VtkHdfV2 var=" <<
var->name();
1067 if (
var->dimension() != 1)
1068 ARCANE_FATAL(
"Only export of scalar item variable is implemented (name={0})",
var->name());
1069 if (
var->isPartial())
1070 ARCANE_FATAL(
"Export of partial variable is not implemented");
1075 switch (item_kind) {
1077 group = &m_cell_data_group;
1082 group = &m_node_data_group;
1087 ARCANE_FATAL(
"Only export of 'Cell' or 'Node' variable is implemented (name={0})",
var->name());
1111 warning() << String::format(
"Export for datatype '{0}' is not supported (var_name={1})",
data_type,
var->name());
1118template <
typename DataType>
void VtkHdfV2DataWriter::
1129void VtkHdfV2DataWriter::
1130_writeReal3Dataset(
const DataInfo& data_info, IData* data)
1132 auto* true_data =
dynamic_cast<IArrayDataT<Real3>*
>(data);
1134 SmallSpan<const Real3> values(true_data->view());
1135 Int32 nb_value = values.size();
1137 UniqueArray2<Real> scalar_values;
1138 scalar_values.resize(nb_value, 3);
1139 for (
Int32 i = 0; i < nb_value; ++i) {
1140 Real3 v = values[i];
1141 scalar_values[i][0] = v.x;
1142 scalar_values[i][1] = v.y;
1143 scalar_values[i][2] = v.z;
1145 _writeDataSet2DCollective<Real>(data_info, scalar_values);
1151void VtkHdfV2DataWriter::
1152_writeReal2Dataset(
const DataInfo& data_info, IData* data)
1155 auto* true_data =
dynamic_cast<IArrayDataT<Real2>*
>(data);
1157 SmallSpan<const Real2> values(true_data->view());
1158 Int32 nb_value = values.size();
1159 UniqueArray2<Real> scalar_values;
1160 scalar_values.resize(nb_value, 3);
1161 for (
Int32 i = 0; i < nb_value; ++i) {
1162 Real2 v = values[i];
1163 scalar_values[i][0] = v.x;
1164 scalar_values[i][1] = v.y;
1165 scalar_values[i][2] = 0.0;
1167 _writeDataSet2DCollective<Real>(data_info, scalar_values);
1173void VtkHdfV2DataWriter::
1174_readAndSetOffset(DatasetInfo& offset_info,
Int32 wanted_step)
1176 HGroup* hgroup = offset_info.group();
1178 StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
1179 UniqueArray<Int64> values;
1180 a.directRead(m_standard_types, values);
1181 Int64 offset_value = values[wanted_step];
1182 offset_info.setOffset(offset_value);
1183 info() <<
"VALUES name=" << offset_info.name() <<
" values=" << values
1184 <<
" wanted_step=" << wanted_step <<
" v=" << offset_value;
1190void VtkHdfV2DataWriter::
1211 m_cell_offset_info = DatasetInfo(m_steps_group,
"CellOffsets");
1212 m_point_offset_info = DatasetInfo(m_steps_group,
"PointOffsets");
1213 m_connectivity_offset_info = DatasetInfo(m_steps_group,
"ConnectivityIdOffsets");
1215 m_offset_for_cell_offset_info = DatasetInfo(
"_OffsetForCellOffsetInfo");
1216 m_part_offset_info = DatasetInfo(
"_PartOffsetInfo");
1217 m_time_offset_info = DatasetInfo(
"_TimeOffsetInfo");
1222 if (m_is_writer && !m_is_first_call) {
1225 Int64 nb_current_step = _readInt64Attribute(m_steps_group,
"NSteps");
1227 info(4) <<
"NB_STEP=" << nb_current_step <<
" time_index=" << time_index
1229 const bool debug_times =
false;
1231 StandardArrayT<Real> a1(m_steps_group.id(),
"Values");
1232 UniqueArray<Real> times;
1233 a1.directRead(m_standard_types, times);
1234 info() <<
"TIMES=" << times;
1236 if ((nb_current_step + 1) != time_index) {
1237 info() <<
"[VtkHdf] go_backward detected";
1238 Int32 wanted_step = time_index - 1;
1241 _readAndSetOffset(m_cell_offset_info, wanted_step);
1242 _readAndSetOffset(m_point_offset_info, wanted_step);
1243 _readAndSetOffset(m_connectivity_offset_info, wanted_step);
1244 m_part_offset_info.setOffset(wanted_step * nb_rank);
1245 m_time_offset_info.setOffset(wanted_step);
1246 m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.
offset() + wanted_step * nb_rank);
1282 w->setDirectoryName(
dir.file(
"vtkhdfv2"));
1283 m_writer = std::move(
w);
1293 std::unique_ptr<IDataWriter> m_writer;
1299ARCANE_REGISTER_SERVICE_VTKHDFV2POSTPROCESSOR(VtkHdfV2PostProcessor,
1300 VtkHdfV2PostProcessor);
#define ARCANE_CHECK_POINTER(ptr)
Macro retournant le pointeur ptr s'il est non nul ou lançant une exception s'il est nul.
#define ARCANE_THROW(exception_class,...)
Macro pour envoyer une exception avec formatage.
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
Génération de la classe de base du Service.
CaseOptionsVtkHdfV2PostProcessor * options() const
Options du jeu de données du service.
Classe gérant un répertoire.
Encapsule un hid_t pour un dataset.
Encapsule un hid_t pour un fichier.
Encapsule un hid_t pour un groupe.
static bool hasParallelHdf5()
Vrai si HDF5 est compilé avec le support de MPI.
Encapsule un hid_t pour une propriété (H5P*).
Encapsule un hid_t pour un dataspace.
Définition des types standards Arcane pour hdf5.
void initialize()
Initialise les types.
Interface d'écriture des données d'une variable.
virtual String name() const =0
Nom du maillage.
virtual NodeGroup allNodes()=0
Groupe de tous les noeuds.
virtual CellGroup allCells()=0
Groupe de toutes les mailles.
virtual VariableNodeReal3 & nodesCoordinates()=0
Coordonnées des noeuds.
virtual IParallelMng * parallelMng()=0
Gestionnaire de parallélisme.
Exception lorsqu'une erreur d'entrée/sortie est détectée.
Interface du gestionnaire de parallélisme pour un sous-domaine.
virtual void gatherVariable(ConstArrayView< char > send_buf, Array< char > &recv_buf, Int32 rank)=0
Effectue un regroupement sur tous les processeurs.
virtual bool isThreadImplementation() const =0
Indique si l'implémentation utilise les threads.
virtual Int32 commRank() const =0
Rang de cette instance dans le communicateur.
virtual bool isMasterIO() const =0
true si l'instance est un gestionnaire maître des entrées/sorties.
virtual Int32 commSize() const =0
Nombre d'instance dans le communicateur.
virtual bool isHybridImplementation() const =0
Indique si l'implémentation utilise le mode hybride.
virtual void allGather(ConstArrayView< char > send_buf, ArrayView< char > recv_buf)=0
Effectue un regroupement sur tous les processeurs. Il s'agit d'une opération collective....
virtual Integer masterIORank() const =0
Rang de l'instance gérant les entrées/sorties (pour laquelle isMasterIO() est vrai)
virtual bool isParallel() const =0
Retourne true si l'exécution est parallèle.
virtual void barrier()=0
Effectue une barrière.
Interface d'une variable.
Groupe d'entités de maillage.
Lecteur des fichiers de maillage via la bibliothèque LIMA.
Interface du gestionnaire des matériaux et des milieux d'un maillage.
static IMeshMaterialMng * getReference(const MeshHandleOrMesh &mesh_handle, bool create=true)
Récupère ou créé la référence associée à mesh.
virtual RealConstArrayView times()
Liste des temps sauvés.
virtual const String & baseDirectoryName()
Nom du répertoire de sortie des fichiers.
virtual ItemGroupCollection groups()
Liste des groupes à sauver.
Structure contenant les informations pour créer un service.
void write(IVariable *var, IData *data) override
Ecrit les données data de la variable var.
IMeshMaterialMng * m_material_mng
Gestionnaire de matériaux associé (peut être nul)
Int64 m_max_write_size
Taille maximale (en kilo-octet) pour une écriture.
String m_directory_name
Répertoire de sortie.
WritePartInfo _computeWritePartInfo(Int64 local_size)
Calcule l'offset de notre partie et le nombre total d'éléments.
IMesh * m_mesh
Maillage associé
HFile m_file_id
Identifiant HDF du fichier.
ItemGroupCollection m_groups
Liste des groupes à sauver.
UniqueArray< Real > m_times
Liste des temps.
void setMetaData(const String &meta_data) override
Positionne les infos des méta-données.
String m_full_filename
Nom du fichier HDF courant.
Post-traitement au format VtkHdf V2.
IDataWriter * dataWriter() override
Retourne l'écrivain associé à ce post-processeur.
void close() override
Ferme l'écrivain. Après fermeture, il ne peut plus être utilisé
void notifyBeginWrite() override
Notifie qu'une sortie va être effectuée avec les paramètres courants.
void notifyEndWrite() override
Notifie qu'une sortie vient d'être effectuée.
Integer size() const
Nombre d'éléments du vecteur.
T & back()
Dernier élément du tableau.
Vue constante d'un tableau de type T.
Constructeur de chaîne de caractère unicode.
Chaîne de caractères unicode.
bool null() const
Retourne true si la chaîne est nulle.
Classe d'accès aux traces.
TraceMessage warning() const
Flot pour un message d'avertissement.
TraceMessage info() const
Flot pour un message d'information.
ARCCORE_HOST_DEVICE Real2 min(Real2 a, Real2 b)
Retourne le minimum de deux Real2.
ItemGroupT< Cell > CellGroup
Groupe de mailles.
ItemGroupT< Node > NodeGroup
Groupe de noeuds.
MeshVariableScalarRefT< Node, Real3 > VariableNodeReal3
Grandeur au noeud de type coordonnées.
Integer len(const char *s)
Retourne la longueur de la chaîne s.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Collection< ItemGroup > ItemGroupCollection
Collection de groupes d'éléments du maillage.
eItemKind
Genre d'entité de maillage.
@ IK_Node
Entité de maillage de genre noeud.
@ IK_Cell
Entité de maillage de genre maille.
eDataType
Type d'une donnée.
@ DT_Int32
Donnée de type entier 32 bits.
@ DT_Real3
Donnée de type vecteur 3.
@ DT_Int64
Donnée de type entier 64 bits.
@ DT_Real2
Donnée de type vecteur 2.
@ DT_Real
Donnée de type réel.
unsigned char Byte
Type d'un octet.
Ensemble des classes assurant la gestion des matériaux et des milieux.
Conserve les infos sur les données à sauver et l'offset associé.
Classe pour conserver un couple (hdf_group,nom_du_dataset).
Classe pour conserver les information d'un offset.
Int64 offset() const
Valeur de l'offset. (-1) si on écrit à la fin du tableau.
Informations collectives sur un ItemGroup.
WritePartInfo m_write_part_info
Informations sur l'écriture.
ItemGroup m_item_group
Groupe associé
Informations sur l'offset de la partie à écrire associée à un rang.
Int64 m_offset
Offset de mon rang.
Int64 m_size
Nombre d'éléments de mon rang.
Int64 m_total_size
Nombre d'éléments sur tous les rangs.