14#include "arcane/utils/Collection.h"
15#include "arcane/utils/Enumerator.h"
16#include "arcane/utils/Iostream.h"
17#include "arcane/utils/StringBuilder.h"
18#include "arcane/utils/IOException.h"
19#include "arcane/utils/FixedArray.h"
20#include "arcane/utils/MemoryView.h"
22#include "arcane/core/PostProcessorWriterBase.h"
23#include "arcane/core/Directory.h"
24#include "arcane/core/FactoryService.h"
25#include "arcane/core/IDataWriter.h"
26#include "arcane/core/IData.h"
27#include "arcane/core/IItemFamily.h"
28#include "arcane/core/VariableCollection.h"
29#include "arcane/core/IParallelMng.h"
30#include "arcane/core/IMesh.h"
32#include "arcane/std/Hdf5Utils.h"
33#include "arcane/std/VtkHdfV2PostProcessor_axl.h"
34#include "arcane/std/internal/VtkCellTypes.h"
using namespace Hdf5Utils;
template <typename T> Span<const T>
asConstSpan(const T* v)
{
  return Span<const T>(v, 1);
}
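// Illustrative use (a sketch mirroring the calls made in beginWrite() below):
// wrapping a single scalar lets it flow through the Span-based 1D writers,
// e.g.
//   Int64 nb_cell_int64 = nb_cell;
//   _writeDataSet1DCollective<Int64>(data_info, asConstSpan(&nb_cell_int64));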
// (from class DatasetInfo, excerpt)
bool isNull() const { return m_name.null(); }
HGroup* group() const { return m_group; }
const String& name() const { return m_name; }
Int64 offset() const { return m_offset; }
void setOffset(Int64 v) { m_offset = v; }
friend bool operator<(const DatasetInfo& s1, const DatasetInfo& s2)
{
  return (s1.m_name < s2.m_name);
}
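// Note: ordering DatasetInfo by dataset name is what allows it to serve as
// the key of the std::map<DatasetInfo, Int64> m_offset_info_list member
// declared further down.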
// ...
HGroup* m_group = nullptr;
// ...
// (from class WritePartInfo, excerpt)
void setSize(Int64 v) { m_size = v; }
void setOffset(Int64 v) { m_offset = v; }
// ...
Int64 size() const { return m_size; }
Int64 offset() const { return m_offset; }
// ...
DatasetInfo datasetInfo() const { return m_dataset_info; }
// ...
void endWrite() override;
// ...
IMesh* m_mesh = nullptr;
// ...
HGroup m_point_data_offsets_group;
HGroup m_cell_data_offsets_group;
HGroup m_field_data_offsets_group;
// ...
bool m_is_parallel = false;
bool m_is_master_io = false;
bool m_is_collective_io = false;
bool m_is_first_call = false;
bool m_is_writer = false;
// ...
std::map<DatasetInfo, Int64> m_offset_info_list;
// ...
ItemGroupCollectiveInfo m_all_cells_info;
ItemGroupCollectiveInfo m_all_nodes_info;
void _addStringAttribute(Hid& hid, const char* name, const String& value);

template <typename DataType> void
_writeDataSet1D(const DataInfo& data_info, Span<const DataType> values);
template <typename DataType> void
_writeDataSet1DUsingCollectiveIO(const DataInfo& data_info, Span<const DataType> values);
template <typename DataType> void
_writeDataSet1DCollective(const DataInfo& data_info, Span<const DataType> values);
template <typename DataType> void
_writeDataSet2D(const DataInfo& data_info, Span2<const DataType> values);
template <typename DataType> void
_writeDataSet2DUsingCollectiveIO(const DataInfo& data_info, Span2<const DataType> values);
template <typename DataType> void
_writeDataSet2DCollective(const DataInfo& data_info, Span2<const DataType> values);
// ... (one more templated helper declaration; its name is elided here)

String _getFileName()
{
  // ... (builds the output file name into StringBuilder 'sb', elided)
  return sb.toString();
}
template <typename DataType> void
_writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                     Int64 dim1_size, Int64 dim2_size, const DataType* values_data,
                     bool is_collective);
void _writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                          Int64 dim1_size, Int64 dim2_size, ConstMemoryView values_data,
                          hid_t hdf_type, bool is_collective);
void _addInt64Attribute(Hid& hid, const char* name, Int64 value);
Int64 _readInt64Attribute(Hid& hid, const char* name);
void _openOrCreateGroups();
// ...
void _initializeOffsets();
void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info);
// (VtkHdfV2DataWriter constructor, excerpt)
: TraceAccessor(mesh->traceMng())
// ...
, m_is_collective_io(is_collective_io)
, m_all_cells_info(mesh->allCells())
, m_all_nodes_info(mesh->allNodes())
void VtkHdfV2DataWriter::
beginWrite(const VariableCollection& vars)
{
  // ...
  IParallelMng* pm = m_mesh->parallelMng();
  // ...
  m_is_parallel = nb_rank > 1;
  // ...
  const bool is_first_call = (time_index < 2);
  m_is_first_call = is_first_call;
  // ...
  pwarning() << "L'implémentation au format 'VtkHdfV2' est expérimentale";
  // ...
  String filename = _getFileName();
  // ...
  m_is_collective_io = false;
  // ...
  info() << "VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;
  // ...
  m_is_writer = m_is_master_io || m_is_collective_io;
  // ...
  if (m_is_collective_io)
    plist_id.createFilePropertyMPIIO(pm);
  // ...
  if (is_first_call && m_is_master_io)
    dir.createDirectory();
  // ...
  if (m_is_collective_io) {
    // ... (elided)
  }
  // ...
  _openOrCreateGroups();
  // ...
  std::array<Int64, 2> version = { 2, 0 };
  _addInt64ArrayAttribute(m_top_group, "Version", version);
  _addStringAttribute(m_top_group, "Type", "UnstructuredGrid");
  // ...
  _initializeItemGroupCollectiveInfos(m_all_cells_info);
  _initializeItemGroupCollectiveInfos(m_all_nodes_info);
  CellGroup all_cells = m_mesh->allCells();
  NodeGroup all_nodes = m_mesh->allNodes();

  const Int32 nb_cell = all_cells.size();
  const Int32 nb_node = all_nodes.size();

  Int32 total_nb_connected_node = 0;
  ENUMERATE_CELL (icell, all_cells) {
    Cell cell = *icell;
    total_nb_connected_node += cell.nodeIds().size();
  }

  UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
  UniqueArray<Int64> cells_offset(nb_cell + 1);
  UniqueArray<unsigned char> cells_ghost_type(nb_cell);
  UniqueArray<unsigned char> cells_type(nb_cell);
  UniqueArray<Int64> cells_uid(nb_cell);

  Int32 connected_node_index = 0;
  cells_offset[0] = 0;
  ENUMERATE_CELL (icell, all_cells) {
    Int32 index = icell.index();
    Cell cell = *icell;
    cells_uid[index] = cell.uniqueId();

    bool is_ghost = !cell.isOwn();
    unsigned char ghost_type = 0;
    if (is_ghost)
      ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
    cells_ghost_type[index] = ghost_type;

    unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
    cells_type[index] = vtk_type;
    for (NodeLocalId node : cell.nodeIds()) {
      cells_connectivity[connected_node_index] = node;
      ++connected_node_index;
    }
    cells_offset[index + 1] = connected_node_index;
  }
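  // Worked example (illustrative, not from the source): for one quad made of
  // nodes {0,1,2,3} followed by one triangle made of nodes {3,2,4}:
  //   cells_connectivity = [0,1,2,3, 3,2,4]
  //   cells_offset       = [0, 4, 7]
  // i.e. cell i owns the connectivity range [cells_offset[i], cells_offset[i+1]).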
  _initializeOffsets();
  // ...
  _writeDataSet1DCollective<Int64>({ { m_top_group, "Offsets" }, m_offset_for_cell_offset_info }, cells_offset);
  _writeDataSet1DCollective<Int64>({ { m_top_group, "Connectivity" }, m_connectivity_offset_info },
                                   cells_connectivity);
  _writeDataSet1DCollective<unsigned char>({ { m_top_group, "Types" }, m_cell_offset_info }, cells_type);
  // ...
  Int64 nb_cell_int64 = nb_cell;
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfCells" }, m_part_offset_info },
                                   asConstSpan(&nb_cell_int64));
  Int64 nb_node_int64 = nb_node;
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfPoints" }, m_part_offset_info },
                                   asConstSpan(&nb_node_int64));
  Int64 number_of_connectivity_ids = cells_connectivity.size();
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfConnectivityIds" }, m_part_offset_info },
                                   asConstSpan(&number_of_connectivity_ids));
  UniqueArray<Int64> nodes_uid(nb_node);
  UniqueArray<unsigned char> nodes_ghost_type(nb_node);
  // ...
  UniqueArray2<Real> points;
  points.resize(nb_node, 3);
  ENUMERATE_NODE (inode, all_nodes) {
    Int32 index = inode.index();
    Node node = *inode;
    nodes_uid[index] = node.uniqueId();

    bool is_ghost = !node.isOwn();
    unsigned char ghost_type = 0;
    if (is_ghost)
      ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
    nodes_ghost_type[index] = ghost_type;

    Real3 pos = nodes_coordinates[inode];
    points[index][0] = pos.x;
    points[index][1] = pos.y;
    points[index][2] = pos.z;
  }

  _writeDataSet1DCollective<Int64>({ { m_node_data_group, "GlobalNodeId" }, m_cell_offset_info }, nodes_uid);
  _writeDataSet1DCollective<unsigned char>({ { m_node_data_group, "vtkGhostType" }, m_cell_offset_info }, nodes_ghost_type);
  _writeDataSet2DCollective<Real>({ { m_top_group, "Points" }, m_point_offset_info }, points);
  _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group, "vtkGhostType" }, m_cell_offset_info }, cells_ghost_type);
  _writeDataSet1DCollective<Int64>({ { m_cell_data_group, "GlobalCellId" }, m_cell_offset_info }, cells_uid);
  // ...
  _writeDataSet1D<Real>({ { m_steps_group, "Values" }, m_time_offset_info }, asConstSpan(&current_time));
  // ...
  _writeDataSet1D<Int64>({ { m_steps_group, "PartOffsets" }, m_time_offset_info }, asConstSpan(&part_offset));
  // ...
  _addInt64Attribute(m_steps_group, "NSteps", time_index);
  // ...
}
// (from VtkHdfV2DataWriter::_computeWritePartInfo(), excerpt)
  // ...
  for (Integer i = 0; i < nb_rank; ++i) {
    // ... (elided)
  }
  // ...
  for (Integer i = 0; i < my_rank; ++i) {
    // ... (elided)
  }
  // ...
void VtkHdfV2DataWriter::
_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info)
{
  Int64 dim1_size = group_info.m_item_group.size();
  // ...
}
std::pair<Int64, Int64> _getInterval(Int64 index, Int64 nb_interval, Int64 total_size)
{
  Int64 n = total_size;
  Int64 isize = n / nb_interval;
  Int64 ibegin = index * isize;
  // The last interval absorbs the remainder.
  if ((index + 1) == nb_interval)
    isize = n - ibegin;
  return { ibegin, isize };
}
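// Worked example: _getInterval(i, 3, 10) returns (0,3), (3,3) and (6,4) for
// i = 0, 1, 2; the last interval takes the remainder so that the three
// intervals exactly cover [0,10).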
void VtkHdfV2DataWriter::
_writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                     Int64 dim1_size, Int64 dim2_size, ConstMemoryView values_data,
                     hid_t hdf_type, bool is_collective)
{
  // ...
  static constexpr int MAX_DIM = 2;
  // ...
  if (m_is_first_call) {
    // ... (elided)
  }
  // ...
  dataset.open(group, name.localstr());
  // ...
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                     Int64 dim1_size, Int64 dim2_size, const DataType* values_data,
                     bool is_collective)
{
  const hid_t hdf_type = m_standard_types.nativeType(DataType{});
  ConstMemoryView mem_view = makeConstMemoryView(values_data, sizeof(DataType), dim1_size * dim2_size);
  _writeDataSetGeneric(data_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1D(const DataInfo& data_info, Span<const DataType> values)
{
  _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(), false);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1DUsingCollectiveIO(const DataInfo& data_info, Span<const DataType> values)
{
  _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(), true);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1DCollective(const DataInfo& data_info, Span<const DataType> values)
{
  if (!m_is_parallel)
    return _writeDataSet1D(data_info, values);
  if (m_is_collective_io)
    return _writeDataSet1DUsingCollectiveIO(data_info, values);
  // Fallback: gather everything on the master I/O rank and write sequentially.
  UniqueArray<DataType> all_values;
  IParallelMng* pm = m_mesh->parallelMng();
  // ... (gather 'values' from all ranks into 'all_values', elided)
  if (m_is_master_io)
    _writeDataSet1D<DataType>(data_info, all_values);
}
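// Design note: the *Collective variants choose between three paths, in this
// order: a plain local write when the run is sequential, an HDF5 collective
// MPI-IO write when it is enabled, and otherwise a gather of all ranks'
// values onto the master I/O rank followed by a sequential write there.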
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2D(const DataInfo& data_info, Span2<const DataType> values)
{
  _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(), false);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2DUsingCollectiveIO(const DataInfo& data_info, Span2<const DataType> values)
{
  _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(), true);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2DCollective(const DataInfo& data_info, Span2<const DataType> values)
{
  if (!m_is_parallel)
    return _writeDataSet2D(data_info, values);
  if (m_is_collective_io)
    return _writeDataSet2DUsingCollectiveIO(data_info, values);
  // Fallback: gather on the master I/O rank, then write sequentially.
  Int64 dim2_size = values.dim2Size();
  UniqueArray<DataType> all_values;
  IParallelMng* pm = m_mesh->parallelMng();
  Span<const DataType> values_1d(values.data(), values.totalNbElement());
  // ... (gather 'values_1d' from all ranks into 'all_values', elided)
  if (m_is_master_io) {
    Int64 dim1_size = all_values.size();
    if (dim2_size != 0)
      dim1_size = dim1_size / dim2_size;
    Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
    return _writeDataSet2D<DataType>(data_info, span2);
  }
}
void VtkHdfV2DataWriter::
_addInt64ArrayAttribute(Hid& hid, const char* name, Span<const Int64> values)
{
  hsize_t len = values.size();
  hid_t aid = H5Screate_simple(1, &len, nullptr);
  hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
  // ...
  int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
  // ... (close the attribute and dataspace, check 'ret', elided)
}
void VtkHdfV2DataWriter::
_addInt64Attribute(Hid& hid, const char* name, Int64 value)
{
  HSpace aid(H5Screate(H5S_SCALAR));
  HAttribute attr;
  if (m_is_first_call)
    attr.create(hid, name, H5T_NATIVE_INT64, aid);
  else
    attr.open(hid, name);
  // ...
  herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
  // ... (check 'ret', elided)
}
Int64 VtkHdfV2DataWriter::
_readInt64Attribute(Hid& hid, const char* name)
{
  HAttribute attr;
  attr.open(hid, name);
  // ...
  Int64 value = 0;
  herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
  // ... (check 'ret', elided)
  return value;
}
void VtkHdfV2DataWriter::
_addStringAttribute(Hid& hid, const char* name, const String& value)
{
  hid_t aid = H5Screate(H5S_SCALAR);
  hid_t attr_type = H5Tcopy(H5T_C_S1);
  H5Tset_size(attr_type, value.length());
  hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
  // ...
  int ret = H5Awrite(attr, attr_type, value.localstr());
  ret = H5Tclose(attr_type);
  // ... (close remaining handles, check 'ret', elided)
}
void VtkHdfV2DataWriter::
endWrite()
{
  // ...
  for (const auto& i : m_offset_info_list) {
    Int64 offset = i.second;
    const DatasetInfo& offset_info = i.first;
    HGroup* hdf_group = offset_info.group();
    // ...
    _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, asConstSpan(&offset));
  }
  // ...
}
void VtkHdfV2DataWriter::
_openOrCreateGroups()
{
  m_top_group.openOrCreate(m_file_id, "VTKHDF");
  m_cell_data_group.openOrCreate(m_top_group, "CellData");
  m_node_data_group.openOrCreate(m_top_group, "PointData");
  m_steps_group.openOrCreate(m_top_group, "Steps");
  m_point_data_offsets_group.openOrCreate(m_steps_group, "PointDataOffsets");
  m_cell_data_offsets_group.openOrCreate(m_steps_group, "CellDataOffsets");
  m_field_data_offsets_group.openOrCreate(m_steps_group, "FieldDataOffsets");
}
void VtkHdfV2DataWriter::
_closeGroups()
{
  m_cell_data_group.close();
  m_node_data_group.close();
  m_point_data_offsets_group.close();
  m_cell_data_offsets_group.close();
  m_field_data_offsets_group.close();
  m_steps_group.close();
  // ...
}
void VtkHdfV2DataWriter::
write(IVariable* var, IData* data)
{
  info(4) << "Write VtkHdfV2 var=" << var->name();
  // ...
  if (var->dimension() != 1)
    ARCANE_FATAL("Only export of scalar item variable is implemented (name={0})", var->name());
  if (var->isPartial())
    ARCANE_FATAL("Export of partial variable is not implemented");
  // ...
  switch (item_kind) {
  case IK_Cell:
    group = &m_cell_data_group;
    break;
  case IK_Node:
    group = &m_node_data_group;
    break;
  default:
    ARCANE_FATAL("Only export of 'Cell' or 'Node' variable is implemented (name={0})", var->name());
  }
  // ...
  warning() << String::format("Export for datatype '{0}' is not supported (var_name={1})",
                              data_type, var->name());
  // ...
}
template <typename DataType> void VtkHdfV2DataWriter::
// ... (the name and body of this templated writer are elided)
void VtkHdfV2DataWriter::
_writeReal3Dataset(const DataInfo& data_info, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<Real3>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real3> values(true_data->view());
  Int32 nb_value = values.size();
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real3 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = v.z;
  }
  _writeDataSet2DCollective<Real>(data_info, scalar_values);
}
void VtkHdfV2DataWriter::
_writeReal2Dataset(const DataInfo& data_info, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<Real2>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real2> values(true_data->view());
  Int32 nb_value = values.size();
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real2 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = 0.0;
  }
  _writeDataSet2DCollective<Real>(data_info, scalar_values);
}
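// Design note: both writers above flatten vector values into an (n,3) array
// of Real scalars; Real2 values are padded with a trailing 0.0 because VTK
// expects vector fields to carry three components.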
void VtkHdfV2DataWriter::
_readAndSetOffset(DatasetInfo& offset_info, Int32 wanted_step)
{
  HGroup* hgroup = offset_info.group();
  // ...
  StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
  UniqueArray<Int64> values;
  a.directRead(m_standard_types, values);
  Int64 offset_value = values[wanted_step];
  offset_info.setOffset(offset_value);
  info() << "VALUES name=" << offset_info.name() << " values=" << values
         << " wanted_step=" << wanted_step << " v=" << offset_value;
}
void VtkHdfV2DataWriter::
_initializeOffsets()
{
  // ...
  m_cell_offset_info = DatasetInfo(m_steps_group, "CellOffsets");
  m_point_offset_info = DatasetInfo(m_steps_group, "PointOffsets");
  m_connectivity_offset_info = DatasetInfo(m_steps_group, "ConnectivityIdOffsets");
  // ...
  m_offset_for_cell_offset_info = DatasetInfo("_OffsetForCellOffsetInfo");
  m_part_offset_info = DatasetInfo("_PartOffsetInfo");
  m_time_offset_info = DatasetInfo("_TimeOffsetInfo");
  // ...
  if (m_is_writer && !m_is_first_call) {
    IParallelMng* pm = m_mesh->parallelMng();
    // ...
    Int64 nb_current_step = _readInt64Attribute(m_steps_group, "NSteps");
    // ...
    info(4) << "NB_STEP=" << nb_current_step << " time_index=" << time_index;
    // ...
    const bool debug_times = false;
    if (debug_times) {
      StandardArrayT<Real> a1(m_steps_group.id(), "Values");
      UniqueArray<Real> times;
      a1.directRead(m_standard_types, times);
      info() << "TIMES=" << times;
    }
    // ...
    if ((nb_current_step + 1) != time_index) {
      info() << "[VtkHdf] go_backward detected";
      Int32 wanted_step = time_index - 1;
      // ...
      _readAndSetOffset(m_cell_offset_info, wanted_step);
      _readAndSetOffset(m_point_offset_info, wanted_step);
      _readAndSetOffset(m_connectivity_offset_info, wanted_step);
      m_part_offset_info.setOffset(wanted_step * nb_rank);
      m_time_offset_info.setOffset(wanted_step);
      m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.offset() + wanted_step * nb_rank);
    }
  }
}
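// Worked example (illustrative): if a restart resumed from an earlier
// checkpoint so that the file already contains NSteps = 10 entries while the
// new time_index is 8, then (10 + 1) != 8 triggers the branch above with
// wanted_step = 7: every offset is rewound to the value recorded for step 7
// and the following writes overwrite the now-stale entries.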
// (from VtkHdfV2PostProcessor, excerpt)
  // ...
  w->setDirectoryName(dir.file("vtkhdfv2"));
  m_writer = std::move(w);
  // ...
  std::unique_ptr<IDataWriter> m_writer;
  // ...
ARCANE_REGISTER_SERVICE_VTKHDFV2POSTPROCESSOR(VtkHdfV2PostProcessor,
                                              VtkHdfV2PostProcessor);