14#include "arcane/utils/Collection.h"
15#include "arcane/utils/Enumerator.h"
16#include "arcane/utils/Iostream.h"
17#include "arcane/utils/StringBuilder.h"
18#include "arcane/utils/IOException.h"
19#include "arcane/utils/FixedArray.h"
20#include "arcane/utils/MemoryView.h"
22#include "arcane/core/PostProcessorWriterBase.h"
23#include "arcane/core/Directory.h"
24#include "arcane/core/FactoryService.h"
25#include "arcane/core/IDataWriter.h"
26#include "arcane/core/IData.h"
27#include "arcane/core/IItemFamily.h"
28#include "arcane/core/VariableCollection.h"
29#include "arcane/core/IParallelMng.h"
30#include "arcane/core/IMesh.h"
31#include "arcane/core/internal/VtkCellTypes.h"
33#include "arcane/core/materials/IMeshMaterialMng.h"
34#include "arcane/core/materials/IMeshEnvironment.h"
36#include "arcane/hdf5/Hdf5Utils.h"
37#include "arcane/hdf5/VtkHdfV2PostProcessor_axl.h"
77 asConstSpan(
const T* v)
86class VtkHdfV2DataWriter
98 struct DatasetGroupAndName
102 DatasetGroupAndName(
HGroup& group_,
const String& name_)
126 DatasetInfo() =
default;
127 explicit DatasetInfo(
const String& name)
134 bool isNull()
const {
return m_name.null(); }
136 HGroup* group()
const {
return m_group; }
137 const String& name()
const {
return m_name; }
140 void setOffset(
Int64 v) { m_offset = v; }
141 friend bool operator<(
const DatasetInfo& s1,
const DatasetInfo& s2)
143 return (s1.m_name < s2.m_name);
148 HGroup* m_group =
nullptr;
177 struct ItemGroupCollectiveInfo
181 explicit ItemGroupCollectiveInfo(
const ItemGroup& g)
207 , m_dataset_info(dataset_info)
213 , m_dataset_info(dataset_info)
214 , m_group_info(group_info)
220 DatasetInfo datasetInfo()
const {
return m_dataset_info; }
236 void endWrite()
override;
274 HGroup m_point_data_offsets_group;
275 HGroup m_cell_data_offsets_group;
276 HGroup m_field_data_offsets_group;
278 bool m_is_parallel =
false;
279 bool m_is_master_io =
false;
280 bool m_is_collective_io =
false;
281 bool m_is_first_call =
false;
282 bool m_is_writer =
false;
290 std::map<DatasetInfo, Int64> m_offset_info_list;
294 ItemGroupCollectiveInfo m_all_cells_info;
295 ItemGroupCollectiveInfo m_all_nodes_info;
309 void _addStringAttribute(
Hid& hid,
const char* name,
const String& value);
311 template <
typename DataType>
void
313 template <
typename DataType>
void
315 template <
typename DataType>
void
317 template <
typename DataType>
void
319 template <
typename DataType>
void
321 template <
typename DataType>
void
323 template <
typename DataType>
void
324 _writeBasicTypeDataset(
const DataInfo& data_info,
IData* data);
325 void _writeReal3Dataset(
const DataInfo& data_info,
IData* data);
326 void _writeReal2Dataset(
const DataInfo& data_info,
IData* data);
332 return sb.toString();
334 template <
typename DataType>
void
335 _writeDataSetGeneric(
const DataInfo& data_info,
Int32 nb_dim,
336 Int64 dim1_size,
Int64 dim2_size,
const DataType* values_data,
338 void _writeDataSetGeneric(
const DataInfo& data_info,
Int32 nb_dim,
340 const hid_t hdf_datatype_type,
bool is_collective);
341 void _addInt64Attribute(
Hid& hid,
const char* name,
Int64 value);
342 Int64 _readInt64Attribute(
Hid& hid,
const char* name);
343 void _openOrCreateGroups();
345 void _readAndSetOffset(DatasetInfo& offset_info,
Int32 wanted_step);
346 void _initializeOffsets();
347 void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info);
349 void _writeConstituentsGroups();
357: TraceAccessor(mesh->traceMng())
360, m_is_collective_io(is_collective_io)
361, m_all_cells_info(mesh->allCells())
362, m_all_nodes_info(mesh->allNodes())
369void VtkHdfV2DataWriter::
377 IParallelMng* pm =
m_mesh->parallelMng();
378 const Int32 nb_rank = pm->commSize();
379 m_is_parallel = nb_rank > 1;
380 m_is_master_io = pm->isMasterIO();
383 const bool is_first_call = (time_index < 2);
384 m_is_first_call = is_first_call;
386 info() <<
"WARNING: L'implémentation au format 'VtkHdfV2' est expérimentale";
388 String filename = _getFileName();
401 if (pm->isHybridImplementation() || pm->isThreadImplementation())
402 m_is_collective_io =
false;
405 info() <<
"VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;
414 m_is_writer = m_is_master_io || m_is_collective_io;
418 if (m_is_collective_io)
419 plist_id.createFilePropertyMPIIO(pm);
421 if (is_first_call && m_is_master_io)
422 dir.createDirectory();
424 if (m_is_collective_io)
428 m_standard_types.initialize();
435 _openOrCreateGroups();
438 std::array<Int64, 2> version = { 2, 0 };
439 _addInt64ArrayAttribute(m_top_group,
"Version", version);
440 _addStringAttribute(m_top_group,
"Type",
"UnstructuredGrid");
445 _initializeItemGroupCollectiveInfos(m_all_cells_info);
446 _initializeItemGroupCollectiveInfos(m_all_nodes_info);
451 const Int32 nb_cell = all_cells.size();
452 const Int32 nb_node = all_nodes.size();
454 Int32 total_nb_connected_node = 0;
457 total_nb_connected_node += cell.nodeIds().size();
462 UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
463 UniqueArray<Int64> cells_offset(nb_cell + 1);
464 UniqueArray<unsigned char> cells_ghost_type(nb_cell);
465 UniqueArray<unsigned char> cells_type(nb_cell);
466 UniqueArray<Int64> cells_uid(nb_cell);
469 Int32 connected_node_index = 0;
471 Int32 index = icell.index();
474 cells_uid[index] = cell.uniqueId();
477 bool is_ghost = !cell.isOwn();
479 ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
480 cells_ghost_type[index] = ghost_type;
482 unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
483 cells_type[index] = vtk_type;
484 for (NodeLocalId node : cell.nodeIds()) {
485 cells_connectivity[connected_node_index] = node;
486 ++connected_node_index;
488 cells_offset[index + 1] = connected_node_index;
492 _initializeOffsets();
495 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Offsets" }, m_offset_for_cell_offset_info }, cells_offset);
497 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Connectivity" }, m_connectivity_offset_info },
499 _writeDataSet1DCollective<unsigned char>({ { m_top_group,
"Types" }, m_cell_offset_info }, cells_type);
502 Int64 nb_cell_int64 = nb_cell;
503 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfCells" }, m_part_offset_info },
504 asConstSpan(&nb_cell_int64));
505 Int64 nb_node_int64 = nb_node;
506 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfPoints" }, m_part_offset_info },
507 asConstSpan(&nb_node_int64));
508 Int64 number_of_connectivity_ids = cells_connectivity.size();
509 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfConnectivityIds" }, m_part_offset_info },
510 asConstSpan(&number_of_connectivity_ids));
515 UniqueArray<Int64> nodes_uid(nb_node);
516 UniqueArray<unsigned char> nodes_ghost_type(nb_node);
518 UniqueArray2<Real> points;
519 points.resize(nb_node, 3);
521 Int32 index = inode.index();
524 nodes_uid[index] = node.uniqueId();
527 bool is_ghost = !node.isOwn();
529 ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
530 nodes_ghost_type[index] = ghost_type;
532 Real3 pos = nodes_coordinates[inode];
533 points[index][0] = pos.x;
534 points[index][1] = pos.y;
535 points[index][2] = pos.z;
539 _writeDataSet1DCollective<Int64>({ { m_node_data_group,
"GlobalNodeId" }, m_cell_offset_info }, nodes_uid);
542 _writeDataSet1DCollective<unsigned char>({ { m_node_data_group,
"vtkGhostType" }, m_cell_offset_info }, nodes_ghost_type);
545 _writeDataSet2DCollective<Real>({ { m_top_group,
"Points" }, m_point_offset_info }, points);
549 _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group,
"vtkGhostType" }, m_cell_offset_info }, cells_ghost_type);
553 _writeDataSet1DCollective<Int64>({ { m_cell_data_group,
"GlobalCellId" }, m_cell_offset_info }, cells_uid);
558 _writeDataSet1D<Real>({ { m_steps_group,
"Values" }, m_time_offset_info }, asConstSpan(¤t_time));
561 Int64 comm_size = pm->commSize();
562 Int64 part_offset = (time_index - 1) * comm_size;
563 _writeDataSet1D<Int64>({ { m_steps_group,
"PartOffsets" }, m_time_offset_info }, asConstSpan(&part_offset));
566 _addInt64Attribute(m_steps_group,
"NSteps", time_index);
569 _writeConstituentsGroups();
575void VtkHdfV2DataWriter::
576_writeConstituentsGroups()
586 m_materials_groups.add(group_info_ref);
588 _initializeItemGroupCollectiveInfos(group_info);
589 ConstArrayView<Int32> groups_ids = cells.view().localIds();
590 DatasetGroupAndName dataset_group_name(m_top_group, String(
"Constituent_") + cells.name());
592 info() <<
"Writing infos for group '" << cells.name() <<
"'";
593 _writeDataSet1DCollective<Int32>({ dataset_group_name, m_cell_offset_info }, groups_ids);
// Fragment of _computeWritePartInfo(local_size): computes, from the gathered
// per-rank sizes 'all_sizes', the global element count and this rank's offset.
// NOTE(review): the declarations of all_sizes/my_index/nb_rank/my_rank sit on
// lines elided by this extraction — presumably an allGather of local_size;
// confirm against the full source.
612 Int64 dim1_size = local_size;
615 Int64 total_size = 0;
// Sum of every rank's contribution -> total dataset extent.
616 for (
Integer i = 0; i < nb_rank; ++i)
617 total_size += all_sizes[i];
// Prefix sum over the ranks before mine -> my write offset.
620 for (
Integer i = 0; i < my_rank; ++i)
621 my_index += all_sizes[i];
624 part_info.setTotalSize(total_size);
625 part_info.setSize(local_size);
626 part_info.setOffset(my_index);
633void VtkHdfV2DataWriter::
634_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info)
636 Int64 dim1_size = group_info.m_item_group.size();
// Splits [0, total_size) into 'nb_interval' near-equal chunks and returns the
// {begin, size} pair for chunk 'index' (used to bound the size of a single
// HDF5 write).
// NOTE(review): the body of the last-interval 'if' (original line 649) is
// missing from this extraction; presumably it grows 'isize' so the final
// chunk absorbs the division remainder — confirm against the full source.
642 std::pair<Int64, Int64> _getInterval(Int64 index, Int64 nb_interval, Int64 total_size)
644 Int64 n = total_size;
645 Int64 isize = n / nb_interval;
646 Int64 ibegin = index * isize;
648 if ((index + 1) == nb_interval)
650 return { ibegin, isize };
662void VtkHdfV2DataWriter::
666 const hid_t hdf_type,
bool is_collective)
671 HGroup& group = data_info.dataset.group;
672 const String& name = data_info.dataset.name;
676 Int64 wanted_offset = data_info.datasetInfo().
offset();
678 static constexpr int MAX_DIM = 2;
688 local_dims[0] = dim1_size;
689 local_dims[1] = dim2_size;
697 max_dims[0] = H5S_UNLIMITED;
698 max_dims[1] = dim2_size;
701 Int64 write_offset = 0;
704 Int64 global_dim1_size = dim1_size;
705 Int32 nb_participating_rank = 1;
708 nb_participating_rank =
m_mesh->parallelMng()->commSize();
710 if (data_info.m_group_info) {
713 part_info = data_info.m_group_info->writePartInfo();
718 global_dim1_size = part_info.totalSize();
719 my_index = part_info.offset();
729 if (m_is_first_call) {
732 global_dims[0] = global_dim1_size;
733 global_dims[1] = dim2_size;
735 Int64 chunk_size = global_dim1_size / nb_participating_rank;
736 if (chunk_size < 1024)
738 const Int64 max_chunk_size = 1024 * 1024 * 10;
739 chunk_size =
math::min(chunk_size, max_chunk_size);
740 chunk_dims[0] = chunk_size;
741 chunk_dims[1] = dim2_size;
742 info() <<
"CHUNK nb_dim=" << nb_dim
743 <<
" global_dim1_size=" << global_dim1_size
744 <<
" chunk0=" << chunk_dims[0]
745 <<
" chunk1=" << chunk_dims[1]
747 file_space.createSimple(nb_dim, global_dims.data(), max_dims.data());
749 plist_id.create(H5P_DATASET_CREATE);
750 H5Pset_chunk(plist_id.id(), nb_dim, chunk_dims.data());
751 dataset.create(group, name.localstr(), hdf_type, file_space,
HProperty{}, plist_id,
HProperty{});
754 hyperslab_offsets[0] = my_index;
755 hyperslab_offsets[1] = 0;
761 dataset.open(group, name.
localstr());
762 file_space = dataset.getSpace();
763 int nb_dimension = file_space.nbDimension();
764 if (nb_dimension != nb_dim)
769 file_space.getDimensions(original_dims.data(),
nullptr);
770 hsize_t offset0 = original_dims[0];
773 if (wanted_offset >= 0) {
774 offset0 = wanted_offset;
775 info() <<
"Forcing offset to " << wanted_offset;
777 global_dims[0] = offset0 + global_dim1_size;
778 global_dims[1] = dim2_size;
779 write_offset = offset0;
782 if ((herror = dataset.setExtent(global_dims.data())) < 0)
784 file_space = dataset.getSpace();
786 hyperslab_offsets[0] = offset0 + my_index;
787 hyperslab_offsets[1] = 0;
788 info(4) <<
"APPEND nb_dim=" << nb_dim
789 <<
" dim0=" << global_dims[0]
790 <<
" count0=" << local_dims[0]
791 <<
" offsets0=" << hyperslab_offsets[0] <<
" name=" << name;
798 Int64 nb_interval = 1;
802 info(4) <<
"WRITE global_size=" << nb_write_byte <<
" max_size=" <<
m_max_write_size <<
" nb_interval=" << nb_interval;
804 for (
Int64 i = 0; i < nb_interval; ++i) {
805 auto [index, nb_element] = _getInterval(i, nb_interval, dim1_size);
808 dims[0] = nb_element;
811 offsets[0] = hyperslab_offsets[0] + index;
813 if ((herror = H5Sselect_hyperslab(file_space.id(), H5S_SELECT_SET, offsets.data(),
nullptr, dims.data(),
nullptr)) < 0)
817 memory_space.createSimple(nb_dim, dims.data());
820 if ((herror = dataset.write(hdf_type, values_data.
data() + data_offset, memory_space, file_space, write_plist_id)) < 0)
827 if (!data_info.datasetInfo().isNull())
828 m_offset_info_list.insert(std::make_pair(data_info.datasetInfo(), write_offset));
834template <
typename DataType>
void VtkHdfV2DataWriter::
835_writeDataSetGeneric(
const DataInfo& data_info,
Int32 nb_dim,
836 Int64 dim1_size,
Int64 dim2_size,
const DataType* values_data,
839 const hid_t hdf_type = m_standard_types.nativeType(DataType{});
840 ConstMemoryView mem_view =
makeConstMemoryView(values_data,
sizeof(DataType), dim1_size * dim2_size);
841 _writeDataSetGeneric(data_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
847template <
typename DataType>
void VtkHdfV2DataWriter::
850 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
false);
856template <
typename DataType>
void VtkHdfV2DataWriter::
859 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
true);
865template <
typename DataType>
void VtkHdfV2DataWriter::
869 return _writeDataSet1D(data_info, values);
870 if (m_is_collective_io)
871 return _writeDataSet1DUsingCollectiveIO(data_info, values);
872 UniqueArray<DataType> all_values;
873 IParallelMng* pm =
m_mesh->parallelMng();
874 pm->gatherVariable(values.smallView(), all_values, pm->masterIORank());
876 _writeDataSet1D<DataType>(data_info, all_values);
882template <
typename DataType>
void VtkHdfV2DataWriter::
885 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
false);
891template <
typename DataType>
void VtkHdfV2DataWriter::
894 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
true);
900template <
typename DataType>
void VtkHdfV2DataWriter::
904 return _writeDataSet2D(data_info, values);
905 if (m_is_collective_io)
906 return _writeDataSet2DUsingCollectiveIO(data_info, values);
908 Int64 dim2_size = values.dim2Size();
909 UniqueArray<DataType> all_values;
910 IParallelMng* pm =
m_mesh->parallelMng();
911 Span<const DataType> values_1d(values.data(), values.totalNbElement());
912 pm->gatherVariable(values_1d.smallView(), all_values, pm->masterIORank());
913 if (m_is_master_io) {
914 Int64 dim1_size = all_values.size();
916 dim1_size = dim1_size / dim2_size;
917 Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
918 return _writeDataSet2D<DataType>(data_info, span2);
925void VtkHdfV2DataWriter::
928 hsize_t
len = values.size();
929 hid_t aid = H5Screate_simple(1, &len,
nullptr);
930 hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
933 int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
// Writes a scalar Int64 HDF5 attribute 'name' with content 'value' on 'hid',
// using the RAII wrappers (HSpace/attr) rather than raw hid_t handles.
943void VtkHdfV2DataWriter::
944_addInt64Attribute(
Hid& hid,
const char* name,
Int64 value)
946 HSpace aid(H5Screate(H5S_SCALAR));
// NOTE(review): lines 947-953 are elided here; create() and open() are
// presumably on the two arms of an is-first-call conditional (create the
// attribute on the first write, reopen it afterwards) — confirm against
// the full source.
949 attr.create(hid, name, H5T_NATIVE_INT64, aid);
951 attr.open(hid, name);
954 herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
// Reads back a scalar Int64 HDF5 attribute 'name' from 'hid' (counterpart of
// _addInt64Attribute; used e.g. to reload "NSteps" on restart).
// NOTE(review): the declaration of 'attr'/'value' and the error handling for
// a failing open/read are on elided lines — confirm against the full source.
962Int64 VtkHdfV2DataWriter::
963_readInt64Attribute(
Hid& hid,
const char* name)
966 attr.open(hid, name);
970 herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
// Writes a fixed-length C-string HDF5 attribute 'name' = 'value' on 'hid'
// (e.g. Type = "UnstructuredGrid" on the top-level VTKHDF group).
// Unlike _addInt64Attribute this uses raw hid_t handles.
979void VtkHdfV2DataWriter::
980_addStringAttribute(
Hid& hid,
const char* name,
const String& value)
982 hid_t aid = H5Screate(H5S_SCALAR);
// Fixed-size string type sized to the UTF-8 length of 'value'.
983 hid_t attr_type = H5Tcopy(H5T_C_S1);
984 H5Tset_size(attr_type, value.length());
985 hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
988 int ret = H5Awrite(attr, attr_type, value.localstr());
// NOTE(review): only H5Tclose is visible here; the matching H5Aclose/H5Sclose
// for 'attr' and 'aid' should be on the elided lines (986-987, 990+) —
// verify against the full source, otherwise these handles leak.
989 ret = H5Tclose(attr_type);
999void VtkHdfV2DataWriter::
1005 for (
const auto& i : m_offset_info_list) {
1006 Int64 offset = i.second;
1008 HGroup* hdf_group = offset_info.group();
1011 _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, asConstSpan(&offset));
// Opens (or creates on the first pass) the fixed HDF5 group hierarchy of the
// VTKHDF v2 layout: /VTKHDF with CellData, PointData and Steps children, and
// the per-kind offset groups under Steps used for temporal output.
1021void VtkHdfV2DataWriter::
1022_openOrCreateGroups()
1025 m_top_group.openOrCreate(
m_file_id,
"VTKHDF");
1026 m_cell_data_group.openOrCreate(m_top_group,
"CellData");
1027 m_node_data_group.openOrCreate(m_top_group,
"PointData");
1028 m_steps_group.openOrCreate(m_top_group,
"Steps");
1029 m_point_data_offsets_group.openOrCreate(m_steps_group,
"PointDataOffsets");
1030 m_cell_data_offsets_group.openOrCreate(m_steps_group,
"CellDataOffsets");
1031 m_field_data_offsets_group.openOrCreate(m_steps_group,
"FieldDataOffsets");
// Closes every HDF5 group opened by _openOrCreateGroups, children before
// parents (m_top_group last).
// NOTE(review): the function name itself (original lines 1038-1039) is elided
// by this extraction — presumably a _closeGroups()-style helper; confirm
// against the full source.
1037void VtkHdfV2DataWriter::
1040 m_cell_data_group.close();
1041 m_node_data_group.close();
1042 m_point_data_offsets_group.close();
1043 m_cell_data_offsets_group.close();
1044 m_field_data_offsets_group.close();
1045 m_steps_group.close();
1046 m_top_group.close();
1055 ARCANE_UNUSED(meta_data);
1064 info(4) <<
"Write VtkHdfV2 var=" << var->
name();
1069 ARCANE_FATAL(
"Only export of scalar item variable is implemented (name={0})", var->
name());
1071 ARCANE_FATAL(
"Export of partial variable is not implemented");
1076 switch (item_kind) {
1078 group = &m_cell_data_group;
1079 offset_info = m_cell_offset_info;
1080 group_info = &m_all_cells_info;
1083 group = &m_node_data_group;
1084 offset_info = m_point_offset_info;
1085 group_info = &m_all_nodes_info;
1088 ARCANE_FATAL(
"Only export of 'Cell' or 'Node' variable is implemented (name={0})", var->
name());
1095 switch (data_type) {
1097 _writeBasicTypeDataset<Real>(data_info, data);
1100 _writeBasicTypeDataset<Int64>(data_info, data);
1103 _writeBasicTypeDataset<Int32>(data_info, data);
1106 _writeReal3Dataset(data_info, data);
1109 _writeReal2Dataset(data_info, data);
1112 warning() << String::format(
"Export for datatype '{0}' is not supported (var_name={1})", data_type, var->
name());
1119template <
typename DataType>
void VtkHdfV2DataWriter::
1120_writeBasicTypeDataset(
const DataInfo& data_info,
IData* data)
// Writes a Real3 array variable as an N x 3 dataset of Real: the
// array-of-structs input is flattened into a 2D scalar array (x,y,z per row)
// before the collective 2D write.
// NOTE(review): the null-check after the dynamic_cast (line 1134) is elided
// by this extraction — confirm it exists in the full source.
1130void VtkHdfV2DataWriter::
1131_writeReal3Dataset(
const DataInfo& data_info, IData* data)
1133 auto* true_data =
dynamic_cast<IArrayDataT<Real3>*
>(data);
1135 SmallSpan<const Real3> values(true_data->view());
1136 Int32 nb_value = values.size();
// Copy Real3 components into a contiguous (nb_value, 3) scalar array.
1138 UniqueArray2<Real> scalar_values;
1139 scalar_values.resize(nb_value, 3);
1140 for (Int32 i = 0; i < nb_value; ++i) {
1141 Real3 v = values[i];
1142 scalar_values[i][0] = v.x;
1143 scalar_values[i][1] = v.y;
1144 scalar_values[i][2] = v.z;
1146 _writeDataSet2DCollective<Real>(data_info, scalar_values);
// Writes a Real2 array variable as an N x 3 dataset of Real: each value is
// padded to three components with the third set to 0.0, mirroring
// _writeReal3Dataset so 2D vectors share the 3-component on-disk layout.
// NOTE(review): the null-check after the dynamic_cast (line 1157) is elided
// by this extraction — confirm it exists in the full source.
1152void VtkHdfV2DataWriter::
1153_writeReal2Dataset(
const DataInfo& data_info,
IData* data)
1156 auto* true_data =
dynamic_cast<IArrayDataT<Real2>*
>(data);
1158 SmallSpan<const Real2> values(true_data->view());
1159 Int32 nb_value = values.size();
1160 UniqueArray2<Real> scalar_values;
1161 scalar_values.resize(nb_value, 3);
1162 for (
Int32 i = 0; i < nb_value; ++i) {
1163 Real2 v = values[i];
1164 scalar_values[i][0] = v.x;
1165 scalar_values[i][1] = v.y;
// Third component is zero-padding: Real2 has no z.
1166 scalar_values[i][2] = 0.0;
1168 _writeDataSet2DCollective<Real>(data_info, scalar_values);
// Reads the saved per-step offset dataset named offset_info.name() from the
// group attached to offset_info, picks the entry for 'wanted_step', and
// stores it back into offset_info. Used when a restart goes backward in time
// so new writes overwrite from the right position.
// NOTE(review): no visible bounds check of wanted_step against values.size();
// presumably guaranteed by the caller — confirm against the full source.
1174void VtkHdfV2DataWriter::
1175_readAndSetOffset(DatasetInfo& offset_info,
Int32 wanted_step)
1177 HGroup* hgroup = offset_info.group();
1179 StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
1180 UniqueArray<Int64> values;
1181 a.directRead(m_standard_types, values);
1182 Int64 offset_value = values[wanted_step];
1183 offset_info.setOffset(offset_value);
// Trace the full offset table plus the selected step for debugging restarts.
1184 info() <<
"VALUES name=" << offset_info.name() <<
" values=" << values
1185 <<
" wanted_step=" << wanted_step <<
" v=" << offset_value;
1191void VtkHdfV2DataWriter::
1212 m_cell_offset_info =
DatasetInfo(m_steps_group,
"CellOffsets");
1213 m_point_offset_info =
DatasetInfo(m_steps_group,
"PointOffsets");
1214 m_connectivity_offset_info =
DatasetInfo(m_steps_group,
"ConnectivityIdOffsets");
1216 m_offset_for_cell_offset_info =
DatasetInfo(
"_OffsetForCellOffsetInfo");
1217 m_part_offset_info =
DatasetInfo(
"_PartOffsetInfo");
1218 m_time_offset_info =
DatasetInfo(
"_TimeOffsetInfo");
1223 if (m_is_writer && !m_is_first_call) {
1224 IParallelMng* pm =
m_mesh->parallelMng();
1225 const Int32 nb_rank = pm->commSize();
1226 Int64 nb_current_step = _readInt64Attribute(m_steps_group,
"NSteps");
1228 info(4) <<
"NB_STEP=" << nb_current_step <<
" time_index=" << time_index
1229 <<
" current_time=" <<
m_times.back();
1230 const bool debug_times =
false;
1232 StandardArrayT<Real> a1(m_steps_group.id(),
"Values");
1233 UniqueArray<Real> times;
1234 a1.directRead(m_standard_types, times);
1235 info() <<
"TIMES=" << times;
1237 if ((nb_current_step + 1) != time_index) {
1238 info() <<
"[VtkHdf] go_backward detected";
1239 Int32 wanted_step = time_index - 1;
1242 _readAndSetOffset(m_cell_offset_info, wanted_step);
1243 _readAndSetOffset(m_point_offset_info, wanted_step);
1244 _readAndSetOffset(m_connectivity_offset_info, wanted_step);
1245 m_part_offset_info.setOffset(wanted_step * nb_rank);
1246 m_time_offset_info.setOffset(wanted_step);
1247 m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.offset() + wanted_step * nb_rank);
1260class VtkHdfV2PostProcessor
1273 bool use_collective_io =
true;
1274 Int64 max_write_size = 0;
1276 use_collective_io =
options()->useCollectiveWrite();
1277 max_write_size =
options()->maxWriteSize();
1279 auto w = std::make_unique<VtkHdfV2DataWriter>(
mesh(),
groups(), use_collective_io);
1280 w->setMaxWriteSize(max_write_size);
1281 w->setTimes(
times());
1283 w->setDirectoryName(dir.
file(
"vtkhdfv2"));
1284 m_writer = std::move(w);
1294 std::unique_ptr<IDataWriter> m_writer;
1300ARCANE_REGISTER_SERVICE_VTKHDFV2POSTPROCESSOR(VtkHdfV2PostProcessor,
1301 VtkHdfV2PostProcessor);
#define ARCANE_CHECK_POINTER(ptr)
Macro retournant le pointeur ptr s'il est non nul ou lançant une exception s'il est nul.
#define ARCANE_THROW(exception_class,...)
Macro pour envoyer une exception avec formatage.
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
CaseOptionsVtkHdfV2PostProcessor * options() const
Options du jeu de données du service.
ArcaneVtkHdfV2PostProcessorObject(const Arcane::ServiceBuildInfo &sbi)
Constructeur.
Vue modifiable d'un tableau d'un type T.
Vue constante d'un tableau de type T.
Vue constante sur une zone mémoire contigue contenant des éléments de taille fixe.
constexpr Int32 datatypeSize() const
Taille du type de donnée associé (1 par défaut)
constexpr const std::byte * data() const
Pointeur sur la zone mémoire.
Classe gérant un répertoire.
String file(const String &file_name) const override
Retourne le chemin complet du fichier file_name dans le répertoire.
Tableau 1D de taille fixe.
Encapsule un hid_t pour un dataset.
Encapsule un hid_t pour un fichier.
Encapsule un hid_t pour un groupe.
static bool hasParallelHdf5()
Vrai HDF5 est compilé avec le support de MPI.
Encapsule un hid_t pour une propriété (H5P*).
void createDatasetTransfertCollectiveMPIIO()
Créé une propriété de dataset pour MPIIO.
Encapsule un hid_t pour un dataspace.
Définition des types standards Arcane pour hdf5.
Interface d'une donnée tableau d'un type T.
Interface d'écriture des données d'une variable.
Exception lorsqu'une erreur d'entrée/sortie est détectée.
Interface du gestionnaire de parallélisme pour un sous-domaine.
virtual Int32 commRank() const =0
Rang de cette instance dans le communicateur.
virtual Int32 commSize() const =0
Nombre d'instance dans le communicateur.
virtual void allGather(ConstArrayView< char > send_buf, ArrayView< char > recv_buf)=0
Effectue un regroupement sur tous les processeurs. Il s'agit d'une opération collective....
Interface d'une variable.
virtual eDataType dataType() const =0
Type de la donnée gérée par la variable (Real, Integer, ...)
virtual eItemKind itemKind() const =0
Genre des entités du maillage sur lequel repose la variable.
virtual bool isPartial() const =0
Indique si la variable est partielle.
virtual Integer dimension() const =0
Dimension de la variable.
virtual String name() const =0
Nom de la variable.
Groupe d'entités de maillage.
Interface du gestionnaire des matériaux et des milieux d'un maillage.
static IMeshMaterialMng * getReference(const MeshHandleOrMesh &mesh_handle, bool create=true)
Récupère ou créé la référence associée à mesh.
RealConstArrayView times() override
Liste des temps sauvés.
const String & baseDirectoryName() override
Nom du répertoire de sortie des fichiers.
ItemGroupCollection groups() override
Liste des groupes à sauver.
Structure contenant les informations pour créer un service.
Vue pour un tableau 2D dont la taille est un 'Int64'.
Vue d'un tableau d'éléments de type T.
Constructeur de chaîne de caractère unicode.
Chaîne de caractères unicode.
const char * localstr() const
Retourne la conversion de l'instance dans l'encodage UTF-8.
TraceAccessor(ITraceMng *m)
Construit un accesseur via le gestionnaire de trace m.
TraceMessage info() const
Flot pour un message d'information.
TraceMessage warning() const
Flot pour un message d'avertissement.
Vecteur 1D de données avec sémantique par valeur (style STL).
void write(IVariable *var, IData *data) override
Ecrit les données data de la variable var.
IMeshMaterialMng * m_material_mng
Gestionnaire de matériaux associé (peut-être nul)
Int64 m_max_write_size
Taille maximale (en kilo-octet) pour une écriture.
String m_directory_name
Répertoire de sortie.
WritePartInfo _computeWritePartInfo(Int64 local_size)
Calcule l'offset de notre partie et le nombre total d'éléments.
IMesh * m_mesh
Maillage associé
HFile m_file_id
Identifiant HDF du fichier.
ItemGroupCollection m_groups
Liste des groupes à sauver.
UniqueArray< Real > m_times
Liste des temps.
void setMetaData(const String &meta_data) override
Positionne les infos des méta-données.
String m_full_filename
Nom du fichier HDF courant.
IDataWriter * dataWriter() override
Retourne l'écrivain associé à ce post-processeur.
void close() override
Ferme l'écrivain. Après fermeture, il ne peut plus être utilisé
void notifyBeginWrite() override
Notifie qu'une sortie va être effectuée avec les paramètres courants.
void notifyEndWrite() override
Notifie qu'une sortie vient d'être effectuée.
__host__ __device__ Real2 min(Real2 a, Real2 b)
Retourne le minimum de deux Real2.
ItemGroupT< Cell > CellGroup
Groupe de mailles.
ItemGroupT< Node > NodeGroup
Groupe de noeuds.
MeshVariableScalarRefT< Node, Real3 > VariableNodeReal3
Grandeur au noeud de type coordonnées.
Integer len(const char *s)
Retourne la longueur de la chaîne s.
Fonctions utilitaires pour Hdf5.
Active toujours les traces dans les parties Arcane concernant les matériaux.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Ref< TrueType > createRef(Args &&... args)
Créé une instance de type TrueType avec les arguments Args et retourne une référence dessus.
Collection< ItemGroup > ItemGroupCollection
Collection de groupes d'éléments du maillage.
ConstMemoryView makeConstMemoryView(const void *ptr, Int32 datatype_size, Int64 nb_element)
Créé une vue mémoire en lecture seule.
std::int64_t Int64
Type entier signé sur 64 bits.
Int32 Integer
Type représentant un entier.
bool operator<(const Item &item1, const Item &item2)
Compare deux entités.
eItemKind
Genre d'entité de maillage.
@ IK_Node
Entité de maillage de genre noeud.
@ IK_Cell
Entité de maillage de genre maille.
double Real
Type représentant un réel.
unsigned char Byte
Type d'un octet.
eDataType
Type d'une donnée.
@ DT_Int32
Donnée de type entier 32 bits.
@ DT_Real3
Donnée de type vecteur 3.
@ DT_Int64
Donnée de type entier 64 bits.
@ DT_Real2
Donnée de type vecteur 2.
@ DT_Real
Donnée de type réel.
@ Cell
Le maillage est AMR par maille.
std::int32_t Int32
Type entier signé sur 32 bits.
ConstArrayView< Real > RealConstArrayView
Equivalent C d'un tableau à une dimension de réels.
Conserve les infos sur les données à sauver et l'offset associé.
Classe pour conserver un couple (hdf_group,nom_du_dataset).
Classe pour conserver les information d'un offset.
Int64 offset() const
Valeur de l'offset. (-1) si on écrit à la fin du tableau.
Informations collectives sur un ItemGroup;.
WritePartInfo m_write_part_info
Informations sur l'écriture.
ItemGroup m_item_group
Groupe associé
Informations sur l'offset de la partie à écrire associée à un rang.
Int64 m_offset
Offset de mon rang.
Int64 m_size
Nombre d'éléments de mon rang.
Int64 m_total_size
Nombre d'éléments sur tous les rangs.