#include "arcane/utils/Collection.h"
#include "arcane/utils/Enumerator.h"
#include "arcane/utils/Iostream.h"
#include "arcane/utils/StringBuilder.h"
#include "arcane/utils/IOException.h"
#include "arcane/utils/FixedArray.h"
#include "arcane/utils/MemoryView.h"

#include "arcane/core/PostProcessorWriterBase.h"
#include "arcane/core/Directory.h"
#include "arcane/core/FactoryService.h"
#include "arcane/core/IDataWriter.h"
#include "arcane/core/IData.h"
#include "arcane/core/IItemFamily.h"
#include "arcane/core/VariableCollection.h"
#include "arcane/core/IParallelMng.h"
#include "arcane/core/IMesh.h"
#include "arcane/core/internal/VtkCellTypes.h"

#include "arcane/core/materials/IMeshMaterialMng.h"
#include "arcane/core/materials/IMeshEnvironment.h"

#include "arcane/hdf5/Hdf5Utils.h"
#include "arcane/hdf5/VtkHdfV2PostProcessor_axl.h"
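// This file implements a post-processor writing Arcane meshes and variables
// in the 'VtkHdfV2' file format: a single HDF5 file in which the datasets of
// each time step are appended to extensible datasets under the '/VTKHDF'
// group, with per-step offsets kept in the 'Steps' group.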
// Helper: views a single scalar as a one-element span.
template <typename DataType> Span<const DataType>
asConstSpan(const DataType* v)
{
  return { v, 1 };
}
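// Example: the scalar counters written below ("NumberOfCells",
// "NumberOfPoints", ...) go through this helper:
//   Int64 nb_cell_int64 = nb_cell;
//   _writeDataSet1DCollective<Int64>(..., asConstSpan(&nb_cell_int64));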
/*!
 * \brief Writer for meshes and variables in the 'VtkHdfV2' format.
 *
 * (Excerpt: elided members are marked with '// ...'.)
 */
class VtkHdfV2DataWriter
: public TraceAccessor
, public IDataWriter
{
 public:

  //! Keeps a (hdf_group, dataset_name) pair.
  struct DatasetGroupAndName
  {
    HGroup& group;
    String name;

    DatasetGroupAndName(HGroup& group_, const String& name_)
    : group(group_)
    , name(name_)
    {}
  };

  //! Information about a dataset offset.
  struct DatasetInfo
  {
    DatasetInfo() = default;
    explicit DatasetInfo(const String& name)
    : m_name(name)
    {}
    DatasetInfo(HGroup& group, const String& name)
    : m_group(&group)
    , m_name(name)
    {}
    bool isNull() const { return m_name.null(); }
    HGroup* group() const { return m_group; }
    const String& name() const { return m_name; }
    //! Offset value; (-1) means writing at the end of the dataset.
    Int64 offset() const { return m_offset; }
    void setOffset(Int64 v) { m_offset = v; }
    friend bool operator<(const DatasetInfo& s1, const DatasetInfo& s2)
    {
      return (s1.m_name < s2.m_name);
    }

   private:

    HGroup* m_group = nullptr;
    String m_name;
    Int64 m_offset = -1;
  };

  //! Information about the part written by one rank.
  struct WritePartInfo
  {
    void setTotalSize(Int64 v) { m_total_size = v; }
    void setSize(Int64 v) { m_size = v; }
    void setOffset(Int64 v) { m_offset = v; }
    Int64 totalSize() const { return m_total_size; }
    Int64 size() const { return m_size; }
    Int64 offset() const { return m_offset; }
    //! Offset of my rank.
    Int64 m_offset = 0;
    //! Number of elements of my rank.
    Int64 m_size = 0;
    //! Total number of elements over all ranks.
    Int64 m_total_size = 0;
  };

  //! Collective information about an ItemGroup.
  struct ItemGroupCollectiveInfo
  {
    explicit ItemGroupCollectiveInfo(const ItemGroup& g)
    : m_item_group(g)
    {}
    void setWritePartInfo(const WritePartInfo& part_info) { m_write_part_info = part_info; }
    const WritePartInfo& writePartInfo() const { return m_write_part_info; }
    //! Associated group.
    ItemGroup m_item_group;
    //! Part of the datasets written by this rank.
    WritePartInfo m_write_part_info;
  };

  //! Keeps the information about the data to save and the associated offset.
  struct DataInfo
  {
    DataInfo(const DatasetGroupAndName& dataset_, const DatasetInfo& dataset_info)
    : dataset(dataset_)
    , m_dataset_info(dataset_info)
    {}
    DataInfo(const DatasetGroupAndName& dataset_, const DatasetInfo& dataset_info,
             ItemGroupCollectiveInfo* group_info)
    : dataset(dataset_)
    , m_dataset_info(dataset_info)
    , m_group_info(group_info)
    {}
    DatasetInfo datasetInfo() const { return m_dataset_info; }

    DatasetGroupAndName dataset;
    DatasetInfo m_dataset_info;
    ItemGroupCollectiveInfo* m_group_info = nullptr;
  };

 public:

  VtkHdfV2DataWriter(IMesh* mesh, ItemGroupCollection groups, bool is_collective_io);

 public:

  void beginWrite(const VariableCollection& vars) override;
  void endWrite() override;
  void setMetaData(const String& meta_data) override;
  void write(IVariable* var, IData* data) override;

  void setTimes(RealConstArrayView times) { m_times = times; }
  void setDirectoryName(const String& dir_name) { m_directory_name = dir_name; }
  void setMaxWriteSize(Int64 v) { m_max_write_size = v; }

 private:

  //! Associated mesh.
  IMesh* m_mesh = nullptr;
  //! List of groups to save.
  ItemGroupCollection m_groups;
  //! List of saved times.
  UniqueArray<Real> m_times;
  //! Output directory.
  String m_directory_name;
  //! Name of the current HDF file.
  String m_full_filename;
  //! HDF identifier of the file.
  HFile m_file_id;

  HGroup m_top_group;
  HGroup m_cell_data_group;
  HGroup m_node_data_group;
  HGroup m_steps_group;
  HGroup m_point_data_offsets_group;
  HGroup m_cell_data_offsets_group;
  HGroup m_field_data_offsets_group;

  bool m_is_parallel = false;
  bool m_is_master_io = false;
  bool m_is_collective_io = false;
  bool m_is_first_call = false;
  bool m_is_writer = false;

  DatasetInfo m_cell_offset_info;
  DatasetInfo m_point_offset_info;
  DatasetInfo m_connectivity_offset_info;
  DatasetInfo m_offset_for_cell_offset_info;
  DatasetInfo m_part_offset_info;
  DatasetInfo m_time_offset_info;
  //! Offsets saved during the writes of the current step.
  std::map<DatasetInfo, Int64> m_offset_info_list;

  StandardTypes m_standard_types;

  ItemGroupCollectiveInfo m_all_cells_info;
  ItemGroupCollectiveInfo m_all_nodes_info;

  //! Associated material manager (may be null).
  IMeshMaterialMng* m_material_mng = nullptr;
  //! Collective information for the constituent groups.
  UniqueArray<Ref<ItemGroupCollectiveInfo>> m_materials_groups;

  //! Maximum size (in kilobytes) of a single write.
  Int64 m_max_write_size = 0;

 private:

  void _addStringAttribute(Hid& hid, const char* name, const String& value);

  template <typename DataType> void
  _writeDataSet1D(const DataInfo& data_info, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet1DUsingCollectiveIO(const DataInfo& data_info, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet1DCollective(const DataInfo& data_info, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet2D(const DataInfo& data_info, Span2<const DataType> values);
  template <typename DataType> void
  _writeDataSet2DUsingCollectiveIO(const DataInfo& data_info, Span2<const DataType> values);
  template <typename DataType> void
  _writeDataSet2DCollective(const DataInfo& data_info, Span2<const DataType> values);
  template <typename DataType> void
  _writeBasicTypeDataset(const DataInfo& data_info, IData* data);
  void _writeReal3Dataset(const DataInfo& data_info, IData* data);
  void _writeReal2Dataset(const DataInfo& data_info, IData* data);

  String _getFileName()
  {
    StringBuilder sb;
    // ... (builds the file name from the mesh name and the '.hdf' extension)
    return sb.toString();
  }

  template <typename DataType> void
  _writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                       Int64 dim1_size, Int64 dim2_size, const DataType* values_data,
                       bool is_collective);
  void _writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                            Int64 dim1_size, Int64 dim2_size, ConstMemoryView values_data,
                            const hid_t hdf_datatype_type, bool is_collective);
  void _addInt64Attribute(Hid& hid, const char* name, Int64 value);
  Int64 _readInt64Attribute(Hid& hid, const char* name);
  void _addInt64ArrayAttribute(Hid& hid, const char* name, Span<const Int64> values);
  void _openOrCreateGroups();
  void _closeGroups();
  void _readAndSetOffset(DatasetInfo& offset_info, Int32 wanted_step);
  void _initializeOffsets();
  void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info);
  //! Computes the offset of our part and the total number of elements.
  WritePartInfo _computeWritePartInfo(Int64 local_size);
  void _writeConstituentsGroups();
};
VtkHdfV2DataWriter::
VtkHdfV2DataWriter(IMesh* mesh, ItemGroupCollection groups, bool is_collective_io)
: TraceAccessor(mesh->traceMng())
, m_mesh(mesh)
, m_groups(groups)
, m_is_collective_io(is_collective_io)
, m_all_cells_info(mesh->allCells())
, m_all_nodes_info(mesh->allNodes())
{
  // (Reconstructed) retrieve the material manager if one exists; it stays
  // null when no materials are defined.
  m_material_mng = IMeshMaterialMng::getReference(m_mesh, false);
}
void VtkHdfV2DataWriter::
beginWrite(const VariableCollection& vars)
{
  ARCANE_UNUSED(vars);

  IParallelMng* pm = m_mesh->parallelMng();
  const Int32 nb_rank = pm->commSize();
  m_is_parallel = nb_rank > 1;
  m_is_master_io = pm->isMasterIO();

  // The first call corresponds to the first saved time.
  Int64 time_index = m_times.size();
  const bool is_first_call = (time_index < 2);
  m_is_first_call = is_first_call;
  if (is_first_call)
    info() << "WARNING: the 'VtkHdfV2' format implementation is experimental";

  String filename = _getFileName();
  Directory dir(m_directory_name);
  m_full_filename = dir.file(filename);

  // Collective MPI I/O is not available with the hybrid or thread-only
  // parallelism implementations.
  if (pm->isHybridImplementation() || pm->isThreadImplementation())
    m_is_collective_io = false;

  info() << "VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;

  // With collective I/O every rank writes into the file; otherwise only
  // the master rank does.
  m_is_writer = m_is_master_io || m_is_collective_io;

  HProperty plist_id;
  if (m_is_collective_io)
    plist_id.createFilePropertyMPIIO(pm);

  if (is_first_call && m_is_master_io)
    dir.createDirectory();

  if (m_is_collective_io) {
    // Collective I/O requires an HDF5 build with MPI support
    // (cf. Hdf5Utils::HInit::hasParallelHdf5()).
    // ...
  }

  if (m_is_writer) {
    // (Reconstructed) the file is created on the first call and re-opened
    // for appending afterwards, using 'plist_id' for MPI-IO when collective.
    if (is_first_call)
      m_file_id.openTruncate(m_full_filename, plist_id.id());
    else
      m_file_id.openAppend(m_full_filename, plist_id.id());
  }

  m_standard_types.initialize();

  _openOrCreateGroups();

  std::array<Int64, 2> version = { 2, 0 };
  _addInt64ArrayAttribute(m_top_group, "Version", version);
  _addStringAttribute(m_top_group, "Type", "UnstructuredGrid");

  _initializeItemGroupCollectiveInfos(m_all_cells_info);
  _initializeItemGroupCollectiveInfos(m_all_nodes_info);
  CellGroup all_cells = m_mesh->allCells();
  NodeGroup all_nodes = m_mesh->allNodes();
  const Int32 nb_cell = all_cells.size();
  const Int32 nb_node = all_nodes.size();

  Int32 total_nb_connected_node = 0;
  ENUMERATE_ (Cell, icell, all_cells) {
    Cell cell = *icell;
    total_nb_connected_node += cell.nodeIds().size();
  }

  // Fill the arrays describing the cells: connectivity, offsets,
  // ghost type, VTK cell type and unique ids.
  UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
  UniqueArray<Int64> cells_offset(nb_cell + 1);
  UniqueArray<unsigned char> cells_ghost_type(nb_cell);
  UniqueArray<unsigned char> cells_type(nb_cell);
  UniqueArray<Int64> cells_uid(nb_cell);
  cells_offset[0] = 0;
  Int32 connected_node_index = 0;
  ENUMERATE_ (Cell, icell, all_cells) {
    Int32 index = icell.index();
    Cell cell = *icell;
    cells_uid[index] = cell.uniqueId();
    // Mark ghost cells with VTK's 'duplicate cell' flag.
    unsigned char ghost_type = 0;
    bool is_ghost = !cell.isOwn();
    if (is_ghost)
      ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
    cells_ghost_type[index] = ghost_type;
    unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
    cells_type[index] = vtk_type;
    for (NodeLocalId node : cell.nodeIds()) {
      cells_connectivity[connected_node_index] = node;
      ++connected_node_index;
    }
    cells_offset[index + 1] = connected_node_index;
  }

  _initializeOffsets();
  // Save the mesh topology.
  _writeDataSet1DCollective<Int64>({ { m_top_group, "Offsets" }, m_offset_for_cell_offset_info },
                                   cells_offset);
  _writeDataSet1DCollective<Int64>({ { m_top_group, "Connectivity" }, m_connectivity_offset_info },
                                   cells_connectivity);
  _writeDataSet1DCollective<unsigned char>({ { m_top_group, "Types" }, m_cell_offset_info }, cells_type);

  Int64 nb_cell_int64 = nb_cell;
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfCells" }, m_part_offset_info },
                                   asConstSpan(&nb_cell_int64));
  Int64 nb_node_int64 = nb_node;
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfPoints" }, m_part_offset_info },
                                   asConstSpan(&nb_node_int64));
  Int64 number_of_connectivity_ids = cells_connectivity.size();
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfConnectivityIds" }, m_part_offset_info },
                                   asConstSpan(&number_of_connectivity_ids));
  // Node unique ids, ghost types and coordinates.
  UniqueArray<Int64> nodes_uid(nb_node);
  UniqueArray<unsigned char> nodes_ghost_type(nb_node);
  VariableNodeReal3& nodes_coordinates(m_mesh->nodesCoordinates());
  UniqueArray2<Real> points;
  points.resize(nb_node, 3);
  ENUMERATE_ (Node, inode, all_nodes) {
    Int32 index = inode.index();
    Node node = *inode;
    nodes_uid[index] = node.uniqueId();
    unsigned char ghost_type = 0;
    bool is_ghost = !node.isOwn();
    if (is_ghost)
      ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
    nodes_ghost_type[index] = ghost_type;
    Real3 pos = nodes_coordinates[inode];
    points[index][0] = pos.x;
    points[index][1] = pos.y;
    points[index][2] = pos.z;
  }
  // Save the information about the nodes.
  _writeDataSet1DCollective<Int64>({ { m_node_data_group, "GlobalNodeId" }, m_cell_offset_info }, nodes_uid);
  _writeDataSet1DCollective<unsigned char>({ { m_node_data_group, "vtkGhostType" }, m_cell_offset_info }, nodes_ghost_type);
  _writeDataSet2DCollective<Real>({ { m_top_group, "Points" }, m_point_offset_info }, points);

  // Save the information about the cells.
  _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group, "vtkGhostType" }, m_cell_offset_info }, cells_ghost_type);
  _writeDataSet1DCollective<Int64>({ { m_cell_data_group, "GlobalCellId" }, m_cell_offset_info }, cells_uid);

  // Save the information about the current time step.
  Real current_time = m_times[time_index - 1];
  _writeDataSet1D<Real>({ { m_steps_group, "Values" }, m_time_offset_info }, asConstSpan(&current_time));

  // Offset of the first part of this step (one part per rank).
  Int64 part_offset = (time_index - 1) * pm->commSize();
  _writeDataSet1D<Int64>({ { m_steps_group, "PartOffsets" }, m_time_offset_info }, asConstSpan(&part_offset));

  // Number of steps written so far.
  _addInt64Attribute(m_steps_group, "NSteps", time_index);

  _writeConstituentsGroups();
}
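// Bookkeeping written under '/VTKHDF/Steps' for step 'time_index':
// "Values[time_index - 1]" holds the physical time, "PartOffsets" the index
// of the first part of the step ((time_index - 1) * nb_rank, one part per
// rank), and the "NSteps" attribute the number of steps written so far.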
void VtkHdfV2DataWriter::
_writeConstituentsGroups()
{
  if (!m_material_mng)
    return;

  // For each environment, write the local ids of its cells.
  // (Loop header reconstructed; the surviving lines only show its body.)
  for (IMeshEnvironment* env : m_material_mng->environments()) {
    CellGroup cells = env->cells();
    Ref<ItemGroupCollectiveInfo> group_info_ref = createRef<ItemGroupCollectiveInfo>(cells);
    m_materials_groups.add(group_info_ref);
    ItemGroupCollectiveInfo& group_info = *group_info_ref.get();
    _initializeItemGroupCollectiveInfos(group_info);
    ConstArrayView<Int32> groups_ids = cells.view().localIds();
    DatasetGroupAndName dataset_group_name(m_top_group, String("Constituent_") + cells.name());
    info() << "Writing infos for group '" << cells.name() << "'";
    _writeDataSet1DCollective<Int32>({ dataset_group_name, m_cell_offset_info }, groups_ids);
  }
}
WritePartInfo VtkHdfV2DataWriter::
_computeWritePartInfo(Int64 local_size)
{
  // (Partially reconstructed) gather the local sizes of all ranks to
  // compute our offset and the total number of elements.
  IParallelMng* pm = m_mesh->parallelMng();
  Int32 nb_rank = pm->commSize();
  Int32 my_rank = pm->commRank();

  Int64 dim1_size = local_size;
  UniqueArray<Int64> all_sizes(nb_rank);
  pm->allGather(ConstArrayView<Int64>(1, &dim1_size), all_sizes);

  Int64 total_size = 0;
  for (Integer i = 0; i < nb_rank; ++i)
    total_size += all_sizes[i];
  Int64 my_index = 0;
  for (Integer i = 0; i < my_rank; ++i)
    my_index += all_sizes[i];

  WritePartInfo part_info;
  part_info.setTotalSize(total_size);
  part_info.setSize(local_size);
  part_info.setOffset(my_index);
  return part_info;
}
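// Example: with 3 ranks and local sizes {4, 5, 6}, every rank computes
// total_size = 15; rank 0 gets offset 0, rank 1 offset 4, rank 2 offset 9.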
void VtkHdfV2DataWriter::
_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info)
{
  Int64 dim1_size = group_info.m_item_group.size();
  // (Reconstructed) store the part information for later reuse by the
  // collective dataset writes.
  group_info.setWritePartInfo(_computeWritePartInfo(dim1_size));
}
// Splits [0, total_size) into nb_interval contiguous intervals and returns
// the (begin, size) pair of the interval at 'index'.
std::pair<Int64, Int64> _getInterval(Int64 index, Int64 nb_interval, Int64 total_size)
{
  Int64 n = total_size;
  Int64 isize = n / nb_interval;
  Int64 ibegin = index * isize;
  // The last interval also gets the remainder.
  if ((index + 1) == nb_interval)
    isize = n - ibegin;
  return { ibegin, isize };
}
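// Example: _getInterval(2, 3, 10) returns {6, 4}: isize = 10 / 3 = 3, the
// first two intervals are {0,3} and {3,3}, and the last extends to the end.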
void VtkHdfV2DataWriter::
_writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                     Int64 dim1_size, Int64 dim2_size,
                     ConstMemoryView values_data,
                     const hid_t hdf_type, bool is_collective)
{
  HGroup& group = data_info.dataset.group;
  const String& name = data_info.dataset.name;

  // Explicitly requested offset ((-1) means appending at the end).
  Int64 wanted_offset = data_info.datasetInfo().offset();

  static constexpr int MAX_DIM = 2;
  FixedArray<hsize_t, MAX_DIM> local_dims;
  local_dims[0] = dim1_size;
  local_dims[1] = dim2_size;

  // The first dimension is unlimited so the dataset can grow at each step.
  FixedArray<hsize_t, MAX_DIM> max_dims;
  max_dims[0] = H5S_UNLIMITED;
  max_dims[1] = dim2_size;

  HDataset dataset;
  Int64 write_offset = 0;
  Int64 my_index = 0;
  Int64 global_dim1_size = dim1_size;
  Int32 nb_participating_rank = 1;

  if (is_collective) {
    nb_participating_rank = m_mesh->parallelMng()->commSize();
    WritePartInfo part_info;
    if (data_info.m_group_info) {
      // Reuse the part information already computed for the item group.
      part_info = data_info.m_group_info->writePartInfo();
    }
    else {
      part_info = _computeWritePartInfo(dim1_size);
    }
    global_dim1_size = part_info.totalSize();
    my_index = part_info.offset();
  }

  HProperty write_plist_id;
  if (is_collective)
    write_plist_id.createDatasetTransfertCollectiveMPIIO();

  HSpace file_space;
  HSpace memory_space;
  FixedArray<hsize_t, MAX_DIM> hyperslab_offsets;
  herr_t herror = 0;

  if (m_is_first_call) {
    // First call: create a chunked (thus extensible) dataset.
    FixedArray<hsize_t, MAX_DIM> global_dims;
    global_dims[0] = global_dim1_size;
    global_dims[1] = dim2_size;
    // Chunk size: the global size divided by the number of participating
    // ranks, clamped to [1024, 10 * 1024 * 1024] elements.
    Int64 chunk_size = global_dim1_size / nb_participating_rank;
    if (chunk_size < 1024)
      chunk_size = 1024;
    const Int64 max_chunk_size = 1024 * 1024 * 10;
    chunk_size = math::min(chunk_size, max_chunk_size);
    FixedArray<hsize_t, MAX_DIM> chunk_dims;
    chunk_dims[0] = chunk_size;
    chunk_dims[1] = dim2_size;
    info() << "CHUNK nb_dim=" << nb_dim
           << " global_dim1_size=" << global_dim1_size
           << " chunk0=" << chunk_dims[0]
           << " chunk1=" << chunk_dims[1];
    file_space.createSimple(nb_dim, global_dims.data(), max_dims.data());
    HProperty plist_id;
    plist_id.create(H5P_DATASET_CREATE);
    H5Pset_chunk(plist_id.id(), nb_dim, chunk_dims.data());
    dataset.create(group, name.localstr(), hdf_type, file_space, HProperty{}, plist_id, HProperty{});

    hyperslab_offsets[0] = my_index;
    hyperslab_offsets[1] = 0;
  }
  else {
    // Following calls: open the dataset and extend its first dimension.
    dataset.open(group, name.localstr());
    file_space = dataset.getSpace();
    int nb_dimension = file_space.nbDimension();
    if (nb_dimension != nb_dim)
      ARCANE_THROW(IOException, "Bad dimension for dataset '{0}'", name);
    FixedArray<hsize_t, MAX_DIM> original_dims;
    file_space.getDimensions(original_dims.data(), nullptr);
    hsize_t offset0 = original_dims[0];
    // If a specific offset was requested (after going back to an earlier
    // time), write there instead of appending.
    if (wanted_offset >= 0) {
      offset0 = wanted_offset;
      info() << "Forcing offset to " << wanted_offset;
    }
    FixedArray<hsize_t, MAX_DIM> global_dims;
    global_dims[0] = offset0 + global_dim1_size;
    global_dims[1] = dim2_size;
    write_offset = offset0;
    if ((herror = dataset.setExtent(global_dims.data())) < 0)
      ARCANE_THROW(IOException, "Can not extend dataset '{0}'", name);
    file_space = dataset.getSpace();

    hyperslab_offsets[0] = offset0 + my_index;
    hyperslab_offsets[1] = 0;
    info(4) << "APPEND nb_dim=" << nb_dim
            << " dim0=" << global_dims[0]
            << " count0=" << local_dims[0]
            << " offsets0=" << hyperslab_offsets[0] << " name=" << name;
  }

  Int64 nb_write_byte = global_dim1_size * dim2_size * values_data.datatypeSize();

  // (Reconstructed) split the write into several parts when it exceeds
  // m_max_write_size (expressed in kilobytes).
  Int64 nb_interval = 1;
  if (m_max_write_size > 0)
    nb_interval = 1 + nb_write_byte / (m_max_write_size * 1024);
  info(4) << "WRITE global_size=" << nb_write_byte << " max_size=" << m_max_write_size
          << " nb_interval=" << nb_interval;

  for (Int64 i = 0; i < nb_interval; ++i) {
    auto [index, nb_element] = _getInterval(i, nb_interval, dim1_size);
    // Select the hyperslab of the file that will receive this part.
    FixedArray<hsize_t, MAX_DIM> dims;
    dims[0] = nb_element;
    dims[1] = dim2_size;
    FixedArray<hsize_t, MAX_DIM> offsets;
    offsets[0] = hyperslab_offsets[0] + index;
    offsets[1] = 0;
    if ((herror = H5Sselect_hyperslab(file_space.id(), H5S_SELECT_SET, offsets.data(),
                                      nullptr, dims.data(), nullptr)) < 0)
      ARCANE_THROW(IOException, "Can not select hyperslab for dataset '{0}'", name);
    memory_space.createSimple(nb_dim, dims.data());
    Int64 data_offset = index * values_data.datatypeSize() * dim2_size;
    if ((herror = dataset.write(hdf_type, values_data.data() + data_offset, memory_space, file_space, write_plist_id)) < 0)
      ARCANE_THROW(IOException, "Can not write dataset '{0}'", name);
  }

  // Remember the offset at which we wrote, so it can be saved at the end
  // of the step (see endWrite()).
  if (!data_info.datasetInfo().isNull())
    m_offset_info_list.insert(std::make_pair(data_info.datasetInfo(), write_offset));
}
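// Example of the append logic across steps (assuming 1000 points per step):
// "Points" is created with dim0 = 1000 at step 1, extended to 2000 at step 2,
// and the step-2 values are written at offset 1000; the offset dataset
// referenced by m_point_offset_info then contains {0, 1000}.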
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                     Int64 dim1_size, Int64 dim2_size, const DataType* values_data,
                     bool is_collective)
{
  const hid_t hdf_type = m_standard_types.nativeType(DataType{});
  ConstMemoryView mem_view = makeConstMemoryView(values_data, sizeof(DataType), dim1_size * dim2_size);
  _writeDataSetGeneric(data_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1D(const DataInfo& data_info, Span<const DataType> values)
{
  _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(), false);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1DUsingCollectiveIO(const DataInfo& data_info, Span<const DataType> values)
{
  _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(), true);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1DCollective(const DataInfo& data_info, Span<const DataType> values)
{
  if (!m_is_parallel)
    return _writeDataSet1D(data_info, values);
  if (m_is_collective_io)
    return _writeDataSet1DUsingCollectiveIO(data_info, values);

  // Without collective I/O, gather the values on the master rank and let
  // it do the write alone.
  UniqueArray<DataType> all_values;
  IParallelMng* pm = m_mesh->parallelMng();
  pm->gatherVariable(values.smallView(), all_values, pm->masterIORank());
  if (m_is_master_io)
    _writeDataSet1D<DataType>(data_info, all_values);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2D(const DataInfo& data_info, Span2<const DataType> values)
{
  _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(), false);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2DUsingCollectiveIO(const DataInfo& data_info, Span2<const DataType> values)
{
  _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(), true);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2DCollective(const DataInfo& data_info, Span2<const DataType> values)
{
  if (!m_is_parallel)
    return _writeDataSet2D(data_info, values);
  if (m_is_collective_io)
    return _writeDataSet2DUsingCollectiveIO(data_info, values);

  // Gather the values (flattened to 1D) on the master rank, then rebuild
  // a 2D view there and write it.
  Int64 dim2_size = values.dim2Size();
  UniqueArray<DataType> all_values;
  IParallelMng* pm = m_mesh->parallelMng();
  Span<const DataType> values_1d(values.data(), values.totalNbElement());
  pm->gatherVariable(values_1d.smallView(), all_values, pm->masterIORank());
  if (m_is_master_io) {
    Int64 dim1_size = all_values.size();
    if (dim2_size != 0)
      dim1_size = dim1_size / dim2_size;
    Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
    return _writeDataSet2D<DataType>(data_info, span2);
  }
}
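// Example: with 2 ranks each holding a (500 x 3) "Points" block, the master
// receives 3000 scalars and rebuilds a (1000 x 3) Span2 before writing.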
void VtkHdfV2DataWriter::
_addInt64ArrayAttribute(Hid& hid, const char* name, Span<const Int64> values)
{
  hsize_t len = values.size();
  hid_t aid = H5Screate_simple(1, &len, nullptr);
  hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
  if (attr < 0)
    ARCANE_FATAL("Can not create attribute '{0}'", name);
  int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
  if (ret < 0)
    ARCANE_FATAL("Can not write attribute '{0}'", name);
  H5Aclose(attr);
  H5Sclose(aid);
}
void VtkHdfV2DataWriter::
_addInt64Attribute(Hid& hid, const char* name, Int64 value)
{
  HSpace aid(H5Screate(H5S_SCALAR));
  HAttribute attr;
  // (Reconstructed) the attribute is created on the first call and
  // re-opened for update afterwards.
  if (m_is_first_call)
    attr.create(hid, name, H5T_NATIVE_INT64, aid);
  else
    attr.open(hid, name);
  if (attr.isBad())
    ARCANE_FATAL("Can not create attribute '{0}'", name);
  herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
  if (ret < 0)
    ARCANE_FATAL("Can not write attribute '{0}'", name);
}
Int64 VtkHdfV2DataWriter::
_readInt64Attribute(Hid& hid, const char* name)
{
  HAttribute attr;
  attr.open(hid, name);
  if (attr.isBad())
    ARCANE_FATAL("Can not open attribute '{0}'", name);
  Int64 value = 0;
  herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
  if (ret < 0)
    ARCANE_FATAL("Can not read attribute '{0}'", name);
  return value;
}
void VtkHdfV2DataWriter::
_addStringAttribute(Hid& hid, const char* name, const String& value)
{
  hid_t aid = H5Screate(H5S_SCALAR);
  hid_t attr_type = H5Tcopy(H5T_C_S1);
  H5Tset_size(attr_type, value.length());
  hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
  if (attr < 0)
    ARCANE_FATAL("Can not create attribute '{0}'", name);
  int ret = H5Awrite(attr, attr_type, value.localstr());
  ret = H5Tclose(attr_type);
  if (ret < 0)
    ARCANE_FATAL("Can not close type for attribute '{0}'", name);
  H5Aclose(attr);
  H5Sclose(aid);
}
void VtkHdfV2DataWriter::
endWrite()
{
  // Now that all the variables have been written, save the offsets
  // recorded during this step.
  if (m_is_writer) {
    for (const auto& i : m_offset_info_list) {
      Int64 offset = i.second;
      const DatasetInfo& offset_info = i.first;
      HGroup* hdf_group = offset_info.group();
      if (hdf_group)
        _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, asConstSpan(&offset));
    }
  }
  _closeGroups();
  m_file_id.close();
}
void VtkHdfV2DataWriter::
_openOrCreateGroups()
{
  // Hierarchy of groups used by the 'VTKHDF' format.
  m_top_group.openOrCreate(m_file_id, "VTKHDF");
  m_cell_data_group.openOrCreate(m_top_group, "CellData");
  m_node_data_group.openOrCreate(m_top_group, "PointData");
  m_steps_group.openOrCreate(m_top_group, "Steps");
  m_point_data_offsets_group.openOrCreate(m_steps_group, "PointDataOffsets");
  m_cell_data_offsets_group.openOrCreate(m_steps_group, "CellDataOffsets");
  m_field_data_offsets_group.openOrCreate(m_steps_group, "FieldDataOffsets");
}
void VtkHdfV2DataWriter::
_closeGroups()
{
  m_cell_data_group.close();
  m_node_data_group.close();
  m_point_data_offsets_group.close();
  m_cell_data_offsets_group.close();
  m_field_data_offsets_group.close();
  m_steps_group.close();
  m_top_group.close();
}
void VtkHdfV2DataWriter::
setMetaData(const String& meta_data)
{
  ARCANE_UNUSED(meta_data);
}

void VtkHdfV2DataWriter::
write(IVariable* var, IData* data)
{
  info(4) << "Write VtkHdfV2 var=" << var->name();

  eItemKind item_kind = var->itemKind();

  if (var->dimension() != 1)
    ARCANE_FATAL("Only export of scalar item variable is implemented (name={0})", var->name());
  if (var->isPartial())
    ARCANE_FATAL("Export of partial variable is not implemented");
  HGroup* group = nullptr;
  DatasetInfo offset_info;
  ItemGroupCollectiveInfo* group_info = nullptr;
  switch (item_kind) {
  case IK_Cell:
    group = &m_cell_data_group;
    offset_info = m_cell_offset_info;
    group_info = &m_all_cells_info;
    break;
  case IK_Node:
    group = &m_node_data_group;
    offset_info = m_point_offset_info;
    group_info = &m_all_nodes_info;
    break;
  default:
    ARCANE_FATAL("Only export of 'Cell' or 'Node' variable is implemented (name={0})", var->name());
  }

  eDataType data_type = var->dataType();
  DataInfo data_info({ *group, var->name() }, offset_info, group_info);
  switch (data_type) {
  case DT_Real:
    _writeBasicTypeDataset<Real>(data_info, data);
    break;
  case DT_Int64:
    _writeBasicTypeDataset<Int64>(data_info, data);
    break;
  case DT_Int32:
    _writeBasicTypeDataset<Int32>(data_info, data);
    break;
  case DT_Real3:
    _writeReal3Dataset(data_info, data);
    break;
  case DT_Real2:
    _writeReal2Dataset(data_info, data);
    break;
  default:
    warning() << String::format("Export for datatype '{0}' is not supported (var_name={1})", data_type, var->name());
  }
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeBasicTypeDataset(const DataInfo& data_info, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<DataType>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  _writeDataSet1DCollective(data_info, Span<const DataType>(true_data->view()));
}
void VtkHdfV2DataWriter::
_writeReal3Dataset(const DataInfo& data_info, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<Real3>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real3> values(true_data->view());
  Int32 nb_value = values.size();
  // Copy the Real3 values into a (nb_value x 3) array of Real.
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real3 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = v.z;
  }
  _writeDataSet2DCollective<Real>(data_info, scalar_values);
}
void VtkHdfV2DataWriter::
_writeReal2Dataset(const DataInfo& data_info, IData* data)
{
  // Real2 values are written with 3 components, like Real3, with z = 0.
  auto* true_data = dynamic_cast<IArrayDataT<Real2>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real2> values(true_data->view());
  Int32 nb_value = values.size();
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real2 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = 0.0;
  }
  _writeDataSet2DCollective<Real>(data_info, scalar_values);
}
void VtkHdfV2DataWriter::
_readAndSetOffset(DatasetInfo& offset_info, Int32 wanted_step)
{
  HGroup* hgroup = offset_info.group();
  ARCANE_CHECK_POINTER(hgroup);
  StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
  UniqueArray<Int64> values;
  a.directRead(m_standard_types, values);
  Int64 offset_value = values[wanted_step];
  offset_info.setOffset(offset_value);
  info() << "VALUES name=" << offset_info.name() << " values=" << values
         << " wanted_step=" << wanted_step << " v=" << offset_value;
}
void VtkHdfV2DataWriter::
_initializeOffsets()
{
  // Offsets stored in the 'Steps' group as described by the 'VTKHDF'
  // format, plus internal offsets (whose names begin with an underscore)
  // which are not part of the format.
  m_cell_offset_info = DatasetInfo(m_steps_group, "CellOffsets");
  m_point_offset_info = DatasetInfo(m_steps_group, "PointOffsets");
  m_connectivity_offset_info = DatasetInfo(m_steps_group, "ConnectivityIdOffsets");
  m_offset_for_cell_offset_info = DatasetInfo("_OffsetForCellOffsetInfo");
  m_part_offset_info = DatasetInfo("_PartOffsetInfo");
  m_time_offset_info = DatasetInfo("_TimeOffsetInfo");

  // If this is not the first call, check whether the computation went back
  // to an earlier time; in that case the offsets of the wanted step are
  // read back from the file instead of appending at the end.
  if (m_is_writer && !m_is_first_call) {
    IParallelMng* pm = m_mesh->parallelMng();
    const Int32 nb_rank = pm->commSize();
    Int64 nb_current_step = _readInt64Attribute(m_steps_group, "NSteps");
    const Int32 time_index = m_times.size();
    info(4) << "NB_STEP=" << nb_current_step << " time_index=" << time_index
            << " current_time=" << m_times.back();
    const bool debug_times = false;
    if (debug_times) {
      StandardArrayT<Real> a1(m_steps_group.id(), "Values");
      UniqueArray<Real> times;
      a1.directRead(m_standard_types, times);
      info() << "TIMES=" << times;
    }
    if ((nb_current_step + 1) != time_index) {
      info() << "[VtkHdf] go_backward detected";
      Int32 wanted_step = time_index - 1;
      // Re-read the offsets of the wanted step and force the next writes
      // to happen there.
      _readAndSetOffset(m_cell_offset_info, wanted_step);
      _readAndSetOffset(m_point_offset_info, wanted_step);
      _readAndSetOffset(m_connectivity_offset_info, wanted_step);
      m_part_offset_info.setOffset(wanted_step * nb_rank);
      m_time_offset_info.setOffset(wanted_step);
      m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.offset() + wanted_step * nb_rank);
    }
  }
}
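// Example: if the file already contains NSteps = 5 but the caller is now
// writing time_index = 4 (the run was restarted from an earlier dump),
// go_backward is detected, wanted_step = 3, and every offset is re-read
// from the 'Steps' datasets so step 4 is overwritten in place.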
/*!
 * \brief Post-processor writing in the 'VtkHdfV2' format.
 */
class VtkHdfV2PostProcessor
: public ArcaneVtkHdfV2PostProcessorObject
{
 public:

  explicit VtkHdfV2PostProcessor(const ServiceBuildInfo& sbi)
  : ArcaneVtkHdfV2PostProcessorObject(sbi)
  {
  }

  IDataWriter* dataWriter() override { return m_writer.get(); }

  void notifyBeginWrite() override
  {
    bool use_collective_io = true;
    Int64 max_write_size = 0;
    if (options()) {
      use_collective_io = options()->useCollectiveWrite();
      max_write_size = options()->maxWriteSize();
    }
    auto w = std::make_unique<VtkHdfV2DataWriter>(mesh(), groups(), use_collective_io);
    w->setMaxWriteSize(max_write_size);
    w->setTimes(times());
    Directory dir(baseDirectoryName());
    w->setDirectoryName(dir.file("vtkhdfv2"));
    m_writer = std::move(w);
  }

  void notifyEndWrite() override { m_writer = nullptr; }
  void close() override {}

 private:

  std::unique_ptr<IDataWriter> m_writer;
};

ARCANE_REGISTER_SERVICE_VTKHDFV2POSTPROCESSOR(VtkHdfV2PostProcessor,
                                              VtkHdfV2PostProcessor);
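// Typical usage (sketch): select this service as the post-processing format
// in the case file; its options map to options()->useCollectiveWrite() and
// options()->maxWriteSize() above. The exact option and XML element names
// come from the .axl descriptor and are an assumption here.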