14#include "arcane/utils/Collection.h"
15#include "arcane/utils/Enumerator.h"
16#include "arcane/utils/Iostream.h"
17#include "arcane/utils/StringBuilder.h"
18#include "arcane/utils/IOException.h"
19#include "arcane/utils/FixedArray.h"
20#include "arcane/utils/MemoryView.h"
22#include "arcane/core/PostProcessorWriterBase.h"
23#include "arcane/core/Directory.h"
24#include "arcane/core/FactoryService.h"
25#include "arcane/core/IDataWriter.h"
26#include "arcane/core/IData.h"
27#include "arcane/core/IItemFamily.h"
28#include "arcane/core/VariableCollection.h"
29#include "arcane/core/IParallelMng.h"
30#include "arcane/core/IMesh.h"
31#include "arcane/core/internal/IParallelMngInternal.h"
32#include "arcane/core/internal/VtkCellTypes.h"
33#include "arcane/core/internal/GatherGroup.h"
35#include "arcane/core/materials/IMeshMaterialMng.h"
36#include "arcane/core/materials/IMeshEnvironment.h"
38#include "arcane/hdf5/Hdf5Utils.h"
39#include "arcane/hdf5/VtkHdfV2PostProcessor_axl.h"
79 asConstSpan(
const T* v)
88class VtkHdfV2DataWriter
100 struct DatasetGroupAndName
104 DatasetGroupAndName(
HGroup& group_,
const String& name_)
128 DatasetInfo() =
default;
129 explicit DatasetInfo(
const String& name)
136 bool isNull()
const {
return m_name.null(); }
138 HGroup* group()
const {
return m_group; }
139 const String& name()
const {
return m_name; }
142 void setOffset(
Int64 v) { m_offset = v; }
143 friend bool operator<(
const DatasetInfo& s1,
const DatasetInfo& s2)
145 return (s1.m_name < s2.m_name);
150 HGroup* m_group =
nullptr;
179 struct ItemGroupCollectiveInfo
183 explicit ItemGroupCollectiveInfo(
const ItemGroup& g)
209 , m_dataset_info(dataset_info)
215 , m_dataset_info(dataset_info)
216 , m_group_info(group_info)
222 DatasetInfo datasetInfo()
const {
return m_dataset_info; }
238 void endWrite()
override;
276 HGroup m_point_data_offsets_group;
277 HGroup m_cell_data_offsets_group;
278 HGroup m_field_data_offsets_group;
280 bool m_is_parallel =
false;
281 bool m_is_collective_io =
false;
282 bool m_is_first_call =
false;
283 bool m_is_writer =
false;
292 std::map<DatasetInfo, Int64> m_offset_info_list;
296 ItemGroupCollectiveInfo m_all_cells_info;
297 ItemGroupCollectiveInfo m_all_nodes_info;
315 void _addStringAttribute(
Hid& hid,
const char* name,
const String& value);
317 template <
typename DataType>
void
319 template <
typename DataType>
void
321 template <
typename DataType>
void
323 template <
typename DataType>
void
325 template <
typename DataType>
void
327 template <
typename DataType>
void
329 template <
typename DataType>
void
338 return sb.toString();
340 template <
typename DataType>
void
342 Int64 dim1_size,
Int64 dim2_size,
const DataType* values_data,
344 void _writeDataSetGeneric(
const DataInfo& data_info,
GatherGroupInfo* gather_info,
Int32 nb_dim,
346 const hid_t hdf_datatype_type,
bool is_collective);
347 void _addInt64Attribute(
Hid& hid,
const char* name,
Int64 value);
348 Int64 _readInt64Attribute(
Hid& hid,
const char* name);
349 void _openOrCreateGroups();
351 void _readAndSetOffset(DatasetInfo& offset_info,
Int32 wanted_step);
352 void _initializeOffsets();
353 void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info,
GatherGroupInfo& gather_info);
355 void _writeConstituentsGroups();
363: TraceAccessor(mesh->traceMng())
366, m_is_collective_io(is_collective_io)
367, m_all_cells_info(mesh->allCells())
368, m_all_nodes_info(mesh->allNodes())
369, m_all_cells_gather_group_info(mesh->parallelMng(), is_collective_io)
370, m_all_nodes_gather_group_info(mesh->parallelMng(), is_collective_io)
377void VtkHdfV2DataWriter::
385 IParallelMng* pm =
m_mesh->parallelMng();
386 const Int32 nb_rank = pm->commSize();
387 m_is_parallel = nb_rank > 1;
390 const bool is_first_call = (time_index < 2);
391 m_is_first_call = is_first_call;
393 info() <<
"WARNING: L'implémentation au format 'VtkHdfV2' est expérimentale";
395 String filename = _getFileName();
409 if (pm->isThreadImplementation() && !pm->isHybridImplementation())
410 m_is_collective_io =
false;
413 info() <<
"VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;
418 bool is_master_io = pm->isMasterIO();
424 if (m_is_collective_io) {
425 m_writer = pm->_internalApi()->masterParallelIORank();
426 m_is_writer = (m_writer == pm->commRank());
429 m_writer = pm->masterIORank();
430 m_is_writer = is_master_io;
435 if (m_is_collective_io && m_is_writer)
436 plist_id.createFilePropertyMPIIO(pm);
439 if (is_first_call && is_master_io)
440 dir.createDirectory();
442 if (m_is_collective_io)
446 m_standard_types.initialize();
453 _openOrCreateGroups();
456 std::array<Int64, 2> version = { 2, 0 };
457 _addInt64ArrayAttribute(m_top_group,
"Version", version);
458 _addStringAttribute(m_top_group,
"Type",
"UnstructuredGrid");
463 _initializeItemGroupCollectiveInfos(m_all_cells_info, m_all_cells_gather_group_info);
464 _initializeItemGroupCollectiveInfos(m_all_nodes_info, m_all_nodes_gather_group_info);
469 const Int32 nb_cell = all_cells.size();
470 const Int32 nb_node = all_nodes.size();
472 Int32 total_nb_connected_node = 0;
475 total_nb_connected_node += cell.nodeIds().size();
480 UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
481 UniqueArray<Int64> cells_offset(nb_cell + 1);
482 UniqueArray<unsigned char> cells_ghost_type(nb_cell);
483 UniqueArray<unsigned char> cells_type(nb_cell);
484 UniqueArray<Int64> cells_uid(nb_cell);
487 Int32 connected_node_index = 0;
489 Int32 index = icell.index();
492 cells_uid[index] = cell.uniqueId();
495 bool is_ghost = !cell.isOwn();
497 ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
498 cells_ghost_type[index] = ghost_type;
500 unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
501 cells_type[index] = vtk_type;
502 for (NodeLocalId node : cell.nodeIds()) {
503 cells_connectivity[connected_node_index] = node;
504 ++connected_node_index;
506 cells_offset[index + 1] = connected_node_index;
510 _initializeOffsets();
514 GatherGroupInfo* ggi = ggi_ref.get();
517 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Offsets" }, m_offset_for_cell_offset_info }, ggi, cells_offset);
518 ggi->setNeedRecompute();
520 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Connectivity" }, m_connectivity_offset_info }, ggi, cells_connectivity);
521 ggi->setNeedRecompute();
523 _writeDataSet1DCollective<unsigned char>({ { m_top_group,
"Types" }, m_cell_offset_info }, &m_all_cells_gather_group_info, cells_type);
526 Int64 nb_cell_int64 = nb_cell;
527 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfCells" }, m_part_offset_info }, ggi,
528 asConstSpan(&nb_cell_int64));
530 Int64 nb_node_int64 = nb_node;
531 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfPoints" }, m_part_offset_info }, ggi,
532 asConstSpan(&nb_node_int64));
534 Int64 number_of_connectivity_ids = cells_connectivity.size();
535 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfConnectivityIds" }, m_part_offset_info }, ggi,
536 asConstSpan(&number_of_connectivity_ids));
538 ggi->setNeedRecompute();
542 UniqueArray<Int64> nodes_uid(nb_node);
543 UniqueArray<unsigned char> nodes_ghost_type(nb_node);
545 UniqueArray2<Real> points;
546 points.resize(nb_node, 3);
548 Int32 index = inode.index();
551 nodes_uid[index] = node.uniqueId();
554 bool is_ghost = !node.isOwn();
556 ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
557 nodes_ghost_type[index] = ghost_type;
559 Real3 pos = nodes_coordinates[inode];
560 points[index][0] = pos.x;
561 points[index][1] = pos.y;
562 points[index][2] = pos.z;
566 _writeDataSet1DCollective<Int64>({ { m_node_data_group,
"GlobalIds" }, m_cell_offset_info }, &m_all_nodes_gather_group_info, nodes_uid);
569 _writeDataSet1DCollective<unsigned char>({ { m_node_data_group,
"vtkGhostType" }, m_cell_offset_info }, &m_all_nodes_gather_group_info, nodes_ghost_type);
572 _writeDataSet2DCollective<Real>({ { m_top_group,
"Points" }, m_point_offset_info }, ggi, points);
573 ggi->setNeedRecompute();
577 _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group,
"vtkGhostType" }, m_cell_offset_info }, &m_all_cells_gather_group_info, cells_ghost_type);
581 _writeDataSet1DCollective<Int64>({ { m_cell_data_group,
"GlobalIds" }, m_cell_offset_info }, &m_all_cells_gather_group_info, cells_uid);
587 _writeDataSet1D<Real>({ { m_steps_group,
"Values" }, m_time_offset_info }, ggi, asConstSpan(¤t_time));
590 Int64 comm_size = pm->commSize();
591 Int64 part_offset = (time_index - 1) * comm_size;
592 _writeDataSet1D<Int64>({ { m_steps_group,
"PartOffsets" }, m_time_offset_info }, ggi, asConstSpan(&part_offset));
595 _addInt64Attribute(m_steps_group,
"NSteps", time_index);
599 _writeConstituentsGroups();
605void VtkHdfV2DataWriter::
606_writeConstituentsGroups()
617 m_materials_groups.add(group_info_ref);
620 m_gather_info_materials_groups.add(gather_info_ref);
623 GatherGroupInfo& gather_info = *gather_info_ref.get();
624 _initializeItemGroupCollectiveInfos(group_info, gather_info);
626 ConstArrayView<Int32> groups_ids = cells.view().localIds();
627 DatasetGroupAndName dataset_group_name(m_top_group, String(
"Constituent_") + cells.name());
629 info() <<
"Writing infos for group '" << cells.name() <<
"'";
630 _writeDataSet1DCollective<Int32>({ dataset_group_name, m_cell_offset_info, group_info_ref.get() }, gather_info_ref.get(), groups_ids);
649 Int64 dim1_size = local_size;
652 Int64 total_size = 0;
653 for (
Integer i = 0; i < nb_rank; ++i)
654 total_size += all_sizes[i];
657 for (
Integer i = 0; i < my_rank; ++i)
658 my_index += all_sizes[i];
661 part_info.setTotalSize(total_size);
662 part_info.setSize(local_size);
663 part_info.setOffset(my_index);
670void VtkHdfV2DataWriter::
671_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info,
GatherGroupInfo& gather_info)
673 Int32 dim1_size = group_info.m_item_group.size();
675 gather_info.setCollectiveIO(m_is_collective_io);
685 std::pair<Int64, Int64> _getInterval(Int64 index, Int64 nb_interval, Int64 total_size)
687 Int64 n = total_size;
688 Int64 isize = n / nb_interval;
689 Int64 ibegin = index * isize;
691 if ((index + 1) == nb_interval)
693 return { ibegin, isize };
705void VtkHdfV2DataWriter::
709 const hid_t hdf_type,
bool is_collective)
715 HGroup& group = data_info.dataset.group;
716 const String& name = data_info.dataset.name;
720 Int64 wanted_offset = data_info.datasetInfo().
offset();
722 static constexpr int MAX_DIM = 2;
732 local_dims[0] = dim1_size;
733 local_dims[1] = dim2_size;
741 max_dims[0] = H5S_UNLIMITED;
742 max_dims[1] = dim2_size;
745 Int64 write_offset = 0;
748 Int64 global_dim1_size = dim1_size;
749 Int32 nb_participating_rank = 1;
754 if (data_info.m_group_info) {
757 part_info = data_info.m_group_info->writePartInfo();
762 global_dim1_size = part_info.totalSize();
763 my_index = part_info.offset();
782 if (m_is_first_call) {
785 global_dims[0] = global_dim1_size;
786 global_dims[1] = dim2_size;
788 Int64 chunk_size = global_dim1_size / nb_participating_rank;
789 if (chunk_size < 1024)
791 const Int64 max_chunk_size = 1024 * 1024 * 10;
792 chunk_size =
math::min(chunk_size, max_chunk_size);
793 chunk_dims[0] = chunk_size;
794 chunk_dims[1] = dim2_size;
795 info() <<
"CHUNK nb_dim=" << nb_dim
796 <<
" global_dim1_size=" << global_dim1_size
797 <<
" chunk0=" << chunk_dims[0]
798 <<
" chunk1=" << chunk_dims[1]
800 file_space.createSimple(nb_dim, global_dims.data(), max_dims.data());
802 plist_id.create(H5P_DATASET_CREATE);
803 H5Pset_chunk(plist_id.id(), nb_dim, chunk_dims.data());
804 dataset.create(group, name.localstr(), hdf_type, file_space,
HProperty{}, plist_id,
HProperty{});
807 hyperslab_offsets[0] = my_index;
808 hyperslab_offsets[1] = 0;
814 dataset.open(group, name.
localstr());
815 file_space = dataset.getSpace();
816 int nb_dimension = file_space.nbDimension();
817 if (nb_dimension != nb_dim)
822 file_space.getDimensions(original_dims.data(),
nullptr);
823 hsize_t offset0 = original_dims[0];
826 if (wanted_offset >= 0) {
827 offset0 = wanted_offset;
828 info() <<
"Forcing offset to " << wanted_offset;
830 global_dims[0] = offset0 + global_dim1_size;
831 global_dims[1] = dim2_size;
832 write_offset = offset0;
835 if ((herror = dataset.setExtent(global_dims.data())) < 0)
837 file_space = dataset.getSpace();
839 hyperslab_offsets[0] = offset0 + my_index;
840 hyperslab_offsets[1] = 0;
841 info(4) <<
"APPEND nb_dim=" << nb_dim
842 <<
" dim0=" << global_dims[0]
843 <<
" count0=" << local_dims[0]
844 <<
" offsets0=" << hyperslab_offsets[0] <<
" name=" << name;
851 Int64 nb_interval = 1;
855 info(4) <<
"WRITE global_size=" << nb_write_byte <<
" max_size=" <<
m_max_write_size <<
" nb_interval=" << nb_interval;
857 for (
Int64 i = 0; i < nb_interval; ++i) {
858 auto [index, nb_element] = _getInterval(i, nb_interval, dim1_size);
861 dims[0] = nb_element;
864 offsets[0] = hyperslab_offsets[0] + index;
866 if ((herror = H5Sselect_hyperslab(file_space.id(), H5S_SELECT_SET, offsets.data(),
nullptr, dims.data(),
nullptr)) < 0)
870 memory_space.createSimple(nb_dim, dims.data());
873 if ((herror = dataset.write(hdf_type, values_data.
data() + data_offset, memory_space, file_space, write_plist_id)) < 0)
880 if (!data_info.datasetInfo().isNull())
881 m_offset_info_list.insert(std::make_pair(data_info.datasetInfo(), write_offset));
887template <
typename DataType>
void VtkHdfV2DataWriter::
889 Int64 dim1_size,
Int64 dim2_size,
const DataType* values_data,
892 const hid_t hdf_type = m_standard_types.nativeType(DataType{});
893 ConstMemoryView mem_view =
makeConstMemoryView(values_data,
sizeof(DataType), dim1_size * dim2_size);
894 _writeDataSetGeneric(data_info, gather_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
900template <
typename DataType>
void VtkHdfV2DataWriter::
903 _writeDataSetGeneric(data_info, gather_info, 1, values.size(), 1, values.data(),
false);
909template <
typename DataType>
void VtkHdfV2DataWriter::
912 _writeDataSetGeneric(data_info, gather_info, 1, values.size(), 1, values.data(),
true);
918template <
typename DataType>
void VtkHdfV2DataWriter::
925 gather_info->computeSizeT(values);
926 gg.setGatherGroupInfo(gather_info);
928 if (gg.isNeedGather()) {
929 UniqueArray<DataType> all_values;
930 gg.gatherToMasterIOT(values, all_values);
932 if (m_is_collective_io)
933 _writeDataSet1DUsingCollectiveIO(data_info, gather_info, all_values.constSpan());
935 _writeDataSet1D(data_info, gather_info, all_values.constSpan());
938 if (m_is_collective_io)
939 _writeDataSet1DUsingCollectiveIO(data_info, gather_info, values);
941 _writeDataSet1D(data_info, gather_info, values);
948template <
typename DataType>
void VtkHdfV2DataWriter::
951 _writeDataSetGeneric(data_info, gather_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
false);
957template <
typename DataType>
void VtkHdfV2DataWriter::
960 _writeDataSetGeneric(data_info, gather_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
true);
966template <
typename DataType>
void VtkHdfV2DataWriter::
973 gather_info->computeSizeT(values);
974 gg.setGatherGroupInfo(gather_info);
976 if (gg.isNeedGather()) {
977 UniqueArray2<DataType> all_values;
978 gg.gatherToMasterIOT(values, all_values);
980 if (m_is_collective_io)
981 _writeDataSet2DUsingCollectiveIO(data_info, gather_info, all_values.constSpan());
983 _writeDataSet2D(data_info, gather_info, all_values.constSpan());
986 if (m_is_collective_io)
987 _writeDataSet2DUsingCollectiveIO(data_info, gather_info, values);
989 _writeDataSet2D(data_info, gather_info, values);
996void VtkHdfV2DataWriter::
999 hsize_t
len = values.size();
1000 hid_t aid = H5Screate_simple(1, &len,
nullptr);
1001 hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
1004 int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
1014void VtkHdfV2DataWriter::
1015_addInt64Attribute(
Hid& hid,
const char* name,
Int64 value)
1017 HSpace aid(H5Screate(H5S_SCALAR));
1019 if (m_is_first_call)
1020 attr.create(hid, name, H5T_NATIVE_INT64, aid);
1022 attr.open(hid, name);
1025 herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
1033Int64 VtkHdfV2DataWriter::
1034_readInt64Attribute(
Hid& hid,
const char* name)
1037 attr.open(hid, name);
1041 herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
1050void VtkHdfV2DataWriter::
1051_addStringAttribute(
Hid& hid,
const char* name,
const String& value)
1053 hid_t aid = H5Screate(H5S_SCALAR);
1054 hid_t attr_type = H5Tcopy(H5T_C_S1);
1055 H5Tset_size(attr_type, value.length());
1056 hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
1059 int ret = H5Awrite(attr, attr_type, value.localstr());
1060 ret = H5Tclose(attr_type);
1070void VtkHdfV2DataWriter::
1076 for (
const auto& i : m_offset_info_list) {
1077 Int64 offset = i.second;
1079 HGroup* hdf_group = offset_info.group();
1083 _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, ggi.get(), asConstSpan(&offset));
1094void VtkHdfV2DataWriter::
1095_openOrCreateGroups()
1098 m_top_group.openOrCreate(
m_file_id,
"VTKHDF");
1099 m_cell_data_group.openOrCreate(m_top_group,
"CellData");
1100 m_node_data_group.openOrCreate(m_top_group,
"PointData");
1101 m_steps_group.openOrCreate(m_top_group,
"Steps");
1102 m_point_data_offsets_group.openOrCreate(m_steps_group,
"PointDataOffsets");
1103 m_cell_data_offsets_group.openOrCreate(m_steps_group,
"CellDataOffsets");
1104 m_field_data_offsets_group.openOrCreate(m_steps_group,
"FieldDataOffsets");
1110void VtkHdfV2DataWriter::
1113 m_cell_data_group.close();
1114 m_node_data_group.close();
1115 m_point_data_offsets_group.close();
1116 m_cell_data_offsets_group.close();
1117 m_field_data_offsets_group.close();
1118 m_steps_group.close();
1119 m_top_group.close();
1128 ARCANE_UNUSED(meta_data);
1137 info(4) <<
"Write VtkHdfV2 var=" << var->
name();
1142 ARCANE_FATAL(
"Only export of scalar item variable is implemented (name={0})", var->
name());
1144 ARCANE_FATAL(
"Export of partial variable is not implemented");
1150 switch (item_kind) {
1152 group = &m_cell_data_group;
1153 offset_info = m_cell_offset_info;
1154 group_info = &m_all_cells_info;
1155 gather_info = &m_all_cells_gather_group_info;
1158 group = &m_node_data_group;
1159 offset_info = m_point_offset_info;
1160 group_info = &m_all_nodes_info;
1161 gather_info = &m_all_nodes_gather_group_info;
1164 ARCANE_FATAL(
"Only export of 'Cell' or 'Node' variable is implemented (name={0})", var->
name());
1171 switch (data_type) {
1173 _writeBasicTypeDataset<Real>(data_info, gather_info, data);
1176 _writeBasicTypeDataset<Int64>(data_info, gather_info, data);
1179 _writeBasicTypeDataset<Int32>(data_info, gather_info, data);
1182 _writeReal3Dataset(data_info, gather_info, data);
1185 _writeReal2Dataset(data_info, gather_info, data);
1188 warning() << String::format(
"Export for datatype '{0}' is not supported (var_name={1})", data_type, var->
name());
1195template <
typename DataType>
void VtkHdfV2DataWriter::
1206void VtkHdfV2DataWriter::
1207_writeReal3Dataset(
const DataInfo& data_info, GatherGroupInfo* gather_info, IData* data)
1209 auto* true_data =
dynamic_cast<IArrayDataT<Real3>*
>(data);
1211 SmallSpan<const Real3> values(true_data->view());
1212 Int32 nb_value = values.size();
1214 UniqueArray2<Real> scalar_values;
1215 scalar_values.resize(nb_value, 3);
1216 for (Int32 i = 0; i < nb_value; ++i) {
1217 Real3 v = values[i];
1218 scalar_values[i][0] = v.x;
1219 scalar_values[i][1] = v.y;
1220 scalar_values[i][2] = v.z;
1222 _writeDataSet2DCollective<Real>(data_info, gather_info, scalar_values);
1228void VtkHdfV2DataWriter::
1232 auto* true_data =
dynamic_cast<IArrayDataT<Real2>*
>(data);
1234 SmallSpan<const Real2> values(true_data->view());
1235 Int32 nb_value = values.size();
1236 UniqueArray2<Real> scalar_values;
1237 scalar_values.resize(nb_value, 3);
1238 for (
Int32 i = 0; i < nb_value; ++i) {
1239 Real2 v = values[i];
1240 scalar_values[i][0] = v.x;
1241 scalar_values[i][1] = v.y;
1242 scalar_values[i][2] = 0.0;
1244 _writeDataSet2DCollective<Real>(data_info, gather_info, scalar_values);
1250void VtkHdfV2DataWriter::
1251_readAndSetOffset(DatasetInfo& offset_info,
Int32 wanted_step)
1253 HGroup* hgroup = offset_info.group();
1255 StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
1256 UniqueArray<Int64> values;
1257 a.directRead(m_standard_types, values);
1258 Int64 offset_value = values[wanted_step];
1259 offset_info.setOffset(offset_value);
1260 info() <<
"VALUES name=" << offset_info.name() <<
" values=" << values
1261 <<
" wanted_step=" << wanted_step <<
" v=" << offset_value;
1267void VtkHdfV2DataWriter::
1288 m_cell_offset_info =
DatasetInfo(m_steps_group,
"CellOffsets");
1289 m_point_offset_info =
DatasetInfo(m_steps_group,
"PointOffsets");
1290 m_connectivity_offset_info =
DatasetInfo(m_steps_group,
"ConnectivityIdOffsets");
1292 m_offset_for_cell_offset_info =
DatasetInfo(
"_OffsetForCellOffsetInfo");
1293 m_part_offset_info =
DatasetInfo(
"_PartOffsetInfo");
1294 m_time_offset_info =
DatasetInfo(
"_TimeOffsetInfo");
1299 if (m_is_writer && !m_is_first_call) {
1300 IParallelMng* pm =
m_mesh->parallelMng();
1301 const Int32 nb_rank = pm->commSize();
1302 Int64 nb_current_step = _readInt64Attribute(m_steps_group,
"NSteps");
1304 info(4) <<
"NB_STEP=" << nb_current_step <<
" time_index=" << time_index
1305 <<
" current_time=" <<
m_times.back();
1306 const bool debug_times =
false;
1308 StandardArrayT<Real> a1(m_steps_group.id(),
"Values");
1309 UniqueArray<Real> times;
1310 a1.directRead(m_standard_types, times);
1311 info() <<
"TIMES=" << times;
1313 if ((nb_current_step + 1) != time_index) {
1314 info() <<
"[VtkHdf] go_backward detected";
1315 Int32 wanted_step = time_index - 1;
1318 _readAndSetOffset(m_cell_offset_info, wanted_step);
1319 _readAndSetOffset(m_point_offset_info, wanted_step);
1320 _readAndSetOffset(m_connectivity_offset_info, wanted_step);
1321 m_part_offset_info.setOffset(wanted_step * nb_rank);
1322 m_time_offset_info.setOffset(wanted_step);
1323 m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.offset() + wanted_step * nb_rank);
1336class VtkHdfV2PostProcessor
1349 bool use_collective_io =
true;
1350 Int64 max_write_size = 0;
1352 use_collective_io =
options()->useCollectiveWrite();
1353 max_write_size =
options()->maxWriteSize();
1355 auto w = std::make_unique<VtkHdfV2DataWriter>(
mesh(),
groups(), use_collective_io);
1356 w->setMaxWriteSize(max_write_size);
1357 w->setTimes(
times());
1359 w->setDirectoryName(dir.
file(
"vtkhdfv2"));
1360 m_writer = std::move(w);
1370 std::unique_ptr<IDataWriter> m_writer;
1376ARCANE_REGISTER_SERVICE_VTKHDFV2POSTPROCESSOR(VtkHdfV2PostProcessor,
1377 VtkHdfV2PostProcessor);
#define ARCANE_CHECK_POINTER(ptr)
Macro retournant le pointeur ptr s'il est non nul ou lançant une exception s'il est nul.
#define ARCANE_THROW(exception_class,...)
Macro pour envoyer une exception avec formatage.
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
CaseOptionsVtkHdfV2PostProcessor * options() const
Options du jeu de données du service.
ArcaneVtkHdfV2PostProcessorObject(const Arcane::ServiceBuildInfo &sbi)
Constructeur.
Vue modifiable d'un tableau d'un type T.
Vue constante d'un tableau de type T.
Vue constante sur une zone mémoire contigue contenant des éléments de taille fixe.
constexpr Int32 datatypeSize() const
Taille du type de donnée associé (1 par défaut)
constexpr const std::byte * data() const
Pointeur sur la zone mémoire.
Classe gérant un répertoire.
String file(const String &file_name) const override
Retourne le chemin complet du fichier file_name dans le répertoire.
Tableau 1D de taille fixe.
Classe permettant de calculer et de conserver les informations de regroupements.
void computeSize(Int32 nb_elem_in) override
Méthode permettant de calculer les informations de regroupements.
Int32 nbWriterGlobal() override
Méthode permettant de connaitre le nombre de sous-domaines écrivains.
Int32 nbElemOutput() override
Méthode permettant de connaitre le nombre d'éléments que notre sous-domaine devra traiter après réception.
Encapsule un hid_t pour un dataset.
Encapsule un hid_t pour un fichier.
Encapsule un hid_t pour un groupe.
static void useMutex(bool is_active, IParallelMng *pm)
Fonction permettant d'activer ou de désactiver les verrous à chaque appel à HDF5.
static constexpr bool hasParallelHdf5()
Vrai si HDF5 est compilé avec le support de MPI.
Encapsule un hid_t pour une propriété (H5P*).
void createDatasetTransfertCollectiveMPIIO()
Créé une propriété de dataset pour MPIIO.
void createDatasetTransfertIndependentMPIIO()
Créé une propriété de dataset pour MPIIO.
Encapsule un hid_t pour un dataspace.
Définition des types standards Arcane pour hdf5.
Interface d'une donnée tableau d'un type T.
Interface d'écriture des données d'une variable.
Exception lorsqu'une erreur d'entrée/sortie est détectée.
Interface du gestionnaire de parallélisme pour un sous-domaine.
virtual Int32 commRank() const =0
Rang de cette instance dans le communicateur.
virtual Int32 commSize() const =0
Nombre d'instance dans le communicateur.
virtual void allGather(ConstArrayView< char > send_buf, ArrayView< char > recv_buf)=0
Effectue un regroupement sur tous les processeurs. Il s'agit d'une opération collective....
Interface d'une variable.
virtual eDataType dataType() const =0
Type de la donnée gérée par la variable (Real, Integer, ...)
virtual eItemKind itemKind() const =0
Genre des entités du maillage sur lequel repose la variable.
virtual bool isPartial() const =0
Indique si la variable est partielle.
virtual Integer dimension() const =0
Dimension de la variable.
virtual String name() const =0
Nom de la variable.
Groupe d'entités de maillage.
Interface du gestionnaire des matériaux et des milieux d'un maillage.
static IMeshMaterialMng * getReference(const MeshHandleOrMesh &mesh_handle, bool create=true)
Récupère ou créé la référence associée à mesh.
ItemGroupCollection groups() override
Liste des groupes à sauver.
ConstArrayView< Real > times() override
Liste des temps sauvés.
const String & baseDirectoryName() override
Nom du répertoire de sortie des fichiers.
Structure contenant les informations pour créer un service.
Vue pour un tableau 2D dont la taille est un 'Int64'.
Vue d'un tableau d'éléments de type T.
Constructeur de chaîne de caractère unicode.
Chaîne de caractères unicode.
const char * localstr() const
Retourne la conversion de l'instance dans l'encodage UTF-8.
TraceAccessor(ITraceMng *m)
Construit un accesseur via le gestionnaire de trace m.
TraceMessage info() const
Flot pour un message d'information.
TraceMessage warning() const
Flot pour un message d'avertissement.
Vecteur 1D de données avec sémantique par valeur (style STL).
void write(IVariable *var, IData *data) override
Ecrit les données data de la variable var.
IMeshMaterialMng * m_material_mng
Gestionnaire de matériaux associé (peut-être nul)
Int64 m_max_write_size
Taille maximale (en kilo-octet) pour une écriture.
String m_directory_name
Répertoire de sortie.
WritePartInfo _computeWritePartInfo(Int64 local_size)
Calcule l'offset de notre partie et le nombre total d'éléments.
IMesh * m_mesh
Maillage associé
HFile m_file_id
Identifiant HDF du fichier.
ItemGroupCollection m_groups
Liste des groupes à sauver.
UniqueArray< Real > m_times
Liste des temps.
void setMetaData(const String &meta_data) override
Positionne les infos des méta-données.
String m_full_filename
Nom du fichier HDF courant.
IDataWriter * dataWriter() override
Retourne l'écrivain associé à ce post-processeur.
void close() override
Ferme l'écrivain. Après fermeture, il ne peut plus être utilisé
void notifyBeginWrite() override
Notifie qu'une sortie va être effectuée avec les paramètres courants.
void notifyEndWrite() override
Notifie qu'une sortie vient d'être effectuée.
__host__ __device__ Real2 min(Real2 a, Real2 b)
Retourne le minimum de deux Real2.
ItemGroupT< Cell > CellGroup
Groupe de mailles.
ItemGroupT< Node > NodeGroup
Groupe de noeuds.
MeshVariableScalarRefT< Node, Real3 > VariableNodeReal3
Grandeur au noeud de type coordonnées.
Integer len(const char *s)
Retourne la longueur de la chaîne s.
Fonctions utilitaires pour Hdf5.
Active toujours les traces dans les parties Arcane concernant les matériaux.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Ref< TrueType > createRef(Args &&... args)
Créé une instance de type TrueType avec les arguments Args et retourne une référence dessus.
Collection< ItemGroup > ItemGroupCollection
Collection de groupes d'éléments du maillage.
std::int64_t Int64
Type entier signé sur 64 bits.
Int32 Integer
Type représentant un entier.
bool operator<(const Item &item1, const Item &item2)
Compare deux entités.
ConstMemoryView makeConstMemoryView(const void *ptr, Int32 datatype_size, Int64 nb_element)
Créé une vue mémoire en lecture seule.
eItemKind
Genre d'entité de maillage.
@ IK_Node
Entité de maillage de genre noeud.
@ IK_Cell
Entité de maillage de genre maille.
double Real
Type représentant un réel.
unsigned char Byte
Type d'un octet.
eDataType
Type d'une donnée.
@ DT_Int32
Donnée de type entier 32 bits.
@ DT_Real3
Donnée de type vecteur 3.
@ DT_Int64
Donnée de type entier 64 bits.
@ DT_Real2
Donnée de type vecteur 2.
@ DT_Real
Donnée de type réel.
@ Cell
Le maillage est AMR par maille.
std::int32_t Int32
Type entier signé sur 32 bits.
ConstArrayView< Real > RealConstArrayView
Equivalent C d'un tableau à une dimension de réels.
Conserve les infos sur les données à sauver et l'offset associé.
Classe pour conserver un couple (hdf_group,nom_du_dataset).
Classe pour conserver les information d'un offset.
Int64 offset() const
Valeur de l'offset. (-1) si on écrit à la fin du tableau.
Informations collectives sur un ItemGroup.
WritePartInfo m_write_part_info
Informations sur l'écriture.
ItemGroup m_item_group
Groupe associé
Informations sur l'offset de la partie à écrire associée à un rang.
Int64 m_offset
Offset de mon rang.
Int64 m_size
Nombre d'éléments de mon rang.
Int64 m_total_size
Nombre d'éléments sur tous les rangs.