14#include "arcane/utils/Collection.h"
15#include "arcane/utils/Enumerator.h"
16#include "arcane/utils/Iostream.h"
17#include "arcane/utils/StringBuilder.h"
18#include "arcane/utils/IOException.h"
19#include "arcane/utils/FixedArray.h"
20#include "arcane/utils/MemoryView.h"
22#include "arcane/core/PostProcessorWriterBase.h"
23#include "arcane/core/Directory.h"
24#include "arcane/core/FactoryService.h"
25#include "arcane/core/IDataWriter.h"
26#include "arcane/core/IData.h"
27#include "arcane/core/IItemFamily.h"
28#include "arcane/core/VariableCollection.h"
29#include "arcane/core/IParallelMng.h"
30#include "arcane/core/IMesh.h"
31#include "arcane/core/internal/IParallelMngInternal.h"
32#include "arcane/core/internal/VtkCellTypes.h"
34#include "arcane/core/materials/IMeshMaterialMng.h"
35#include "arcane/core/materials/IMeshEnvironment.h"
37#include "arcane/hdf5/Hdf5Utils.h"
38#include "arcane/hdf5/VtkHdfV2PostProcessor_axl.h"
78 asConstSpan(
const T* v)
87class VtkHdfV2DataWriter
99 struct DatasetGroupAndName
103 DatasetGroupAndName(
HGroup& group_,
const String& name_)
127 DatasetInfo() =
default;
128 explicit DatasetInfo(
const String& name)
135 bool isNull()
const {
return m_name.null(); }
137 HGroup* group()
const {
return m_group; }
138 const String& name()
const {
return m_name; }
141 void setOffset(
Int64 v) { m_offset = v; }
142 friend bool operator<(
const DatasetInfo& s1,
const DatasetInfo& s2)
144 return (s1.m_name < s2.m_name);
149 HGroup* m_group =
nullptr;
178 struct ItemGroupCollectiveInfo
182 explicit ItemGroupCollectiveInfo(
const ItemGroup& g)
208 , m_dataset_info(dataset_info)
214 , m_dataset_info(dataset_info)
215 , m_group_info(group_info)
221 DatasetInfo datasetInfo()
const {
return m_dataset_info; }
237 void endWrite()
override;
275 HGroup m_point_data_offsets_group;
276 HGroup m_cell_data_offsets_group;
277 HGroup m_field_data_offsets_group;
279 bool m_is_parallel =
false;
280 bool m_is_collective_io =
false;
281 bool m_is_first_call =
false;
282 bool m_is_writer =
false;
291 std::map<DatasetInfo, Int64> m_offset_info_list;
295 ItemGroupCollectiveInfo m_all_cells_info;
296 ItemGroupCollectiveInfo m_all_nodes_info;
310 void _addStringAttribute(
Hid& hid,
const char* name,
const String& value);
312 template <
typename DataType>
void
314 template <
typename DataType>
void
316 template <
typename DataType>
void
318 template <
typename DataType>
void
320 template <
typename DataType>
void
322 template <
typename DataType>
void
324 template <
typename DataType>
void
325 _writeBasicTypeDataset(
const DataInfo& data_info,
IData* data);
326 void _writeReal3Dataset(
const DataInfo& data_info,
IData* data);
327 void _writeReal2Dataset(
const DataInfo& data_info,
IData* data);
333 return sb.toString();
335 template <
typename DataType>
void
336 _writeDataSetGeneric(
const DataInfo& data_info,
Int32 nb_dim,
337 Int64 dim1_size,
Int64 dim2_size,
const DataType* values_data,
339 void _writeDataSetGeneric(
const DataInfo& data_info,
Int32 nb_dim,
341 const hid_t hdf_datatype_type,
bool is_collective);
342 void _addInt64Attribute(
Hid& hid,
const char* name,
Int64 value);
343 Int64 _readInt64Attribute(
Hid& hid,
const char* name);
344 void _openOrCreateGroups();
346 void _readAndSetOffset(DatasetInfo& offset_info,
Int32 wanted_step);
347 void _initializeOffsets();
348 void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info);
350 void _writeConstituentsGroups();
358: TraceAccessor(mesh->traceMng())
361, m_is_collective_io(is_collective_io)
362, m_all_cells_info(mesh->allCells())
363, m_all_nodes_info(mesh->allNodes())
370void VtkHdfV2DataWriter::
378 IParallelMng* pm =
m_mesh->parallelMng();
379 const Int32 nb_rank = pm->commSize();
380 m_is_parallel = nb_rank > 1;
383 const bool is_first_call = (time_index < 2);
384 m_is_first_call = is_first_call;
386 info() <<
"WARNING: L'implémentation au format 'VtkHdfV2' est expérimentale";
388 String filename = _getFileName();
402 if (pm->isThreadImplementation() && !pm->isHybridImplementation())
403 m_is_collective_io =
false;
406 info() <<
"VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;
411 bool is_master_io = pm->isMasterIO();
417 if (m_is_collective_io) {
418 m_writer = pm->_internalApi()->masterParallelIORank();
419 m_is_writer = (m_writer == pm->commRank());
422 m_writer = pm->masterIORank();
423 m_is_writer = is_master_io;
428 if (m_is_collective_io && m_is_writer)
429 plist_id.createFilePropertyMPIIO(pm);
432 if (is_first_call && is_master_io)
433 dir.createDirectory();
435 if (m_is_collective_io)
439 m_standard_types.initialize();
446 _openOrCreateGroups();
449 std::array<Int64, 2> version = { 2, 0 };
450 _addInt64ArrayAttribute(m_top_group,
"Version", version);
451 _addStringAttribute(m_top_group,
"Type",
"UnstructuredGrid");
456 _initializeItemGroupCollectiveInfos(m_all_cells_info);
457 _initializeItemGroupCollectiveInfos(m_all_nodes_info);
462 const Int32 nb_cell = all_cells.size();
463 const Int32 nb_node = all_nodes.size();
465 Int32 total_nb_connected_node = 0;
468 total_nb_connected_node += cell.nodeIds().size();
473 UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
474 UniqueArray<Int64> cells_offset(nb_cell + 1);
475 UniqueArray<unsigned char> cells_ghost_type(nb_cell);
476 UniqueArray<unsigned char> cells_type(nb_cell);
477 UniqueArray<Int64> cells_uid(nb_cell);
480 Int32 connected_node_index = 0;
482 Int32 index = icell.index();
485 cells_uid[index] = cell.uniqueId();
488 bool is_ghost = !cell.isOwn();
490 ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
491 cells_ghost_type[index] = ghost_type;
493 unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
494 cells_type[index] = vtk_type;
495 for (NodeLocalId node : cell.nodeIds()) {
496 cells_connectivity[connected_node_index] = node;
497 ++connected_node_index;
499 cells_offset[index + 1] = connected_node_index;
503 _initializeOffsets();
506 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Offsets" }, m_offset_for_cell_offset_info }, cells_offset);
508 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Connectivity" }, m_connectivity_offset_info },
510 _writeDataSet1DCollective<unsigned char>({ { m_top_group,
"Types" }, m_cell_offset_info }, cells_type);
513 Int64 nb_cell_int64 = nb_cell;
514 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfCells" }, m_part_offset_info },
515 asConstSpan(&nb_cell_int64));
516 Int64 nb_node_int64 = nb_node;
517 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfPoints" }, m_part_offset_info },
518 asConstSpan(&nb_node_int64));
519 Int64 number_of_connectivity_ids = cells_connectivity.size();
520 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfConnectivityIds" }, m_part_offset_info },
521 asConstSpan(&number_of_connectivity_ids));
526 UniqueArray<Int64> nodes_uid(nb_node);
527 UniqueArray<unsigned char> nodes_ghost_type(nb_node);
529 UniqueArray2<Real> points;
530 points.resize(nb_node, 3);
532 Int32 index = inode.index();
535 nodes_uid[index] = node.uniqueId();
538 bool is_ghost = !node.isOwn();
540 ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
541 nodes_ghost_type[index] = ghost_type;
543 Real3 pos = nodes_coordinates[inode];
544 points[index][0] = pos.x;
545 points[index][1] = pos.y;
546 points[index][2] = pos.z;
550 _writeDataSet1DCollective<Int64>({ { m_node_data_group,
"GlobalIds" }, m_cell_offset_info }, nodes_uid);
553 _writeDataSet1DCollective<unsigned char>({ { m_node_data_group,
"vtkGhostType" }, m_cell_offset_info }, nodes_ghost_type);
556 _writeDataSet2DCollective<Real>({ { m_top_group,
"Points" }, m_point_offset_info }, points);
560 _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group,
"vtkGhostType" }, m_cell_offset_info }, cells_ghost_type);
564 _writeDataSet1DCollective<Int64>({ { m_cell_data_group,
"GlobalIds" }, m_cell_offset_info }, cells_uid);
569 _writeDataSet1D<Real>({ { m_steps_group,
"Values" }, m_time_offset_info }, asConstSpan(¤t_time));
572 Int64 comm_size = pm->commSize();
573 Int64 part_offset = (time_index - 1) * comm_size;
574 _writeDataSet1D<Int64>({ { m_steps_group,
"PartOffsets" }, m_time_offset_info }, asConstSpan(&part_offset));
577 _addInt64Attribute(m_steps_group,
"NSteps", time_index);
580 _writeConstituentsGroups();
586void VtkHdfV2DataWriter::
587_writeConstituentsGroups()
597 m_materials_groups.add(group_info_ref);
599 _initializeItemGroupCollectiveInfos(group_info);
600 ConstArrayView<Int32> groups_ids = cells.view().localIds();
601 DatasetGroupAndName dataset_group_name(m_top_group, String(
"Constituent_") + cells.name());
603 info() <<
"Writing infos for group '" << cells.name() <<
"'";
604 _writeDataSet1DCollective<Int32>({ dataset_group_name, m_cell_offset_info }, groups_ids);
623 Int64 dim1_size = local_size;
626 Int64 total_size = 0;
627 for (
Integer i = 0; i < nb_rank; ++i)
628 total_size += all_sizes[i];
631 for (
Integer i = 0; i < my_rank; ++i)
632 my_index += all_sizes[i];
635 part_info.setTotalSize(total_size);
636 part_info.setSize(local_size);
637 part_info.setOffset(my_index);
644void VtkHdfV2DataWriter::
645_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info)
647 Int64 dim1_size = group_info.m_item_group.size();
// Splits the range [0, total_size) into 'nb_interval' consecutive chunks and
// returns the { begin, size } pair of the chunk at position 'index'.
// NOTE(review): this listing elides some lines here — the body of the
// last-interval branch (presumably assigning the remaining elements to the
// final chunk) is not visible; confirm against the full file.
653 std::pair<Int64, Int64> _getInterval(Int64 index, Int64 nb_interval, Int64 total_size)
655 Int64 n = total_size;
// Base chunk size via integer division; a remainder may be left over.
656 Int64 isize = n / nb_interval;
657 Int64 ibegin = index * isize;
// Last interval: branch body elided in this extract.
659 if ((index + 1) == nb_interval)
661 return { ibegin, isize };
673void VtkHdfV2DataWriter::
677 const hid_t hdf_type,
bool is_collective)
682 HGroup& group = data_info.dataset.group;
683 const String& name = data_info.dataset.name;
687 Int64 wanted_offset = data_info.datasetInfo().
offset();
689 static constexpr int MAX_DIM = 2;
699 local_dims[0] = dim1_size;
700 local_dims[1] = dim2_size;
708 max_dims[0] = H5S_UNLIMITED;
709 max_dims[1] = dim2_size;
712 Int64 write_offset = 0;
715 Int64 global_dim1_size = dim1_size;
716 Int32 nb_participating_rank = 1;
719 nb_participating_rank =
m_mesh->parallelMng()->commSize();
721 if (data_info.m_group_info) {
724 part_info = data_info.m_group_info->writePartInfo();
729 global_dim1_size = part_info.totalSize();
730 my_index = part_info.offset();
744 if (m_is_first_call) {
747 global_dims[0] = global_dim1_size;
748 global_dims[1] = dim2_size;
750 Int64 chunk_size = global_dim1_size / nb_participating_rank;
751 if (chunk_size < 1024)
753 const Int64 max_chunk_size = 1024 * 1024 * 10;
754 chunk_size =
math::min(chunk_size, max_chunk_size);
755 chunk_dims[0] = chunk_size;
756 chunk_dims[1] = dim2_size;
757 info() <<
"CHUNK nb_dim=" << nb_dim
758 <<
" global_dim1_size=" << global_dim1_size
759 <<
" chunk0=" << chunk_dims[0]
760 <<
" chunk1=" << chunk_dims[1]
762 file_space.createSimple(nb_dim, global_dims.data(), max_dims.data());
764 plist_id.create(H5P_DATASET_CREATE);
765 H5Pset_chunk(plist_id.id(), nb_dim, chunk_dims.data());
766 dataset.create(group, name.localstr(), hdf_type, file_space,
HProperty{}, plist_id,
HProperty{});
769 hyperslab_offsets[0] = my_index;
770 hyperslab_offsets[1] = 0;
776 dataset.open(group, name.
localstr());
777 file_space = dataset.getSpace();
778 int nb_dimension = file_space.nbDimension();
779 if (nb_dimension != nb_dim)
784 file_space.getDimensions(original_dims.data(),
nullptr);
785 hsize_t offset0 = original_dims[0];
788 if (wanted_offset >= 0) {
789 offset0 = wanted_offset;
790 info() <<
"Forcing offset to " << wanted_offset;
792 global_dims[0] = offset0 + global_dim1_size;
793 global_dims[1] = dim2_size;
794 write_offset = offset0;
797 if ((herror = dataset.setExtent(global_dims.data())) < 0)
799 file_space = dataset.getSpace();
801 hyperslab_offsets[0] = offset0 + my_index;
802 hyperslab_offsets[1] = 0;
803 info(4) <<
"APPEND nb_dim=" << nb_dim
804 <<
" dim0=" << global_dims[0]
805 <<
" count0=" << local_dims[0]
806 <<
" offsets0=" << hyperslab_offsets[0] <<
" name=" << name;
813 Int64 nb_interval = 1;
817 info(4) <<
"WRITE global_size=" << nb_write_byte <<
" max_size=" <<
m_max_write_size <<
" nb_interval=" << nb_interval;
819 for (
Int64 i = 0; i < nb_interval; ++i) {
820 auto [index, nb_element] = _getInterval(i, nb_interval, dim1_size);
823 dims[0] = nb_element;
826 offsets[0] = hyperslab_offsets[0] + index;
828 if ((herror = H5Sselect_hyperslab(file_space.id(), H5S_SELECT_SET, offsets.data(),
nullptr, dims.data(),
nullptr)) < 0)
832 memory_space.createSimple(nb_dim, dims.data());
835 if ((herror = dataset.write(hdf_type, values_data.
data() + data_offset, memory_space, file_space, write_plist_id)) < 0)
842 if (!data_info.datasetInfo().isNull())
843 m_offset_info_list.insert(std::make_pair(data_info.datasetInfo(), write_offset));
849template <
typename DataType>
void VtkHdfV2DataWriter::
850_writeDataSetGeneric(
const DataInfo& data_info,
Int32 nb_dim,
851 Int64 dim1_size,
Int64 dim2_size,
const DataType* values_data,
854 const hid_t hdf_type = m_standard_types.nativeType(DataType{});
855 ConstMemoryView mem_view =
makeConstMemoryView(values_data,
sizeof(DataType), dim1_size * dim2_size);
856 _writeDataSetGeneric(data_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
862template <
typename DataType>
void VtkHdfV2DataWriter::
865 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
false);
871template <
typename DataType>
void VtkHdfV2DataWriter::
874 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
true);
880template <
typename DataType>
void VtkHdfV2DataWriter::
884 return _writeDataSet1D(data_info, values);
886 if (m_is_collective_io) {
887 IParallelMng* pm =
m_mesh->parallelMng();
888 if (!pm->isHybridImplementation()) {
889 return _writeDataSet1DUsingCollectiveIO(data_info, values);
893 Int64 size = values.size();
894 ArrayView size_value(1, &size);
895 pm->send(size_value, m_writer);
896 pm->send(values.constSmallView(), m_writer);
897 return _writeDataSet1DUsingCollectiveIO(data_info, Span<const DataType>{});
900 UniqueArray<DataType> all_values = values;
901 Int32 nb_sender = pm->_internalApi()->nbSendersToMasterParallelIO();
903 ArrayView s_recv_size(1, &recv_size);
905 for (
Int32 rank = m_writer + 1; rank < m_writer + nb_sender; ++rank) {
906 pm->recv(s_recv_size, rank);
908 Int64 old_size = all_values.size();
909 all_values.resizeNoInit(old_size + recv_size);
910 ArrayView recv_elem = all_values.subView(old_size, recv_size);
912 pm->recv(recv_elem, rank);
914 return _writeDataSet1DUsingCollectiveIO(data_info, all_values.constSpan());
919 UniqueArray<DataType> all_values;
920 IParallelMng* pm =
m_mesh->parallelMng();
921 pm->gatherVariable(values.smallView(), all_values, m_writer);
923 _writeDataSet1D<DataType>(data_info, all_values);
929template <
typename DataType>
void VtkHdfV2DataWriter::
932 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
false);
938template <
typename DataType>
void VtkHdfV2DataWriter::
941 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
true);
947template <
typename DataType>
void VtkHdfV2DataWriter::
951 return _writeDataSet2D(data_info, values);
953 if (m_is_collective_io) {
954 IParallelMng* pm =
m_mesh->parallelMng();
955 if (!pm->isHybridImplementation()) {
956 return _writeDataSet2DUsingCollectiveIO(data_info, values);
959 Span<const DataType> values_1d(values.data(), values.totalNbElement());
962 Int64 size = values.totalNbElement();
963 ArrayView size_value(1, &size);
964 pm->send(size_value, m_writer);
965 pm->send(values_1d.smallView(), m_writer);
966 return _writeDataSet2DUsingCollectiveIO(data_info, Span2<const DataType>{});
970 UniqueArray<DataType> all_values = values_1d;
971 Int32 nb_sender = pm->_internalApi()->nbSendersToMasterParallelIO();
973 ArrayView s_recv_size(1, &recv_size);
975 for (
Int32 rank = m_writer + 1; rank < m_writer + nb_sender; ++rank) {
976 pm->recv(s_recv_size, rank);
978 Int64 old_size = all_values.size();
979 all_values.resizeNoInit(old_size + recv_size);
980 ArrayView recv_elem = all_values.subView(old_size, recv_size);
982 pm->recv(recv_elem, rank);
984 Int64 dim1_size = all_values.size();
985 Int64 dim2_size = values.dim2Size();
987 dim1_size = dim1_size / dim2_size;
989 Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
990 return _writeDataSet2DUsingCollectiveIO(data_info, span2);
995 Int64 dim2_size = values.dim2Size();
996 UniqueArray<DataType> all_values;
997 IParallelMng* pm =
m_mesh->parallelMng();
998 Span<const DataType> values_1d(values.data(), values.totalNbElement());
999 pm->gatherVariable(values_1d.smallView(), all_values, m_writer);
1001 Int64 dim1_size = all_values.size();
1003 dim1_size = dim1_size / dim2_size;
1004 Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
1005 return _writeDataSet2D<DataType>(data_info, span2);
1012void VtkHdfV2DataWriter::
1015 hsize_t
len = values.size();
1016 hid_t aid = H5Screate_simple(1, &len,
nullptr);
1017 hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
1020 int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
// Writes a scalar Int64 HDF5 attribute named 'name' on object 'hid'.
// On the first output call the attribute is created; on subsequent calls
// the existing attribute is reopened and overwritten.
1030void VtkHdfV2DataWriter::
1031_addInt64Attribute(
Hid& hid,
const char* name,
Int64 value)
// Scalar dataspace for the single Int64 value.
1033 HSpace aid(H5Screate(H5S_SCALAR));
// m_is_first_call distinguishes creation from reuse of the attribute.
1035 if (m_is_first_call)
1036 attr.create(hid, name, H5T_NATIVE_INT64, aid);
1038 attr.open(hid, name);
// NOTE(review): error handling of 'ret' is elided in this extract.
1041 herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
// Reads back the scalar Int64 HDF5 attribute 'name' from object 'hid'.
// Used e.g. to retrieve "NSteps" when appending to an existing file.
1049Int64 VtkHdfV2DataWriter::
1050_readInt64Attribute(
Hid& hid,
const char* name)
1053 attr.open(hid, name);
// NOTE(review): declaration of 'value' and the check of 'ret' are elided
// in this extract; presumably the value is returned on success.
1057 herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
// Writes a fixed-size string HDF5 attribute 'name' with content 'value' on
// object 'hid', using the raw H5A C API (scalar dataspace, C-string type
// sized to the UTF-8 length of the value).
1066void VtkHdfV2DataWriter::
1067_addStringAttribute(
Hid& hid,
const char* name,
const String& value)
1069 hid_t aid = H5Screate(H5S_SCALAR);
// Copy of H5T_C_S1 resized to the string length (fixed-length string type).
1070 hid_t attr_type = H5Tcopy(H5T_C_S1);
1071 H5Tset_size(attr_type, value.length());
1072 hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
// localstr() yields the UTF-8 encoding of the Arcane String.
1075 int ret = H5Awrite(attr, attr_type, value.localstr());
// NOTE(review): closing of 'aid' and 'attr' is elided in this extract;
// only the datatype close is visible here.
1076 ret = H5Tclose(attr_type);
1086void VtkHdfV2DataWriter::
1092 for (
const auto& i : m_offset_info_list) {
1093 Int64 offset = i.second;
1095 HGroup* hdf_group = offset_info.group();
1098 _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, asConstSpan(&offset));
// Opens (or creates on first use) the VTKHDF group hierarchy in the output
// file: the "VTKHDF" root group, its per-item data groups ("CellData",
// "PointData"), and the temporal "Steps" group with its per-kind offset
// subgroups used for transient (multi-step) output.
1108void VtkHdfV2DataWriter::
1109_openOrCreateGroups()
1112 m_top_group.openOrCreate(
m_file_id,
"VTKHDF");
1113 m_cell_data_group.openOrCreate(m_top_group,
"CellData");
1114 m_node_data_group.openOrCreate(m_top_group,
"PointData");
1115 m_steps_group.openOrCreate(m_top_group,
"Steps");
1116 m_point_data_offsets_group.openOrCreate(m_steps_group,
"PointDataOffsets");
1117 m_cell_data_offsets_group.openOrCreate(m_steps_group,
"CellDataOffsets");
1118 m_field_data_offsets_group.openOrCreate(m_steps_group,
"FieldDataOffsets");
// Closes every HDF5 group opened by _openOrCreateGroups(), innermost first,
// ending with the "VTKHDF" root group.
// NOTE(review): the method name and any leading statements (original lines
// 1125-1126) are elided in this extract — presumably this is a _closeGroups()
// style cleanup; confirm against the full file.
1124void VtkHdfV2DataWriter::
1127 m_cell_data_group.close();
1128 m_node_data_group.close();
1129 m_point_data_offsets_group.close();
1130 m_cell_data_offsets_group.close();
1131 m_field_data_offsets_group.close();
1132 m_steps_group.close();
1133 m_top_group.close();
1142 ARCANE_UNUSED(meta_data);
1151 info(4) <<
"Write VtkHdfV2 var=" << var->
name();
1156 ARCANE_FATAL(
"Only export of scalar item variable is implemented (name={0})", var->
name());
1158 ARCANE_FATAL(
"Export of partial variable is not implemented");
1163 switch (item_kind) {
1165 group = &m_cell_data_group;
1166 offset_info = m_cell_offset_info;
1167 group_info = &m_all_cells_info;
1170 group = &m_node_data_group;
1171 offset_info = m_point_offset_info;
1172 group_info = &m_all_nodes_info;
1175 ARCANE_FATAL(
"Only export of 'Cell' or 'Node' variable is implemented (name={0})", var->
name());
1182 switch (data_type) {
1184 _writeBasicTypeDataset<Real>(data_info, data);
1187 _writeBasicTypeDataset<Int64>(data_info, data);
1190 _writeBasicTypeDataset<Int32>(data_info, data);
1193 _writeReal3Dataset(data_info, data);
1196 _writeReal2Dataset(data_info, data);
1199 warning() << String::format(
"Export for datatype '{0}' is not supported (var_name={1})", data_type, var->
name());
1206template <
typename DataType>
void VtkHdfV2DataWriter::
1207_writeBasicTypeDataset(
const DataInfo& data_info,
IData* data)
// Writes a Real3 array variable as an (nb_value x 3) dataset of scalars:
// flattens each Real3 into its x/y/z components, then delegates to the
// collective 2D writer.
1217void VtkHdfV2DataWriter::
1218_writeReal3Dataset(
const DataInfo& data_info, IData* data)
// Checked downcast: 'data' must actually hold an array of Real3.
// NOTE(review): the null-check following the cast is elided in this extract.
1220 auto* true_data =
dynamic_cast<IArrayDataT<Real3>*
>(data);
1222 SmallSpan<const Real3> values(true_data->view());
1223 Int32 nb_value = values.size();
// Scratch 2D array: one row per item, three scalar columns.
1225 UniqueArray2<Real> scalar_values;
1226 scalar_values.resize(nb_value, 3);
1227 for (Int32 i = 0; i < nb_value; ++i) {
1228 Real3 v = values[i];
1229 scalar_values[i][0] = v.x;
1230 scalar_values[i][1] = v.y;
1231 scalar_values[i][2] = v.z;
1233 _writeDataSet2DCollective<Real>(data_info, scalar_values);
// Writes a Real2 array variable as an (nb_value x 3) dataset of scalars.
// The third component is padded with 0.0 so 2D vectors are stored with the
// same layout as 3D ones, then delegates to the collective 2D writer.
1239void VtkHdfV2DataWriter::
1240_writeReal2Dataset(
const DataInfo& data_info,
IData* data)
// Checked downcast: 'data' must actually hold an array of Real2.
// NOTE(review): the null-check following the cast is elided in this extract.
1243 auto* true_data =
dynamic_cast<IArrayDataT<Real2>*
>(data);
1245 SmallSpan<const Real2> values(true_data->view());
1246 Int32 nb_value = values.size();
// Scratch 2D array: one row per item, three scalar columns (z padded).
1247 UniqueArray2<Real> scalar_values;
1248 scalar_values.resize(nb_value, 3);
1249 for (
Int32 i = 0; i < nb_value; ++i) {
1250 Real2 v = values[i];
1251 scalar_values[i][0] = v.x;
1252 scalar_values[i][1] = v.y;
// Third component padded to zero for 2D vectors.
1253 scalar_values[i][2] = 0.0;
1255 _writeDataSet2DCollective<Real>(data_info, scalar_values);
// Reads the offsets dataset associated with 'offset_info' from its HDF5
// group, picks the value at 'wanted_step', and stores it back into
// offset_info. Used when restarting / going back in time so subsequent
// writes overwrite data from the wanted step onward.
1261void VtkHdfV2DataWriter::
1262_readAndSetOffset(DatasetInfo& offset_info,
Int32 wanted_step)
// NOTE(review): the null-check on 'hgroup' is elided in this extract.
1264 HGroup* hgroup = offset_info.group();
// Read the entire 1D Int64 offsets array named after the dataset.
1266 StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
1267 UniqueArray<Int64> values;
1268 a.directRead(m_standard_types, values);
// Offset recorded for the requested time step becomes the write offset.
1269 Int64 offset_value = values[wanted_step];
1270 offset_info.setOffset(offset_value);
1271 info() <<
"VALUES name=" << offset_info.name() <<
" values=" << values
1272 <<
" wanted_step=" << wanted_step <<
" v=" << offset_value;
1278void VtkHdfV2DataWriter::
1299 m_cell_offset_info =
DatasetInfo(m_steps_group,
"CellOffsets");
1300 m_point_offset_info =
DatasetInfo(m_steps_group,
"PointOffsets");
1301 m_connectivity_offset_info =
DatasetInfo(m_steps_group,
"ConnectivityIdOffsets");
1303 m_offset_for_cell_offset_info =
DatasetInfo(
"_OffsetForCellOffsetInfo");
1304 m_part_offset_info =
DatasetInfo(
"_PartOffsetInfo");
1305 m_time_offset_info =
DatasetInfo(
"_TimeOffsetInfo");
1310 if (m_is_writer && !m_is_first_call) {
1311 IParallelMng* pm =
m_mesh->parallelMng();
1312 const Int32 nb_rank = pm->commSize();
1313 Int64 nb_current_step = _readInt64Attribute(m_steps_group,
"NSteps");
1315 info(4) <<
"NB_STEP=" << nb_current_step <<
" time_index=" << time_index
1316 <<
" current_time=" <<
m_times.back();
1317 const bool debug_times =
false;
1319 StandardArrayT<Real> a1(m_steps_group.id(),
"Values");
1320 UniqueArray<Real> times;
1321 a1.directRead(m_standard_types, times);
1322 info() <<
"TIMES=" << times;
1324 if ((nb_current_step + 1) != time_index) {
1325 info() <<
"[VtkHdf] go_backward detected";
1326 Int32 wanted_step = time_index - 1;
1329 _readAndSetOffset(m_cell_offset_info, wanted_step);
1330 _readAndSetOffset(m_point_offset_info, wanted_step);
1331 _readAndSetOffset(m_connectivity_offset_info, wanted_step);
1332 m_part_offset_info.setOffset(wanted_step * nb_rank);
1333 m_time_offset_info.setOffset(wanted_step);
1334 m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.offset() + wanted_step * nb_rank);
1347class VtkHdfV2PostProcessor
1360 bool use_collective_io =
true;
1361 Int64 max_write_size = 0;
1363 use_collective_io =
options()->useCollectiveWrite();
1364 max_write_size =
options()->maxWriteSize();
1366 auto w = std::make_unique<VtkHdfV2DataWriter>(
mesh(),
groups(), use_collective_io);
1367 w->setMaxWriteSize(max_write_size);
1368 w->setTimes(
times());
1370 w->setDirectoryName(dir.
file(
"vtkhdfv2"));
1371 m_writer = std::move(w);
1381 std::unique_ptr<IDataWriter> m_writer;
1387ARCANE_REGISTER_SERVICE_VTKHDFV2POSTPROCESSOR(VtkHdfV2PostProcessor,
1388 VtkHdfV2PostProcessor);
#define ARCANE_CHECK_POINTER(ptr)
Macro retournant le pointeur ptr s'il est non nul ou lançant une exception s'il est nul.
#define ARCANE_THROW(exception_class,...)
Macro pour envoyer une exception avec formatage.
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
CaseOptionsVtkHdfV2PostProcessor * options() const
Options du jeu de données du service.
ArcaneVtkHdfV2PostProcessorObject(const Arcane::ServiceBuildInfo &sbi)
Constructeur.
Vue modifiable d'un tableau d'un type T.
Vue constante d'un tableau de type T.
Vue constante sur une zone mémoire contigue contenant des éléments de taille fixe.
constexpr Int32 datatypeSize() const
Taille du type de donnée associé (1 par défaut)
constexpr const std::byte * data() const
Pointeur sur la zone mémoire.
Classe gérant un répertoire.
String file(const String &file_name) const override
Retourne le chemin complet du fichier file_name dans le répertoire.
Tableau 1D de taille fixe.
Encapsule un hid_t pour un dataset.
Encapsule un hid_t pour un fichier.
Encapsule un hid_t pour un groupe.
static void useMutex(bool is_active, IParallelMng *pm)
Fonction permettant d'activer ou de désactiver les verrous à chaque appel à HDF5.
static bool hasParallelHdf5()
Vrai HDF5 est compilé avec le support de MPI.
Encapsule un hid_t pour une propriété (H5P*).
void createDatasetTransfertCollectiveMPIIO()
Créé une propriété de dataset pour MPIIO.
Encapsule un hid_t pour un dataspace.
Définition des types standards Arcane pour hdf5.
Interface d'une donnée tableau d'un type T.
Interface d'écriture des données d'une variable.
Exception lorsqu'une erreur d'entrée/sortie est détectée.
Interface du gestionnaire de parallélisme pour un sous-domaine.
virtual Int32 commRank() const =0
Rang de cette instance dans le communicateur.
virtual Int32 commSize() const =0
Nombre d'instance dans le communicateur.
virtual void allGather(ConstArrayView< char > send_buf, ArrayView< char > recv_buf)=0
Effectue un regroupement sur tous les processeurs. Il s'agit d'une opération collective....
Interface d'une variable.
virtual eDataType dataType() const =0
Type de la donnée gérée par la variable (Real, Integer, ...)
virtual eItemKind itemKind() const =0
Genre des entités du maillage sur lequel repose la variable.
virtual bool isPartial() const =0
Indique si la variable est partielle.
virtual Integer dimension() const =0
Dimension de la variable.
virtual String name() const =0
Nom de la variable.
Groupe d'entités de maillage.
Interface du gestionnaire des matériaux et des milieux d'un maillage.
static IMeshMaterialMng * getReference(const MeshHandleOrMesh &mesh_handle, bool create=true)
Récupère ou créé la référence associée à mesh.
ItemGroupCollection groups() override
Liste des groupes à sauver.
ConstArrayView< Real > times() override
Liste des temps sauvés.
const String & baseDirectoryName() override
Nom du répertoire de sortie des fichiers.
Structure contenant les informations pour créer un service.
Vue pour un tableau 2D dont la taille est un 'Int64'.
Vue d'un tableau d'éléments de type T.
Constructeur de chaîne de caractère unicode.
Chaîne de caractères unicode.
const char * localstr() const
Retourne la conversion de l'instance dans l'encodage UTF-8.
TraceAccessor(ITraceMng *m)
Construit un accesseur via le gestionnaire de trace m.
TraceMessage info() const
Flot pour un message d'information.
TraceMessage warning() const
Flot pour un message d'avertissement.
Vecteur 1D de données avec sémantique par valeur (style STL).
void write(IVariable *var, IData *data) override
Ecrit les données data de la variable var.
IMeshMaterialMng * m_material_mng
Gestionnaire de matériaux associé (peut être nul)
Int64 m_max_write_size
Taille maximale (en kilo-octet) pour une écriture.
String m_directory_name
Répertoire de sortie.
WritePartInfo _computeWritePartInfo(Int64 local_size)
Calcule l'offset de notre partie et le nombre total d'éléments.
IMesh * m_mesh
Maillage associé
HFile m_file_id
Identifiant HDF du fichier.
ItemGroupCollection m_groups
Liste des groupes à sauver.
UniqueArray< Real > m_times
Liste des temps.
void setMetaData(const String &meta_data) override
Positionne les infos des méta-données.
String m_full_filename
Nom du fichier HDF courant.
IDataWriter * dataWriter() override
Retourne l'écrivain associé à ce post-processeur.
void close() override
Ferme l'écrivain. Après fermeture, il ne peut plus être utilisé
void notifyBeginWrite() override
Notifie qu'une sortie va être effectuée avec les paramètres courants.
void notifyEndWrite() override
Notifie qu'une sortie vient d'être effectuée.
__host__ __device__ Real2 min(Real2 a, Real2 b)
Retourne le minimum de deux Real2.
ItemGroupT< Cell > CellGroup
Groupe de mailles.
ItemGroupT< Node > NodeGroup
Groupe de noeuds.
MeshVariableScalarRefT< Node, Real3 > VariableNodeReal3
Grandeur au noeud de type coordonnées.
Integer len(const char *s)
Retourne la longueur de la chaîne s.
Fonctions utilitaires pour Hdf5.
Active toujours les traces dans les parties Arcane concernant les matériaux.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Ref< TrueType > createRef(Args &&... args)
Créé une instance de type TrueType avec les arguments Args et retourne une référence dessus.
Collection< ItemGroup > ItemGroupCollection
Collection de groupes d'éléments du maillage.
ConstMemoryView makeConstMemoryView(const void *ptr, Int32 datatype_size, Int64 nb_element)
Créé une vue mémoire en lecture seule.
std::int64_t Int64
Type entier signé sur 64 bits.
Int32 Integer
Type représentant un entier.
bool operator<(const Item &item1, const Item &item2)
Compare deux entités.
eItemKind
Genre d'entité de maillage.
@ IK_Node
Entité de maillage de genre noeud.
@ IK_Cell
Entité de maillage de genre maille.
double Real
Type représentant un réel.
unsigned char Byte
Type d'un octet.
eDataType
Type d'une donnée.
@ DT_Int32
Donnée de type entier 32 bits.
@ DT_Real3
Donnée de type vecteur 3.
@ DT_Int64
Donnée de type entier 64 bits.
@ DT_Real2
Donnée de type vecteur 2.
@ DT_Real
Donnée de type réel.
@ Cell
Le maillage est AMR par maille.
std::int32_t Int32
Type entier signé sur 32 bits.
ConstArrayView< Real > RealConstArrayView
Equivalent C d'un tableau à une dimension de réels.
Conserve les infos sur les données à sauver et l'offset associé.
Classe pour conserver un couple (hdf_group,nom_du_dataset).
Classe pour conserver les information d'un offset.
Int64 offset() const
Valeur de l'offset. (-1) si on écrit à la fin du tableau.
Informations collectives sur un ItemGroup.
WritePartInfo m_write_part_info
Informations sur l'écriture.
ItemGroup m_item_group
Groupe associé
Informations sur l'offset de la partie à écrire associée à un rang.
Int64 m_offset
Offset de mon rang.
Int64 m_size
Nombre d'éléments de mon rang.
Int64 m_total_size
Nombre d'éléments sur tous les rangs.