// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-

#include "arcane/utils/Collection.h"
#include "arcane/utils/Enumerator.h"
#include "arcane/utils/Iostream.h"
#include "arcane/utils/StringBuilder.h"
#include "arcane/utils/IOException.h"
#include "arcane/utils/FixedArray.h"
#include "arcane/utils/MemoryView.h"

#include "arcane/core/PostProcessorWriterBase.h"
#include "arcane/core/Directory.h"
#include "arcane/core/FactoryService.h"
#include "arcane/core/IDataWriter.h"
#include "arcane/core/IData.h"
#include "arcane/core/IItemFamily.h"
#include "arcane/core/VariableCollection.h"
#include "arcane/core/IParallelMng.h"
#include "arcane/core/IMesh.h"
#include "arcane/core/internal/VtkCellTypes.h"

#include "arcane/core/materials/IMeshMaterialMng.h"
#include "arcane/core/materials/IMeshEnvironment.h"

#include "arcane/hdf5/Hdf5Utils.h"
#include "arcane/hdf5/VtkHdfV2PostProcessor_axl.h"

using namespace Hdf5Utils;
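
// Small helper: wraps a single scalar in a one-element Span so that scalar
// values (current time, part offsets, element counts, ...) can go through
// the same dataset-writing path as arrays.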
template <typename T> Span<const T>
asConstSpan(const T* v)
{
  return Span<const T>(v, 1);
}
//! Keeps the information about a dataset to save and its associated offset.
class DatasetInfo
{
 public:

  bool isNull() const { return m_name.null(); }
  HGroup* group() const { return m_group; }
  const String& name() const { return m_name; }
  //! Offset value; (-1) means "append at the end of the dataset".
  Int64 offset() const { return m_offset; }
  void setOffset(Int64 v) { m_offset = v; }
  friend bool operator<(const DatasetInfo& s1, const DatasetInfo& s2)
  {
    return (s1.m_name < s2.m_name);
  }

 private:

  HGroup* m_group = nullptr;
  String m_name;
  Int64 m_offset = -1;
};
//! Information about the part of a dataset written by one rank.
class WritePartInfo
{
 public:

  void setTotalSize(Int64 v) { m_total_size = v; }
  void setSize(Int64 v) { m_size = v; }
  void setOffset(Int64 v) { m_offset = v; }
  Int64 totalSize() const { return m_total_size; }
  Int64 size() const { return m_size; }
  Int64 offset() const { return m_offset; }

 private:

  Int64 m_total_size = 0;
  Int64 m_size = 0;
  Int64 m_offset = 0;
};
  DatasetInfo datasetInfo() const { return m_dataset_info; }

  void endWrite() override;

  void setMaxWriteSize(Int64 v) { m_max_write_size = v; }
  IMesh* m_mesh = nullptr;
  ItemGroupCollection m_groups;
  IMeshMaterialMng* m_material_mng = nullptr;

  UniqueArray<Real> m_times;
  String m_full_filename;
  String m_directory_name;

  HFile m_file_id;
  HGroup m_top_group;
  HGroup m_cell_data_group;
  HGroup m_node_data_group;
  HGroup m_steps_group;
  HGroup m_point_data_offsets_group;
  HGroup m_cell_data_offsets_group;
  HGroup m_field_data_offsets_group;

  bool m_is_parallel = false;
  bool m_is_master_io = false;
  bool m_is_collective_io = false;
  bool m_is_first_call = false;
  bool m_is_writer = false;

  DatasetInfo m_cell_offset_info;
  DatasetInfo m_point_offset_info;
  DatasetInfo m_connectivity_offset_info;
  DatasetInfo m_offset_for_cell_offset_info;
  DatasetInfo m_part_offset_info;
  DatasetInfo m_time_offset_info;
  std::map<DatasetInfo, Int64> m_offset_info_list;

  StandardTypes m_standard_types{ false };

  ItemGroupCollectiveInfo m_all_cells_info;
  ItemGroupCollectiveInfo m_all_nodes_info;
  UniqueArray<Ref<ItemGroupCollectiveInfo>> m_materials_groups;

  Int64 m_max_write_size = 0;
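
  // NOTE: m_max_write_size is expressed in kilobytes (see the
  // "max_write_size (kB)" log in beginWrite() and the "* 1024" in
  // _writeDataSetGeneric()); 0 disables the splitting of large collective
  // writes. The DatasetInfo members above record, per dataset, the offset at
  // which the current time step was written so a later step can be rewound.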
  void _addInt64ArrayAttribute(Hid& hid, const char* name, Span<const Int64> values);
  void _addStringAttribute(Hid& hid, const char* name, const String& value);

  template <typename DataType> void
  _writeDataSet1D(const DataInfo& data_info, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet1DUsingCollectiveIO(const DataInfo& data_info, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet1DCollective(const DataInfo& data_info, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet2D(const DataInfo& data_info, Span2<const DataType> values);
  template <typename DataType> void
  _writeDataSet2DUsingCollectiveIO(const DataInfo& data_info, Span2<const DataType> values);
  template <typename DataType> void
  _writeDataSet2DCollective(const DataInfo& data_info, Span2<const DataType> values);
  template <typename DataType> void
  _writeBasicTypeDataset(const DataInfo& data_info, IData* data);
  void _writeReal3Dataset(const DataInfo& data_info, IData* data);
  void _writeReal2Dataset(const DataInfo& data_info, IData* data);

  String _getFileName()
  {
    StringBuilder sb(m_mesh->name());
    sb += ".hdf";
    return sb.toString();
  }

  template <typename DataType> void
  _writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                       Int64 dim1_size, Int64 dim2_size, const DataType* values_data,
                       bool is_collective);
  void _writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                            Int64 dim1_size, Int64 dim2_size, ConstMemoryView values_data,
                            const hid_t hdf_datatype_type, bool is_collective);
  void _addInt64Attribute(Hid& hid, const char* name, Int64 value);
  Int64 _readInt64Attribute(Hid& hid, const char* name);
  void _openOrCreateGroups();
  void _readAndSetOffset(DatasetInfo& offset_info, Int32 wanted_step);
  void _initializeOffsets();
  void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info);
  WritePartInfo _computeWritePartInfo(Int64 local_size);
  void _writeConstituentsGroups();
VtkHdfV2DataWriter::
VtkHdfV2DataWriter(IMesh* mesh, ItemGroupCollection groups, bool is_collective_io)
: TraceAccessor(mesh->traceMng())
, m_mesh(mesh)
, m_groups(groups)
, m_is_collective_io(is_collective_io)
, m_all_cells_info(mesh->allCells())
, m_all_nodes_info(mesh->allNodes())
{
}
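
// beginWrite() is called once per output: it (re)opens the HDF5 file
// (truncating on the first call, appending afterwards), writes the VTKHDF
// topology datasets (Offsets/Connectivity/Types), the point coordinates and
// the per-part counts, then the time-step bookkeeping under /VTKHDF/Steps.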
void VtkHdfV2DataWriter::
beginWrite(const VariableCollection& vars)
{
  IParallelMng* pm = m_mesh->parallelMng();
  const Int32 nb_rank = pm->commSize();
  m_is_parallel = nb_rank > 1;
  m_is_master_io = pm->isMasterIO();

  Int32 time_index = m_times.size();
  const bool is_first_call = (time_index < 2);
  m_is_first_call = is_first_call;

  info() << "WARNING: the 'VtkHdfV2' format implementation is experimental";

  String filename = _getFileName();
  Directory dir(m_directory_name);
  m_full_filename = dir.file(filename);
  info(4) << "VtkHdfV2DataWriter::beginWrite() file=" << m_full_filename;

  // Collective MPI/IO is not usable in hybrid or shared-memory mode.
  if (pm->isHybridImplementation() || pm->isThreadImplementation())
    m_is_collective_io = false;

  info() << "VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;
  info() << "VtkHdfV2DataWriter: max_write_size (kB) =" << m_max_write_size;
  info() << "VtkHdfV2DataWriter: has_material?=" << (m_material_mng != nullptr);

  m_is_writer = m_is_master_io || m_is_collective_io;

  HProperty plist_id;
  if (m_is_collective_io)
    plist_id.createFilePropertyMPIIO(pm);

  if (is_first_call && m_is_master_io)
    dir.createDirectory();

  if (m_is_collective_io)
    pm->barrier();

  if (is_first_call)
    m_file_id.openTruncate(m_full_filename, plist_id.id());
  else
    m_file_id.openAppend(m_full_filename, plist_id.id());

  _openOrCreateGroups();

  std::array<Int64, 2> version = { 2, 0 };
  _addInt64ArrayAttribute(m_top_group, "Version", version);
  _addStringAttribute(m_top_group, "Type", "UnstructuredGrid");

  _initializeItemGroupCollectiveInfos(m_all_cells_info);
  _initializeItemGroupCollectiveInfos(m_all_nodes_info);

  CellGroup all_cells = m_mesh->allCells();
  NodeGroup all_nodes = m_mesh->allNodes();

  const Int32 nb_cell = all_cells.size();
  const Int32 nb_node = all_nodes.size();

  Int32 total_nb_connected_node = 0;
  ENUMERATE_ (Cell, icell, all_cells) {
    Cell cell = *icell;
    total_nb_connected_node += cell.nodeIds().size();
  }

  // Cell connectivity, offsets, ghost type, VTK type and uniqueId().
  UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
  UniqueArray<Int64> cells_offset(nb_cell + 1);
  UniqueArray<unsigned char> cells_ghost_type(nb_cell);
  UniqueArray<unsigned char> cells_type(nb_cell);
  UniqueArray<Int64> cells_uid(nb_cell);

  cells_offset[0] = 0;
  Int32 connected_node_index = 0;
  ENUMERATE_ (Cell, icell, all_cells) {
    Int32 index = icell.index();
    Cell cell = *icell;
    cells_uid[index] = cell.uniqueId();

    Byte ghost_type = 0;
    bool is_ghost = !cell.isOwn();
    if (is_ghost)
      ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
    cells_ghost_type[index] = ghost_type;

    unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
    cells_type[index] = vtk_type;
    for (NodeLocalId node : cell.nodeIds()) {
      cells_connectivity[connected_node_index] = node;
      ++connected_node_index;
    }
    cells_offset[index + 1] = connected_node_index;
  }

  _initializeOffsets();

  _writeDataSet1DCollective<Int64>({ { m_top_group, "Offsets" }, m_offset_for_cell_offset_info }, cells_offset);
  _writeDataSet1DCollective<Int64>({ { m_top_group, "Connectivity" }, m_connectivity_offset_info },
                                   cells_connectivity);
  _writeDataSet1DCollective<unsigned char>({ { m_top_group, "Types" }, m_cell_offset_info }, cells_type);

  Int64 nb_cell_int64 = nb_cell;
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfCells" }, m_part_offset_info },
                                   asConstSpan(&nb_cell_int64));
  Int64 nb_node_int64 = nb_node;
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfPoints" }, m_part_offset_info },
                                   asConstSpan(&nb_node_int64));
  Int64 number_of_connectivity_ids = cells_connectivity.size();
  _writeDataSet1DCollective<Int64>({ { m_top_group, "NumberOfConnectivityIds" }, m_part_offset_info },
                                   asConstSpan(&number_of_connectivity_ids));

  // Node uniqueId(), ghost type and coordinates.
  UniqueArray<Int64> nodes_uid(nb_node);
  UniqueArray<unsigned char> nodes_ghost_type(nb_node);
  VariableNodeReal3& nodes_coordinates(m_mesh->nodesCoordinates());
  UniqueArray2<Real> points;
  points.resize(nb_node, 3);
  ENUMERATE_ (Node, inode, all_nodes) {
    Int32 index = inode.index();
    Node node = *inode;
    nodes_uid[index] = node.uniqueId();

    Byte ghost_type = 0;
    bool is_ghost = !node.isOwn();
    if (is_ghost)
      ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
    nodes_ghost_type[index] = ghost_type;

    Real3 pos = nodes_coordinates[inode];
    points[index][0] = pos.x;
    points[index][1] = pos.y;
    points[index][2] = pos.z;
  }

  _writeDataSet1DCollective<Int64>({ { m_node_data_group, "GlobalNodeId" }, m_cell_offset_info }, nodes_uid);
  _writeDataSet1DCollective<unsigned char>({ { m_node_data_group, "vtkGhostType" }, m_cell_offset_info }, nodes_ghost_type);
  _writeDataSet2DCollective<Real>({ { m_top_group, "Points" }, m_point_offset_info }, points);

  _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group, "vtkGhostType" }, m_cell_offset_info }, cells_ghost_type);
  _writeDataSet1DCollective<Int64>({ { m_cell_data_group, "GlobalCellId" }, m_cell_offset_info }, cells_uid);

  // Time-step bookkeeping.
  Real current_time = m_times[time_index - 1];
  _writeDataSet1D<Real>({ { m_steps_group, "Values" }, m_time_offset_info }, asConstSpan(&current_time));

  Int64 part_offset = (time_index - 1) * pm->commSize();
  _writeDataSet1D<Int64>({ { m_steps_group, "PartOffsets" }, m_time_offset_info }, asConstSpan(&part_offset));

  _addInt64Attribute(m_steps_group, "NSteps", time_index);

  _writeConstituentsGroups();
}
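
// Writes one dataset per material environment ("Constituent_<name>") holding
// the local ids of the cells of that environment, so a reader can rebuild
// per-constituent cell groups.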
void VtkHdfV2DataWriter::
_writeConstituentsGroups()
{
  if (!m_material_mng)
    return;

  for (IMeshEnvironment* env : m_material_mng->environments()) {
    CellGroup cells = env->cells();
    Ref<ItemGroupCollectiveInfo> group_info_ref = createRef<ItemGroupCollectiveInfo>(cells);
    m_materials_groups.add(group_info_ref);
    ItemGroupCollectiveInfo& group_info = *group_info_ref.get();
    _initializeItemGroupCollectiveInfos(group_info);
    ConstArrayView<Int32> groups_ids = cells.view().localIds();
    DatasetGroupAndName dataset_group_name(m_top_group, String("Constituent_") + cells.name());
    info() << "Writing infos for group '" << cells.name() << "'";
    _writeDataSet1DCollective<Int32>({ dataset_group_name, m_cell_offset_info }, groups_ids);
  }
}
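
// Gathers every rank's local element count and derives, for this rank, the
// global size and the offset at which its part starts. For example with
// three ranks of local sizes {3, 5, 2}: total_size = 10 and the offsets are
// rank 0 -> 0, rank 1 -> 3, rank 2 -> 8.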
VtkHdfV2DataWriter::WritePartInfo VtkHdfV2DataWriter::
_computeWritePartInfo(Int64 local_size)
{
  IParallelMng* pm = m_mesh->parallelMng();
  Int32 nb_rank = pm->commSize();
  Int32 my_rank = pm->commRank();

  UniqueArray<Int64> ranks_size(nb_rank);
  ArrayView<Int64> all_sizes(ranks_size);
  Int64 dim1_size = local_size;
  pm->allGather(ConstArrayView<Int64>(1, &dim1_size), all_sizes);

  Int64 total_size = 0;
  for (Integer i = 0; i < nb_rank; ++i)
    total_size += all_sizes[i];

  Int64 my_index = 0;
  for (Integer i = 0; i < my_rank; ++i)
    my_index += all_sizes[i];

  WritePartInfo part_info;
  part_info.setTotalSize(total_size);
  part_info.setSize(local_size);
  part_info.setOffset(my_index);
  return part_info;
}
void VtkHdfV2DataWriter::
_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info)
{
  Int64 dim1_size = group_info.m_item_group.size();
  group_info.setWritePartInfo(_computeWritePartInfo(dim1_size));
}
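
// Splits [0, total_size) into nb_interval contiguous chunks and returns the
// (begin, size) pair of chunk 'index'; the last chunk absorbs the remainder.
// E.g. _getInterval(2, 3, 10) -> { 6, 4 }.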
std::pair<Int64, Int64> _getInterval(Int64 index, Int64 nb_interval, Int64 total_size)
{
  Int64 n = total_size;
  Int64 isize = n / nb_interval;
  Int64 ibegin = index * isize;
  // The last interval takes the remaining elements.
  if ((index + 1) == nb_interval)
    isize = total_size - ibegin;
  return { ibegin, isize };
}
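
// Core HDF5 writing routine. On the first call it creates a chunked,
// extendable dataset (first dimension unlimited); on later calls it extends
// the dataset and appends after the existing rows. Each rank then selects
// its hyperslab [offset, offset + local_size) in the file space and writes
// into it, optionally splitting the write into intervals of at most
// m_max_write_size kB.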
void VtkHdfV2DataWriter::
_writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                     Int64 dim1_size, Int64 dim2_size,
                     ConstMemoryView values_data,
                     const hid_t hdf_type, bool is_collective)
{
  HGroup& group = data_info.dataset.group;
  const String& name = data_info.dataset.name;

  Int64 wanted_offset = data_info.datasetInfo().offset();

  static constexpr int MAX_DIM = 2;

  FixedArray<hsize_t, MAX_DIM> local_dims;
  local_dims[0] = dim1_size;
  local_dims[1] = dim2_size;

  FixedArray<hsize_t, MAX_DIM> global_dims;

  FixedArray<hsize_t, MAX_DIM> max_dims;
  max_dims[0] = H5S_UNLIMITED;
  max_dims[1] = dim2_size;

  Int64 write_offset = 0;
  Int64 my_index = 0;
  Int64 global_dim1_size = dim1_size;
  Int32 nb_participating_rank = 1;

  if (is_collective) {
    nb_participating_rank = m_mesh->parallelMng()->commSize();
    WritePartInfo part_info;
    if (data_info.m_group_info) {
      part_info = data_info.m_group_info->writePartInfo();
    }
    else
      part_info = _computeWritePartInfo(dim1_size);
    global_dim1_size = part_info.totalSize();
    my_index = part_info.offset();
  }

  HProperty write_plist_id;
  if (is_collective)
    write_plist_id.createDatasetTransfertCollectiveMPIIO();

  HSpace file_space;
  HDataset dataset;
  herr_t herror = 0;
  FixedArray<hsize_t, MAX_DIM> hyperslab_offsets;

  if (m_is_first_call) {
    FixedArray<hsize_t, MAX_DIM> chunk_dims;
    global_dims[0] = global_dim1_size;
    global_dims[1] = dim2_size;

    Int64 chunk_size = global_dim1_size / nb_participating_rank;
    if (chunk_size < 1024)
      chunk_size = 1024;
    const Int64 max_chunk_size = 1024 * 1024 * 10;
    chunk_size = math::min(chunk_size, max_chunk_size);
    chunk_dims[0] = chunk_size;
    chunk_dims[1] = dim2_size;
    info() << "CHUNK nb_dim=" << nb_dim
           << " global_dim1_size=" << global_dim1_size
           << " chunk0=" << chunk_dims[0]
           << " chunk1=" << chunk_dims[1];
    file_space.createSimple(nb_dim, global_dims.data(), max_dims.data());
    HProperty plist_id;
    plist_id.create(H5P_DATASET_CREATE);
    H5Pset_chunk(plist_id.id(), nb_dim, chunk_dims.data());
    dataset.create(group, name.localstr(), hdf_type, file_space, HProperty{}, plist_id, HProperty{});

    hyperslab_offsets[0] = my_index;
    hyperslab_offsets[1] = 0;
  }
  else {
    dataset.open(group, name.localstr());
    file_space = dataset.getSpace();
    int nb_dimension = file_space.nbDimension();
    if (nb_dimension != nb_dim)
      ARCANE_THROW(IOException, "Bad dimension '{0}' for dataset '{1}' (should be 1)",
                   nb_dimension, name);
    FixedArray<hsize_t, MAX_DIM> original_dims;
    file_space.getDimensions(original_dims.data(), nullptr);
    hsize_t offset0 = original_dims[0];

    if (wanted_offset >= 0) {
      offset0 = wanted_offset;
      info() << "Forcing offset to " << wanted_offset;
    }
    global_dims[0] = offset0 + global_dim1_size;
    global_dims[1] = dim2_size;
    write_offset = offset0;

    if ((herror = dataset.setExtent(global_dims.data())) < 0)
      ARCANE_THROW(IOException, "Can not extent dataset '{0}' (err={1})", name, herror);
    file_space = dataset.getSpace();

    hyperslab_offsets[0] = offset0 + my_index;
    hyperslab_offsets[1] = 0;
    info(4) << "APPEND nb_dim=" << nb_dim
            << " dim0=" << global_dims[0]
            << " count0=" << local_dims[0]
            << " offsets0=" << hyperslab_offsets[0] << " name=" << name;
  }

  Int64 nb_write_byte = global_dim1_size * dim2_size * values_data.datatypeSize();

  // Split the write into several intervals if needed.
  Int64 nb_interval = 1;
  if (is_collective && m_max_write_size > 0) {
    nb_interval = 1 + nb_write_byte / (m_max_write_size * 1024);
  }
  info(4) << "WRITE global_size=" << nb_write_byte << " max_size=" << m_max_write_size << " nb_interval=" << nb_interval;

  for (Int64 i = 0; i < nb_interval; ++i) {
    auto [index, nb_element] = _getInterval(i, nb_interval, dim1_size);

    FixedArray<hsize_t, 2> dims;
    dims[0] = nb_element;
    dims[1] = dim2_size;

    FixedArray<hsize_t, 2> offsets;
    offsets[0] = hyperslab_offsets[0] + index;
    offsets[1] = 0;
    if ((herror = H5Sselect_hyperslab(file_space.id(), H5S_SELECT_SET, offsets.data(), nullptr, dims.data(), nullptr)) < 0)
      ARCANE_THROW(IOException, "Can not select hyperslab '{0}' (err={1})", name, herror);

    HSpace memory_space;
    memory_space.createSimple(nb_dim, dims.data());
    Int64 data_offset = index * values_data.datatypeSize() * dim2_size;

    if ((herror = dataset.write(hdf_type, values_data.data() + data_offset, memory_space, file_space, write_plist_id)) < 0)
      ARCANE_THROW(IOException, "Can not write dataset '{0}' (err={1})", name, herror);

    if (dataset.isBad())
      ARCANE_THROW(IOException, "Can not write dataset '{0}'", name);
  }

  if (!data_info.datasetInfo().isNull())
    m_offset_info_list.insert(std::make_pair(data_info.datasetInfo(), write_offset));
}
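
// Typed front end: converts the typed pointer into an untyped ConstMemoryView
// plus the matching native HDF5 datatype, then forwards to the routine above.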
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSetGeneric(const DataInfo& data_info, Int32 nb_dim,
                     Int64 dim1_size, Int64 dim2_size, const DataType* values_data,
                     bool is_collective)
{
  const hid_t hdf_type = m_standard_types.nativeType(DataType{});
  ConstMemoryView mem_view = makeConstMemoryView(values_data, sizeof(DataType), dim1_size * dim2_size);
  _writeDataSetGeneric(data_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1D(const DataInfo& data_info, Span<const DataType> values)
{
  _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(), false);
}

template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1DUsingCollectiveIO(const DataInfo& data_info, Span<const DataType> values)
{
  _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(), true);
}
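
// The *Collective variants pick one of three strategies: direct local write
// when running sequentially, HDF5 collective MPI/IO when available, and
// otherwise a gatherVariable() onto the master I/O rank, which then writes
// the concatenated array alone.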
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet1DCollective(const DataInfo& data_info, Span<const DataType> values)
{
  if (!m_is_parallel)
    return _writeDataSet1D(data_info, values);
  if (m_is_collective_io)
    return _writeDataSet1DUsingCollectiveIO(data_info, values);

  UniqueArray<DataType> all_values;
  IParallelMng* pm = m_mesh->parallelMng();
  pm->gatherVariable(values.smallView(), all_values, pm->masterIORank());
  if (m_is_master_io)
    _writeDataSet1D<DataType>(data_info, all_values);
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2D(const DataInfo& data_info, Span2<const DataType> values)
{
  _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(), false);
}

template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2DUsingCollectiveIO(const DataInfo& data_info, Span2<const DataType> values)
{
  _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(), true);
}

template <typename DataType> void VtkHdfV2DataWriter::
_writeDataSet2DCollective(const DataInfo& data_info, Span2<const DataType> values)
{
  if (!m_is_parallel)
    return _writeDataSet2D(data_info, values);
  if (m_is_collective_io)
    return _writeDataSet2DUsingCollectiveIO(data_info, values);

  Int64 dim2_size = values.dim2Size();
  UniqueArray<DataType> all_values;
  IParallelMng* pm = m_mesh->parallelMng();
  Span<const DataType> values_1d(values.data(), values.totalNbElement());
  pm->gatherVariable(values_1d.smallView(), all_values, pm->masterIORank());
  if (m_is_master_io) {
    Int64 dim1_size = all_values.size();
    if (dim2_size != 0)
      dim1_size = dim1_size / dim2_size;
    Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
    return _writeDataSet2D<DataType>(data_info, span2);
  }
}
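
// Attribute helpers. The array and string versions use the raw C API
// (H5Screate/H5Acreate2/H5Awrite); the Int64 scalar version goes through the
// Hdf5Utils wrappers and reopens the attribute on append steps, since an
// HDF5 attribute can only be created once on a given object.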
void VtkHdfV2DataWriter::
_addInt64ArrayAttribute(Hid& hid, const char* name, Span<const Int64> values)
{
  hsize_t len = values.size();
  hid_t aid = H5Screate_simple(1, &len, nullptr);
  hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
  if (attr < 0)
    ARCANE_FATAL("Can not create attribute '{0}'", name);
  int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
  if (ret < 0)
    ARCANE_FATAL("Can not write attribute '{0}'", name);
}

void VtkHdfV2DataWriter::
_addInt64Attribute(Hid& hid, const char* name, Int64 value)
{
  HSpace aid(H5Screate(H5S_SCALAR));
  HAttribute attr;
  if (m_is_first_call)
    attr.create(hid, name, H5T_NATIVE_INT64, aid);
  else
    attr.open(hid, name);
  if (attr.isBad())
    ARCANE_FATAL("Can not create attribute '{0}'", name);
  herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
  if (ret < 0)
    ARCANE_FATAL("Can not write attribute '{0}'", name);
}

Int64 VtkHdfV2DataWriter::
_readInt64Attribute(Hid& hid, const char* name)
{
  HAttribute attr;
  attr.open(hid, name);
  if (attr.isBad())
    ARCANE_FATAL("Can not open attribute '{0}'", name);
  Int64 value = 0;
  herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
  if (ret < 0)
    ARCANE_FATAL("Can not read attribute '{0}'", name);
  return value;
}

void VtkHdfV2DataWriter::
_addStringAttribute(Hid& hid, const char* name, const String& value)
{
  hid_t aid = H5Screate(H5S_SCALAR);
  hid_t attr_type = H5Tcopy(H5T_C_S1);
  H5Tset_size(attr_type, value.length());
  hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
  if (attr < 0)
    ARCANE_FATAL("Can not create attribute '{0}'", name);
  int ret = H5Awrite(attr, attr_type, value.localstr());
  ret = H5Tclose(attr_type);
  if (ret < 0)
    ARCANE_FATAL("Can not close type for attribute '{0}'", name);
}
void VtkHdfV2DataWriter::
endWrite()
{
  // Flush the offsets recorded for the datasets written during this step.
  for (const auto& i : m_offset_info_list) {
    Int64 offset = i.second;
    const DatasetInfo& offset_info = i.first;
    HGroup* hdf_group = offset_info.group();
    if (hdf_group)
      _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, asConstSpan(&offset));
  }
}
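
// Layout of the HDF5 groups: everything lives under /VTKHDF, with CellData,
// PointData and Steps subgroups; Steps itself holds the per-step offset
// groups (PointDataOffsets, CellDataOffsets, FieldDataOffsets).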
void VtkHdfV2DataWriter::
_openOrCreateGroups()
{
  m_top_group.openOrCreate(m_file_id, "VTKHDF");
  m_cell_data_group.openOrCreate(m_top_group, "CellData");
  m_node_data_group.openOrCreate(m_top_group, "PointData");
  m_steps_group.openOrCreate(m_top_group, "Steps");
  m_point_data_offsets_group.openOrCreate(m_steps_group, "PointDataOffsets");
  m_cell_data_offsets_group.openOrCreate(m_steps_group, "CellDataOffsets");
  m_field_data_offsets_group.openOrCreate(m_steps_group, "FieldDataOffsets");
}
void VtkHdfV2DataWriter::
_closeGroups()
{
  m_cell_data_group.close();
  m_node_data_group.close();
  m_point_data_offsets_group.close();
  m_cell_data_offsets_group.close();
  m_field_data_offsets_group.close();
  m_steps_group.close();
  m_top_group.close();
}
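
// write() validates the variable (scalar, non-partial, cell- or node-based),
// selects the destination group, then dispatches on the data type; the
// DT_Real3/DT_Real2 cases go through the dedicated conversion routines below.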
void VtkHdfV2DataWriter::
write(IVariable* var, IData* data)
{
  info(4) << "Write VtkHdfV2 var=" << var->name();

  if (var->dimension() != 1)
    ARCANE_FATAL("Only export of scalar item variable is implemented (name={0})", var->name());
  if (var->isPartial())
    ARCANE_FATAL("Export of partial variable is not implemented");

  HGroup* group = nullptr;
  eItemKind item_kind = var->itemKind();
  switch (item_kind) {
  case IK_Cell:
    group = &m_cell_data_group;
    break;
  case IK_Node:
    group = &m_node_data_group;
    break;
  default:
    ARCANE_FATAL("Only export of 'Cell' or 'Node' variable is implemented (name={0})", var->name());
  }

  eDataType data_type = var->dataType();
  switch (data_type) {
  // ... (cases for DT_Real, DT_Int64, DT_Int32, DT_Real3 and DT_Real2)
  default:
    warning() << String::format("Export for datatype '{0}' is not supported (var_name={1})",
                                data_type, var->name());
  }
}
template <typename DataType> void VtkHdfV2DataWriter::
_writeBasicTypeDataset(const DataInfo& data_info, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<DataType>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  _writeDataSet1DCollective(data_info, Span<const DataType>(true_data->view()));
}
void VtkHdfV2DataWriter::
_writeReal3Dataset(const DataInfo& data_info, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<Real3>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real3> values(true_data->view());
  Int32 nb_value = values.size();
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real3 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = v.z;
  }
  _writeDataSet2DCollective<Real>(data_info, scalar_values);
}
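
// Real2 values are padded with a null third component so that 2D vectors end
// up with the same N x 3 layout as Real3 vectors in the file.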
void VtkHdfV2DataWriter::
_writeReal2Dataset(const DataInfo& data_info, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<Real2>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real2> values(true_data->view());
  Int32 nb_value = values.size();
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real2 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = 0.0;
  }
  _writeDataSet2DCollective<Real>(data_info, scalar_values);
}
void VtkHdfV2DataWriter::
_readAndSetOffset(DatasetInfo& offset_info, Int32 wanted_step)
{
  HGroup* hgroup = offset_info.group();
  ARCANE_CHECK_POINTER(hgroup);
  StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
  UniqueArray<Int64> values;
  a.directRead(m_standard_types, values);
  Int64 offset_value = values[wanted_step];
  offset_info.setOffset(offset_value);
  info() << "VALUES name=" << offset_info.name() << " values=" << values
         << " wanted_step=" << wanted_step << " v=" << offset_value;
}
void VtkHdfV2DataWriter::
_initializeOffsets()
{
  m_cell_offset_info = DatasetInfo(m_steps_group, "CellOffsets");
  m_point_offset_info = DatasetInfo(m_steps_group, "PointOffsets");
  m_connectivity_offset_info = DatasetInfo(m_steps_group, "ConnectivityIdOffsets");
  m_offset_for_cell_offset_info = DatasetInfo("_OffsetForCellOffsetInfo");
  m_part_offset_info = DatasetInfo("_PartOffsetInfo");
  m_time_offset_info = DatasetInfo("_TimeOffsetInfo");

  if (m_is_writer && !m_is_first_call) {
    IParallelMng* pm = m_mesh->parallelMng();
    const Int32 nb_rank = pm->commSize();
    Int64 nb_current_step = _readInt64Attribute(m_steps_group, "NSteps");
    Int32 time_index = m_times.size();
    info(4) << "NB_STEP=" << nb_current_step << " time_index=" << time_index
            << " current_time=" << m_times.back();
    const bool debug_times = false;
    if (debug_times) {
      StandardArrayT<Real> a1(m_steps_group.id(), "Values");
      UniqueArray<Real> times;
      a1.directRead(m_standard_types, times);
      info() << "TIMES=" << times;
    }
    if ((nb_current_step + 1) != time_index) {
      info() << "[VtkHdf] go_backward detected";
      Int32 wanted_step = time_index - 1;
      _readAndSetOffset(m_cell_offset_info, wanted_step);
      _readAndSetOffset(m_point_offset_info, wanted_step);
      _readAndSetOffset(m_connectivity_offset_info, wanted_step);
      m_part_offset_info.setOffset(wanted_step * nb_rank);
      m_time_offset_info.setOffset(wanted_step);
      m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.offset() + wanted_step * nb_rank);
    }
  }
}
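
// Post-processor service: thin adapter that owns the VtkHdfV2DataWriter and
// exposes it through the post-processor interface generated from the AXL
// description (ArcaneVtkHdfV2PostProcessorObject).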
class VtkHdfV2PostProcessor
: public ArcaneVtkHdfV2PostProcessorObject
{
 public:

  explicit VtkHdfV2PostProcessor(const ServiceBuildInfo& sbi)
  : ArcaneVtkHdfV2PostProcessorObject(sbi)
  {
  }

  IDataWriter* dataWriter() override { return m_writer.get(); }
  void notifyBeginWrite() override
  {
    // ... (creation of the VtkHdfV2DataWriter instance 'w')
    w->setTimes(times());
    Directory dir(baseDirectoryName());
    w->setDirectoryName(dir.file("vtkhdfv2"));
    m_writer = std::move(w);
  }
  void notifyEndWrite() override
  {
    m_writer = nullptr;
  }
  void close() override {}

 private:

  std::unique_ptr<IDataWriter> m_writer;
};