14#include "arcane/utils/Collection.h"
15#include "arcane/utils/Enumerator.h"
16#include "arcane/utils/Iostream.h"
17#include "arcane/utils/StringBuilder.h"
18#include "arcane/utils/IOException.h"
19#include "arcane/utils/FixedArray.h"
20#include "arcane/utils/MemoryView.h"
22#include "arcane/core/PostProcessorWriterBase.h"
23#include "arcane/core/Directory.h"
24#include "arcane/core/FactoryService.h"
25#include "arcane/core/IDataWriter.h"
26#include "arcane/core/IData.h"
27#include "arcane/core/IItemFamily.h"
28#include "arcane/core/VariableCollection.h"
29#include "arcane/core/IParallelMng.h"
30#include "arcane/core/IMesh.h"
31#include "arcane/core/internal/VtkCellTypes.h"
33#include "arcane/hdf5/Hdf5Utils.h"
34#include "arcane/hdf5/VtkHdfV2PostProcessor_axl.h"
68using namespace Hdf5Utils;
72 template <
typename T> Span<const T>
73 asConstSpan(
const T* v)
75 return Span<const T>(v, 1);
// NOTE(review): fragment of class DatasetInfo — a (hdf_group, dataset_name,
// offset) triple used to remember where the next chunk of a dataset must be
// appended. Interleaving lines were lost by the extraction; code kept verbatim.
// True if this info has no associated dataset name.
121 bool isNull()
const {
return m_name.
null(); }
// HDF5 group holding the dataset (may be null for purely logical offsets).
123 HGroup* group()
const {
return m_group; }
// Name of the dataset inside the group.
124 const String& name()
const {
return m_name; }
// Current write offset; -1 means "append at the end" (see doxygen note below).
126 Int64
offset()
const {
return m_offset; }
127 void setOffset(Int64 v) { m_offset = v; }
// Ordering by name only, so DatasetInfo can key a std::map (m_offset_info_list).
128 friend bool operator<(
const DatasetInfo& s1,
const DatasetInfo& s2)
130 return (s1.m_name < s2.m_name);
135 HGroup* m_group =
nullptr;
// NOTE(review): fragment of class WritePartInfo — describes the slice of a
// global dataset written by one rank: total size across ranks, local size,
// and this rank's starting offset. Code kept verbatim; lines are missing.
145 void setTotalSize(Int64 v) { m_total_size = v; }
146 void setSize(Int64 v) { m_size = v; }
147 void setOffset(Int64 v) { m_offset = v; }
// Sum of the sizes of all ranks.
149 Int64 totalSize()
const {
return m_total_size; }
// Size written by this rank.
150 Int64 size()
const {
return m_size; }
// Starting index of this rank's part in the global dataset.
151 Int64 offset()
const {
return m_offset; }
156 Int64 m_total_size = 0;
// NOTE(review): fragments of DataInfo constructors and of the
// VtkHdfV2DataWriter member/state declarations. Many interleaving lines were
// lost by the extraction; code kept verbatim, comments added for orientation.
194 , m_dataset_info(dataset_info)
200 , m_dataset_info(dataset_info)
201 , m_group_info(group_info)
// Offset bookkeeping associated with the dataset being written.
207 DatasetInfo datasetInfo()
const {
return m_dataset_info; }
223 void endWrite()
override;
// Output directory and maximum collective-write chunk size (in kB; 0 = no limit).
230 void setDirectoryName(
const String& dir_name) { m_directory_name = dir_name; }
231 void setMaxWriteSize(Int64 v) { m_max_write_size = v; }
235 IMesh* m_mesh =
nullptr;
// Times of the protections/outputs already written (one entry per step).
241 UniqueArray<Real> m_times;
244 String m_full_filename;
247 String m_directory_name;
// HDF5 groups of the VTKHDF layout ("CellData", "PointData", "Steps", ...).
253 HGroup m_cell_data_group;
254 HGroup m_node_data_group;
256 HGroup m_steps_group;
257 HGroup m_point_data_offsets_group;
258 HGroup m_cell_data_offsets_group;
259 HGroup m_field_data_offsets_group;
// Runtime mode flags, set in beginWrite() from the IParallelMng.
261 bool m_is_parallel =
false;
262 bool m_is_master_io =
false;
263 bool m_is_collective_io =
false;
264 bool m_is_first_call =
false;
265 bool m_is_writer =
false;
// Per-dataset offset trackers used when appending a new time step.
267 DatasetInfo m_cell_offset_info;
268 DatasetInfo m_point_offset_info;
269 DatasetInfo m_connectivity_offset_info;
270 DatasetInfo m_offset_for_cell_offset_info;
271 DatasetInfo m_part_offset_info;
272 DatasetInfo m_time_offset_info;
// Offsets recorded during the current step, flushed in endWrite().
273 std::map<DatasetInfo, Int64> m_offset_info_list;
275 StandardTypes m_standard_types{
false };
// Collective (all-rank) size/offset info for the AllCells/AllNodes groups.
277 ItemGroupCollectiveInfo m_all_cells_info;
278 ItemGroupCollectiveInfo m_all_nodes_info;
286 Int64 m_max_write_size = 0;
// NOTE(review): fragment — private method declarations of VtkHdfV2DataWriter
// plus the small inline _getFileName(). Code kept verbatim; lines are missing.
// Attribute helpers (HDF5 attributes on a group/dataset).
290 void _addInt64ArrayAttribute(Hid& hid,
const char* name, Span<const Int64> values);
291 void _addStringAttribute(Hid& hid,
const char* name,
const String& value);
// 1D dataset writers: plain, collective-MPIIO, and the dispatching variant.
293 template <
typename DataType>
void
294 _writeDataSet1D(
const DataInfo& data_info, Span<const DataType> values);
295 template <
typename DataType>
void
296 _writeDataSet1DUsingCollectiveIO(
const DataInfo& data_info, Span<const DataType> values);
297 template <
typename DataType>
void
298 _writeDataSet1DCollective(
const DataInfo& data_info, Span<const DataType> values);
// 2D dataset writers, same three flavors.
299 template <
typename DataType>
void
300 _writeDataSet2D(
const DataInfo& data_info, Span2<const DataType> values);
301 template <
typename DataType>
void
302 _writeDataSet2DUsingCollectiveIO(
const DataInfo& data_info, Span2<const DataType> values);
303 template <
typename DataType>
void
304 _writeDataSet2DCollective(
const DataInfo& data_info, Span2<const DataType> values);
// Variable-data writers dispatching on the Arcane data type.
305 template <
typename DataType>
void
306 _writeBasicTypeDataset(
const DataInfo& data_info, IData* data);
307 void _writeReal3Dataset(
const DataInfo& data_info, IData* data);
308 void _writeReal2Dataset(
const DataInfo& data_info, IData* data);
// File name derived from the mesh name (extraction dropped the suffix line).
310 String _getFileName()
312 StringBuilder sb(m_mesh->name());
314 return sb.toString();
// Generic low-level writers; the typed overload forwards to the memory-view one.
316 template <
typename DataType>
void
317 _writeDataSetGeneric(
const DataInfo& data_info, Int32 nb_dim,
318 Int64 dim1_size, Int64 dim2_size,
const DataType* values_data,
320 void _writeDataSetGeneric(
const DataInfo& data_info, Int32 nb_dim,
321 Int64 dim1_size, Int64 dim2_size, ConstMemoryView values_data,
322 const hid_t hdf_datatype_type,
bool is_collective);
323 void _addInt64Attribute(Hid& hid,
const char* name, Int64 value);
324 Int64 _readInt64Attribute(Hid& hid,
const char* name);
325 void _openOrCreateGroups();
// Offset management for time-step append / rollback support.
327 void _readAndSetOffset(DatasetInfo& offset_info, Int32 wanted_step);
328 void _initializeOffsets();
329 void _initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info);
330 WritePartInfo _computeWritePartInfo(Int64 local_size);
// NOTE(review): fragment of the VtkHdfV2DataWriter constructor initializer
// list (the signature line and some initializers were lost by the extraction).
338: TraceAccessor(mesh->traceMng())
341, m_is_collective_io(is_collective_io)
342, m_all_cells_info(mesh->allCells())
343, m_all_nodes_info(mesh->allNodes())
// NOTE(review): first half of beginWrite() — opens/creates the HDF5 file,
// writes the VTKHDF header attributes and gathers the cell topology arrays.
// The extraction lost braces, loop headers and several statements; the code
// below is kept verbatim, only comments are added.
350void VtkHdfV2DataWriter::
351beginWrite(
const VariableCollection& vars)
// Determine parallel mode for this output.
355 IParallelMng* pm = m_mesh->parallelMng();
356 const Int32 nb_rank = pm->commSize();
357 m_is_parallel = nb_rank > 1;
358 m_is_master_io = pm->isMasterIO();
// First call (time_index < 2) truncates the file; later calls append a step.
360 Int32 time_index = m_times.size();
361 const bool is_first_call = (time_index < 2);
362 m_is_first_call = is_first_call;
364 pwarning() <<
"L'implémentation au format 'VtkHdfV2' est expérimentale";
366 String filename = _getFileName();
368 Directory dir(m_directory_name);
370 m_full_filename = dir.file(filename);
371 info(4) <<
"VtkHdfV2DataWriter::beginWrite() file=" << m_full_filename;
// Collective MPI-IO is disabled for hybrid/thread parallel implementations.
379 if (pm->isHybridImplementation() || pm->isThreadImplementation())
380 m_is_collective_io =
false;
383 info() <<
"VtkHdfV2DataWriter: using collective MPI/IO ?=" << m_is_collective_io;
384 info() <<
"VtkHdfV2DataWriter: max_write_size (kB) =" << m_max_write_size;
// With collective IO every rank writes; otherwise only the master rank does.
390 m_is_writer = m_is_master_io || m_is_collective_io;
394 if (m_is_collective_io)
395 plist_id.createFilePropertyMPIIO(pm);
397 if (is_first_call && m_is_master_io)
398 dir.createDirectory();
400 if (m_is_collective_io)
// Truncate on first call, append on subsequent time steps.
407 m_file_id.openTruncate(m_full_filename, plist_id.id());
409 m_file_id.openAppend(m_full_filename, plist_id.id());
411 _openOrCreateGroups();
// Mandatory VTKHDF root attributes (format version 2.0, unstructured grid).
414 std::array<Int64, 2> version = { 2, 0 };
415 _addInt64ArrayAttribute(m_top_group,
"Version", version);
416 _addStringAttribute(m_top_group,
"Type",
"UnstructuredGrid");
421 _initializeItemGroupCollectiveInfos(m_all_cells_info);
422 _initializeItemGroupCollectiveInfos(m_all_nodes_info);
424 CellGroup all_cells = m_mesh->allCells();
425 NodeGroup all_nodes = m_mesh->allNodes();
427 const Int32 nb_cell = all_cells.
size();
428 const Int32 nb_node = all_nodes.size();
// Total number of cell->node connectivity entries (loop header lost).
430 Int32 total_nb_connected_node = 0;
433 total_nb_connected_node += cell.nodeIds().size();
// Per-cell topology arrays in VTKHDF layout (offsets has nb_cell+1 entries).
438 UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
439 UniqueArray<Int64> cells_offset(nb_cell + 1);
440 UniqueArray<unsigned char> cells_ghost_type(nb_cell);
441 UniqueArray<unsigned char> cells_type(nb_cell);
442 UniqueArray<Int64> cells_uid(nb_cell);
445 Int32 connected_node_index = 0;
447 Int32 index = icell.index();
450 cells_uid[index] = cell.uniqueId();
// Non-owned cells are flagged as VTK duplicate (ghost) cells.
453 bool is_ghost = !cell.isOwn();
455 ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
456 cells_ghost_type[index] = ghost_type;
458 unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
459 cells_type[index] = vtk_type;
460 for (NodeLocalId node : cell.nodeIds()) {
461 cells_connectivity[connected_node_index] = node;
462 ++connected_node_index;
464 cells_offset[index + 1] = connected_node_index;
468 _initializeOffsets();
// NOTE(review): second half of beginWrite() — writes topology, point
// coordinates and step metadata datasets. Kept verbatim except for one fix:
// the extraction turned '&current_time' into the HTML-entity mojibake
// '¤t_time' ('&curr' was eaten by '&curren;'); restored below.
471 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Offsets" }, m_offset_for_cell_offset_info }, cells_offset);
473 _writeDataSet1DCollective<Int64>({ { m_top_group,
"Connectivity" }, m_connectivity_offset_info },
475 _writeDataSet1DCollective<unsigned char>({ { m_top_group,
"Types" }, m_cell_offset_info }, cells_type);
// Per-part (per-rank) sizes, one scalar per rank.
478 Int64 nb_cell_int64 = nb_cell;
479 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfCells" }, m_part_offset_info },
480 asConstSpan(&nb_cell_int64));
481 Int64 nb_node_int64 = nb_node;
482 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfPoints" }, m_part_offset_info },
483 asConstSpan(&nb_node_int64));
484 Int64 number_of_connectivity_ids = cells_connectivity.size();
485 _writeDataSet1DCollective<Int64>({ { m_top_group,
"NumberOfConnectivityIds" }, m_part_offset_info },
486 asConstSpan(&number_of_connectivity_ids));
// Node unique ids, ghost flags and coordinates (as a (nb_node,3) array).
491 UniqueArray<Int64> nodes_uid(nb_node);
492 UniqueArray<unsigned char> nodes_ghost_type(nb_node);
494 UniqueArray2<Real> points;
495 points.resize(nb_node, 3);
497 Int32 index = inode.index();
500 nodes_uid[index] = node.uniqueId();
// Non-owned nodes are flagged as VTK duplicate (ghost) points.
503 bool is_ghost = !node.isOwn();
505 ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
506 nodes_ghost_type[index] = ghost_type;
508 Real3 pos = nodes_coordinates[inode];
509 points[index][0] = pos.x;
510 points[index][1] = pos.y;
511 points[index][2] = pos.z;
515 _writeDataSet1DCollective<Int64>({ { m_node_data_group,
"GlobalNodeId" }, m_cell_offset_info }, nodes_uid);
518 _writeDataSet1DCollective<unsigned char>({ { m_node_data_group,
"vtkGhostType" }, m_cell_offset_info }, nodes_ghost_type);
521 _writeDataSet2DCollective<Real>({ { m_top_group,
"Points" }, m_point_offset_info }, points);
525 _writeDataSet1DCollective<unsigned char>({ { m_cell_data_group,
"vtkGhostType" }, m_cell_offset_info }, cells_ghost_type);
529 _writeDataSet1DCollective<Int64>({ { m_cell_data_group,
"GlobalCellId" }, m_cell_offset_info }, cells_uid);
// Step metadata: current time value and per-step part offset (non collective).
534 Real current_time = m_times[time_index - 1];
535 _writeDataSet1D<Real>({ { m_steps_group,
"Values" }, m_time_offset_info }, asConstSpan(&current_time));
538 Int64 part_offset = (time_index - 1) * pm->commSize();
539 _writeDataSet1D<Int64>({ { m_steps_group,
"PartOffsets" }, m_time_offset_info }, asConstSpan(&part_offset));
542 _addInt64Attribute(m_steps_group,
"NSteps", time_index);
551VtkHdfV2DataWriter::WritePartInfo VtkHdfV2DataWriter::
552_computeWritePartInfo(Int64 local_size)
555 IParallelMng* pm = m_mesh->parallelMng();
556 Int32 nb_rank = pm->commSize();
557 Int32 my_rank = pm->commRank();
559 UniqueArray<Int64> ranks_size(nb_rank);
560 ArrayView<Int64> all_sizes(ranks_size);
561 Int64 dim1_size = local_size;
562 pm->allGather(ConstArrayView<Int64>(1, &dim1_size), all_sizes);
564 Int64 total_size = 0;
565 for (Integer i = 0; i < nb_rank; ++i)
566 total_size += all_sizes[i];
569 for (Integer i = 0; i < my_rank; ++i)
570 my_index += all_sizes[i];
572 WritePartInfo part_info;
573 part_info.setTotalSize(total_size);
574 part_info.setSize(local_size);
575 part_info.setOffset(my_index);
582void VtkHdfV2DataWriter::
583_initializeItemGroupCollectiveInfos(ItemGroupCollectiveInfo& group_info)
585 Int64 dim1_size = group_info.m_item_group.size();
586 group_info.setWritePartInfo(_computeWritePartInfo(dim1_size));
591 std::pair<Int64, Int64> _getInterval(Int64 index, Int64 nb_interval, Int64 total_size)
593 Int64 n = total_size;
594 Int64 isize = n / nb_interval;
595 Int64 ibegin = index * isize;
597 if ((index + 1) == nb_interval)
599 return { ibegin, isize };
// NOTE(review): core low-level writer. Creates the dataset (chunked,
// extensible) on the first call, extends it and appends at the recorded
// offset on later calls, then writes the local part through a hyperslab
// selection, optionally split into bounded-size chunks for collective IO.
// The extraction lost braces, 'else' lines, some declarations (file_space,
// dataset, plist_id, herror, my_index) and error branches; the code is kept
// verbatim, only comments are added.
611void VtkHdfV2DataWriter::
612_writeDataSetGeneric(
const DataInfo& data_info, Int32 nb_dim,
613 Int64 dim1_size, Int64 dim2_size,
614 ConstMemoryView values_data,
615 const hid_t hdf_type,
bool is_collective)
620 HGroup& group = data_info.dataset.group;
621 const String& name = data_info.dataset.name;
// wanted_offset < 0 means "append at the end of the existing dataset".
625 Int64 wanted_offset = data_info.datasetInfo().offset();
627 static constexpr int MAX_DIM = 2;
// Local (this-rank) extent of the write.
636 FixedArray<hsize_t, MAX_DIM> local_dims;
637 local_dims[0] = dim1_size;
638 local_dims[1] = dim2_size;
641 FixedArray<hsize_t, MAX_DIM> global_dims;
// First dimension is unlimited so the dataset can grow one step at a time.
645 FixedArray<hsize_t, MAX_DIM> max_dims;
646 max_dims[0] = H5S_UNLIMITED;
647 max_dims[1] = dim2_size;
650 Int64 write_offset = 0;
653 Int64 global_dim1_size = dim1_size;
654 Int32 nb_participating_rank = 1;
// Collective mode: compute the global size and this rank's offset, either
// from the cached per-group info or on the fly.
657 nb_participating_rank = m_mesh->parallelMng()->commSize();
658 WritePartInfo part_info;
659 if (data_info.m_group_info) {
662 part_info = data_info.m_group_info->writePartInfo();
665 part_info = _computeWritePartInfo(dim1_size);
667 global_dim1_size = part_info.totalSize();
668 my_index = part_info.offset();
671 HProperty write_plist_id;
673 write_plist_id.createDatasetTransfertCollectiveMPIIO();
676 FixedArray<hsize_t, MAX_DIM> hyperslab_offsets;
678 if (m_is_first_call) {
// First call: create the chunked dataset. Chunk size is the per-rank share,
// clamped to at least 1024 (branch body lost) and at most 10 MiB elements.
680 FixedArray<hsize_t, MAX_DIM> chunk_dims;
681 global_dims[0] = global_dim1_size;
682 global_dims[1] = dim2_size;
684 Int64 chunk_size = global_dim1_size / nb_participating_rank;
685 if (chunk_size < 1024)
687 const Int64 max_chunk_size = 1024 * 1024 * 10;
688 chunk_size =
math::min(chunk_size, max_chunk_size);
689 chunk_dims[0] = chunk_size;
690 chunk_dims[1] = dim2_size;
691 info() <<
"CHUNK nb_dim=" << nb_dim
692 <<
" global_dim1_size=" << global_dim1_size
693 <<
" chunk0=" << chunk_dims[0]
694 <<
" chunk1=" << chunk_dims[1]
696 file_space.createSimple(nb_dim, global_dims.data(), max_dims.data())
698 plist_id.create(H5P_DATASET_CREATE);
699 H5Pset_chunk(plist_id.id(), nb_dim, chunk_dims.data());
700 dataset.create(group, name.localstr(), hdf_type, file_space, HProperty{}, plist_id, HProperty{});
703 hyperslab_offsets[0] = my_index;
704 hyperslab_offsets[1] = 0;
// Later calls: open the existing dataset, check its rank, then extend it.
710 dataset.open(group, name.localstr());
711 file_space = dataset.getSpace();
712 int nb_dimension = file_space.nbDimension();
713 if (nb_dimension != nb_dim)
714 ARCANE_THROW(IOException,
"Bad dimension '{0}' for dataset '{1}' (should be 1)",
717 FixedArray<hsize_t, MAX_DIM> original_dims;
718 file_space.getDimensions(original_dims.data(),
nullptr);
719 hsize_t offset0 = original_dims[0];
// A recorded offset (>= 0) overrides append-at-end: used when going backward
// in time (see _initializeOffsets()).
722 if (wanted_offset >= 0) {
723 offset0 = wanted_offset;
724 info() <<
"Forcing offset to " << wanted_offset;
726 global_dims[0] = offset0 + global_dim1_size;
727 global_dims[1] = dim2_size;
728 write_offset = offset0;
731 if ((herror = dataset.setExtent(global_dims.data())) < 0)
732 ARCANE_THROW(IOException,
"Can not extent dataset '{0}' (err={1})", name, herror);
733 file_space = dataset.getSpace();
735 hyperslab_offsets[0] = offset0 + my_index;
736 hyperslab_offsets[1] = 0;
737 info(4) <<
"APPEND nb_dim=" << nb_dim
738 <<
" dim0=" << global_dims[0]
739 <<
" count0=" << local_dims[0]
740 <<
" offsets0=" << hyperslab_offsets[0] <<
" name=" << name;
743 Int64 nb_write_byte = global_dim1_size * dim2_size * values_data.datatypeSize();
// Split large collective writes into intervals of at most m_max_write_size kB.
747 Int64 nb_interval = 1;
748 if (is_collective && m_max_write_size > 0) {
749 nb_interval = 1 + nb_write_byte / (m_max_write_size * 1024);
751 info(4) <<
"WRITE global_size=" << nb_write_byte <<
" max_size=" << m_max_write_size <<
" nb_interval=" << nb_interval;
753 for (Int64 i = 0; i < nb_interval; ++i) {
754 auto [index, nb_element] = _getInterval(i, nb_interval, dim1_size);
// Select this interval's hyperslab in the file space and write it.
756 FixedArray<hsize_t, 2> dims;
757 dims[0] = nb_element;
759 FixedArray<hsize_t, 2> offsets;
760 offsets[0] = hyperslab_offsets[0] + index;
762 if ((herror = H5Sselect_hyperslab(file_space.id(), H5S_SELECT_SET, offsets.data(),
nullptr, dims.data(),
nullptr)) < 0)
763 ARCANE_THROW(IOException,
"Can not select hyperslab '{0}' (err={1})", name, herror);
766 memory_space.createSimple(nb_dim, dims.data());
767 Int64 data_offset = index * values_data.datatypeSize() * dim2_size;
769 if ((herror = dataset.write(hdf_type, values_data.data() + data_offset, memory_space, file_space, write_plist_id)) < 0)
770 ARCANE_THROW(IOException,
"Can not write dataset '{0}' (err={1})", name, herror);
773 ARCANE_THROW(IOException,
"Can not write dataset '{0}'", name);
// Remember where this dataset's next step must be appended.
776 if (!data_info.datasetInfo().isNull())
777 m_offset_info_list.insert(std::make_pair(data_info.datasetInfo(), write_offset));
783template <
typename DataType>
void VtkHdfV2DataWriter::
784_writeDataSetGeneric(
const DataInfo& data_info, Int32 nb_dim,
785 Int64 dim1_size, Int64 dim2_size,
const DataType* values_data,
788 const hid_t hdf_type = m_standard_types.nativeType(DataType{});
789 ConstMemoryView mem_view = makeConstMemoryView(values_data,
sizeof(DataType), dim1_size * dim2_size);
790 _writeDataSetGeneric(data_info, nb_dim, dim1_size, dim2_size, mem_view, hdf_type, is_collective);
796template <
typename DataType>
void VtkHdfV2DataWriter::
797_writeDataSet1D(
const DataInfo& data_info, Span<const DataType> values)
799 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
false);
805template <
typename DataType>
void VtkHdfV2DataWriter::
806_writeDataSet1DUsingCollectiveIO(
const DataInfo& data_info, Span<const DataType> values)
808 _writeDataSetGeneric(data_info, 1, values.size(), 1, values.data(),
true);
814template <
typename DataType>
void VtkHdfV2DataWriter::
815_writeDataSet1DCollective(
const DataInfo& data_info, Span<const DataType> values)
818 return _writeDataSet1D(data_info, values);
819 if (m_is_collective_io)
820 return _writeDataSet1DUsingCollectiveIO(data_info, values);
821 UniqueArray<DataType> all_values;
822 IParallelMng* pm = m_mesh->parallelMng();
823 pm->gatherVariable(values.smallView(), all_values, pm->masterIORank());
825 _writeDataSet1D<DataType>(data_info, all_values);
831template <
typename DataType>
void VtkHdfV2DataWriter::
832_writeDataSet2D(
const DataInfo& data_info, Span2<const DataType> values)
834 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
false);
840template <
typename DataType>
void VtkHdfV2DataWriter::
841_writeDataSet2DUsingCollectiveIO(
const DataInfo& data_info, Span2<const DataType> values)
843 _writeDataSetGeneric(data_info, 2, values.dim1Size(), values.dim2Size(), values.data(),
true);
849template <
typename DataType>
void VtkHdfV2DataWriter::
850_writeDataSet2DCollective(
const DataInfo& data_info, Span2<const DataType> values)
853 return _writeDataSet2D(data_info, values);
854 if (m_is_collective_io)
855 return _writeDataSet2DUsingCollectiveIO(data_info, values);
857 Int64 dim2_size = values.dim2Size();
858 UniqueArray<DataType> all_values;
859 IParallelMng* pm = m_mesh->parallelMng();
860 Span<const DataType> values_1d(values.data(), values.totalNbElement());
861 pm->gatherVariable(values_1d.smallView(), all_values, pm->masterIORank());
862 if (m_is_master_io) {
863 Int64 dim1_size = all_values.size();
865 dim1_size = dim1_size / dim2_size;
866 Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
867 return _writeDataSet2D<DataType>(data_info, span2);
// NOTE(review): fragment — adds a 1D Int64-array HDF5 attribute named \a name
// on \a hid. The error-checking branches after H5Acreate2/H5Awrite and the
// H5Aclose/H5Sclose calls were lost by the extraction; code kept verbatim.
874void VtkHdfV2DataWriter::
875_addInt64ArrayAttribute(Hid& hid,
const char* name, Span<const Int64> values)
877 hsize_t
len = values.size();
878 hid_t aid = H5Screate_simple(1, &len,
nullptr);
879 hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
882 int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
// NOTE(review): fragment — writes a scalar Int64 HDF5 attribute, creating it
// on the first call and re-opening it afterwards (the branch lines selecting
// create vs open were lost by the extraction); code kept verbatim.
892void VtkHdfV2DataWriter::
893_addInt64Attribute(Hid& hid,
const char* name, Int64 value)
895 HSpace aid(H5Screate(H5S_SCALAR));
898 attr.create(hid, name, H5T_NATIVE_INT64, aid);
900 attr.open(hid, name);
903 herr_t ret = attr.write(H5T_NATIVE_INT64, &value);
// NOTE(review): fragment — reads back a scalar Int64 HDF5 attribute. The
// 'value' declaration, error checks and return were lost by the extraction;
// code kept verbatim.
911Int64 VtkHdfV2DataWriter::
912_readInt64Attribute(Hid& hid,
const char* name)
915 attr.open(hid, name);
919 herr_t ret = attr.read(H5T_NATIVE_INT64, &value);
// NOTE(review): fragment — writes a fixed-length string HDF5 attribute using
// a sized copy of H5T_C_S1. Error checks and the closing of 'aid'/'attr'
// were lost by the extraction; code kept verbatim.
928void VtkHdfV2DataWriter::
929_addStringAttribute(Hid& hid,
const char* name,
const String& value)
931 hid_t aid = H5Screate(H5S_SCALAR);
932 hid_t attr_type = H5Tcopy(H5T_C_S1);
933 H5Tset_size(attr_type, value.length());
934 hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
937 int ret = H5Awrite(attr, attr_type, value.localstr());
938 ret = H5Tclose(attr_type);
// NOTE(review): fragment of endWrite() — flushes the per-dataset write
// offsets recorded during the step into their "...Offsets" datasets under
// the Steps group. The function name line, guards and file-closing code were
// lost by the extraction; code kept verbatim.
948void VtkHdfV2DataWriter::
954 for (
const auto& i : m_offset_info_list) {
955 Int64 offset = i.second;
956 const DatasetInfo& offset_info = i.first;
957 HGroup* hdf_group = offset_info.group();
// Only entries with a real HDF5 group are persisted (guard line lost).
960 _writeDataSet1D<Int64>({ { *hdf_group, offset_info.name() }, m_time_offset_info }, asConstSpan(&offset));
970void VtkHdfV2DataWriter::
974 m_top_group.openOrCreate(m_file_id,
"VTKHDF");
975 m_cell_data_group.openOrCreate(m_top_group,
"CellData");
976 m_node_data_group.openOrCreate(m_top_group,
"PointData");
977 m_steps_group.openOrCreate(m_top_group,
"Steps");
978 m_point_data_offsets_group.openOrCreate(m_steps_group,
"PointDataOffsets");
979 m_cell_data_offsets_group.openOrCreate(m_steps_group,
"CellDataOffsets");
980 m_field_data_offsets_group.openOrCreate(m_steps_group,
"FieldDataOffsets");
986void VtkHdfV2DataWriter::
989 m_cell_data_group.close();
990 m_node_data_group.close();
991 m_point_data_offsets_group.close();
992 m_cell_data_offsets_group.close();
993 m_field_data_offsets_group.close();
994 m_steps_group.close();
// NOTE(review): fragments of setMetaData() (no-op, meta-data unused) and of
// write(IVariable*, IData*) — validates the variable (scalar, non-partial,
// cell or node kind), selects the target group/offset info, then dispatches
// on the data type. Case labels, declarations of group/offset_info/group_info
// and the DataInfo construction were lost by the extraction; code verbatim.
1004 ARCANE_UNUSED(meta_data);
1013 info(4) <<
"Write VtkHdfV2 var=" << var->
name();
// Only scalar (dimension 1) item variables are supported.
1018 ARCANE_FATAL(
"Only export of scalar item variable is implemented (name={0})", var->
name());
1020 ARCANE_FATAL(
"Export of partial variable is not implemented");
// Select destination group and offset tracker from the item kind.
1025 switch (item_kind) {
1027 group = &m_cell_data_group;
1028 offset_info = m_cell_offset_info;
1029 group_info = &m_all_cells_info;
1032 group = &m_node_data_group;
1033 offset_info = m_point_offset_info;
1034 group_info = &m_all_nodes_info;
1037 ARCANE_FATAL(
"Only export of 'Cell' or 'Node' variable is implemented (name={0})", var->
name());
// Dispatch on the Arcane data type (case labels lost by the extraction).
1044 switch (data_type) {
1046 _writeBasicTypeDataset<Real>(data_info, data);
1049 _writeBasicTypeDataset<Int64>(data_info, data);
1052 _writeBasicTypeDataset<Int32>(data_info, data);
1055 _writeReal3Dataset(data_info, data);
1058 _writeReal2Dataset(data_info, data);
1061 warning() << String::format(
"Export for datatype '{0}' is not supported (var_name={1})", data_type, var->
name());
// NOTE(review): fragment — the whole body of _writeBasicTypeDataset (a cast
// of 'data' to IArrayDataT<DataType> followed by a collective 1D write,
// presumably) was lost by the extraction; only the signature remains.
1068template <
typename DataType>
void VtkHdfV2DataWriter::
1069_writeBasicTypeDataset(
const DataInfo& data_info,
IData* data)
1079void VtkHdfV2DataWriter::
1080_writeReal3Dataset(
const DataInfo& data_info, IData* data)
1082 auto* true_data =
dynamic_cast<IArrayDataT<Real3>*
>(data);
1084 SmallSpan<const Real3> values(true_data->view());
1085 Int32 nb_value = values.size();
1087 UniqueArray2<Real> scalar_values;
1088 scalar_values.resize(nb_value, 3);
1089 for (Int32 i = 0; i < nb_value; ++i) {
1090 Real3 v = values[i];
1091 scalar_values[i][0] = v.x;
1092 scalar_values[i][1] = v.y;
1093 scalar_values[i][2] = v.z;
1095 _writeDataSet2DCollective<Real>(data_info, scalar_values);
1101void VtkHdfV2DataWriter::
1102_writeReal2Dataset(
const DataInfo& data_info, IData* data)
1105 auto* true_data =
dynamic_cast<IArrayDataT<Real2>*
>(data);
1107 SmallSpan<const Real2> values(true_data->view());
1108 Int32 nb_value = values.size();
1109 UniqueArray2<Real> scalar_values;
1110 scalar_values.resize(nb_value, 3);
1111 for (Int32 i = 0; i < nb_value; ++i) {
1112 Real2 v = values[i];
1113 scalar_values[i][0] = v.x;
1114 scalar_values[i][1] = v.y;
1115 scalar_values[i][2] = 0.0;
1117 _writeDataSet2DCollective<Real>(data_info, scalar_values);
1123void VtkHdfV2DataWriter::
1124_readAndSetOffset(DatasetInfo& offset_info, Int32 wanted_step)
1126 HGroup* hgroup = offset_info.group();
1128 StandardArrayT<Int64> a(hgroup->id(), offset_info.name());
1129 UniqueArray<Int64> values;
1130 a.directRead(m_standard_types, values);
1131 Int64 offset_value = values[wanted_step];
1132 offset_info.setOffset(offset_value);
1133 info() <<
"VALUES name=" << offset_info.name() <<
" values=" << values
1134 <<
" wanted_step=" << wanted_step <<
" v=" << offset_value;
// NOTE(review): fragment of _initializeOffsets() — sets up the DatasetInfo
// trackers and, when the current time index does not match the NSteps
// attribute already in the file (a "go backward" after a restart), re-reads
// the per-step offsets so the new step overwrites the stale ones. Many guard
// and brace lines were lost by the extraction; code kept verbatim.
1140void VtkHdfV2DataWriter::
// Offsets persisted as datasets under the "Steps" group.
1160 m_cell_offset_info = DatasetInfo(m_steps_group,
"CellOffsets");
1161 m_point_offset_info = DatasetInfo(m_steps_group,
"PointOffsets");
1162 m_connectivity_offset_info = DatasetInfo(m_steps_group,
"ConnectivityIdOffsets");
// Purely logical offsets (no HDF5 group): leading underscore names.
1164 m_offset_for_cell_offset_info = DatasetInfo(
"_OffsetForCellOffsetInfo");
1165 m_part_offset_info = DatasetInfo(
"_PartOffsetInfo");
1166 m_time_offset_info = DatasetInfo(
"_TimeOffsetInfo");
// Only a writer appending to an existing file needs offset reconciliation.
1171 if (m_is_writer && !m_is_first_call) {
1172 IParallelMng* pm = m_mesh->parallelMng();
1173 const Int32 nb_rank = pm->commSize();
1174 Int64 nb_current_step = _readInt64Attribute(m_steps_group,
"NSteps");
1175 Int32 time_index = m_times.size();
1176 info(4) <<
"NB_STEP=" << nb_current_step <<
" time_index=" << time_index
1177 <<
" current_time=" << m_times.back();
1178 const bool debug_times =
false;
1180 StandardArrayT<Real> a1(m_steps_group.id(),
"Values");
1181 UniqueArray<Real> times;
1182 a1.directRead(m_standard_types, times);
1183 info() <<
"TIMES=" << times;
// Mismatch means the simulation went back in time: restore step offsets.
1185 if ((nb_current_step + 1) != time_index) {
1186 info() <<
"[VtkHdf] go_backward detected";
1187 Int32 wanted_step = time_index - 1;
1190 _readAndSetOffset(m_cell_offset_info, wanted_step);
1191 _readAndSetOffset(m_point_offset_info, wanted_step);
1192 _readAndSetOffset(m_connectivity_offset_info, wanted_step);
1193 m_part_offset_info.setOffset(wanted_step * nb_rank);
1194 m_time_offset_info.setOffset(wanted_step);
1195 m_offset_for_cell_offset_info.setOffset(m_cell_offset_info.offset() + wanted_step * nb_rank);
// NOTE(review): fragment of the VtkHdfV2PostProcessor service class — wires
// the axl-generated options into a VtkHdfV2DataWriter instance when a write
// session begins. The class header, method braces and some guards (e.g. the
// options() null check around lines 1223-1226) were lost by the extraction;
// code kept verbatim.
1209:
public ArcaneVtkHdfV2PostProcessorObject
1214 : ArcaneVtkHdfV2PostProcessorObject(sbi)
1218 IDataWriter* dataWriter()
override {
return m_writer.get(); }
1219 void notifyBeginWrite()
override
// Defaults when no options are provided: collective IO on, no size limit.
1221 bool use_collective_io =
true;
1222 Int64 max_write_size = 0;
1224 use_collective_io = options()->useCollectiveWrite();
1225 max_write_size = options()->maxWriteSize();
// Build and configure the writer; output goes to '<base>/vtkhdfv2'.
1227 auto w = std::make_unique<VtkHdfV2DataWriter>(mesh(), groups(), use_collective_io);
1228 w->setMaxWriteSize(max_write_size);
1229 w->setTimes(times());
1231 w->setDirectoryName(dir.
file(
"vtkhdfv2"));
1232 m_writer = std::move(w);
1234 void notifyEndWrite()
override
1238 void close()
override {}
1242 std::unique_ptr<IDataWriter> m_writer;
#define ARCANE_CHECK_POINTER(ptr)
Macro retournant le pointeur ptr s'il est non nul ou lançant une exception s'il est nul.
#define ARCANE_THROW(exception_class,...)
Macro pour envoyer une exception avec formatage.
#define ARCANE_FATAL(...)
Macro envoyant une exception FatalErrorException.
Classe gérant un répertoire.
virtual String file(const String &file_name) const
Retourne le chemin complet du fichier file_name dans le répertoire.
Encapsule un hid_t pour un groupe.
static bool hasParallelHdf5()
Vrai HDF5 est compilé avec le support de MPI.
void initialize()
Initialise les types.
Interface d'écriture des données d'une variable.
virtual eDataType dataType() const =0
Type de la donnée gérée par la variable (Real, Integer, ...)
virtual eItemKind itemKind() const =0
Type des entités du maillage sur lequel repose la variable.
virtual bool isPartial() const =0
Indique si la variable est partielle.
virtual Integer dimension() const =0
Dimension de la variable.
virtual String name() const =0
Nom de la variable.
Groupe d'entités de maillage.
Integer size() const
Nombre d'éléments du groupe.
Structure contenant les informations pour créer un service.
void write(IVariable *var, IData *data) override
Ecrit les données data de la variable var.
void setMetaData(const String &meta_data) override
Positionne les infos des méta-données.
Post-traitement au format VtkHdf V2.
Vue constante d'un tableau de type T.
Vue d'un tableau d'éléments de type T.
Chaîne de caractères unicode.
bool null() const
Retourne true si la chaîne est nulle.
Classe d'accès aux traces.
TraceMessage warning() const
Flot pour un message d'avertissement.
TraceMessage info() const
Flot pour un message d'information.
TraceMessage pwarning() const
__host__ __device__ Real2 min(Real2 a, Real2 b)
Retourne le minimum de deux Real2.
ItemGroupT< Cell > CellGroup
Groupe de mailles.
ItemGroupT< Node > NodeGroup
Groupe de noeuds.
MeshVariableScalarRefT< Node, Real3 > VariableNodeReal3
Grandeur au noeud de type coordonnées.
Integer len(const char *s)
Retourne la longueur de la chaîne s.
-*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
Collection< ItemGroup > ItemGroupCollection
Collection de groupes d'éléments du maillage.
eItemKind
Genre d'entité de maillage.
@ IK_Node
Entité de maillage de genre noeud.
@ IK_Cell
Entité de maillage de genre maille.
unsigned char Byte
Type d'un octet.
eDataType
Type d'une donnée.
@ DT_Int32
Donnée de type entier 32 bits.
@ DT_Real3
Donnée de type vecteur 3.
@ DT_Int64
Donnée de type entier 64 bits.
@ DT_Real2
Donnée de type vecteur 2.
@ DT_Real
Donnée de type réel.
double Real
Type représentant un réel.
Conserve les infos sur les données à sauver et l'offset associé.
Classe pour conserver un couple (hdf_group,nom_du_dataset).
Classe pour conserver les information d'un offset.
Int64 offset() const
Valeur de l'offset. (-1) si on écrit à la fin du tableau.
Informations collectives sur un ItemGroup;.
WritePartInfo m_write_part_info
Informations sur l'écriture.
ItemGroup m_item_group
Groupe associé
Informations sur l'offset de la partie à écrire associée à un rang.