14#include "arcane/utils/Collection.h"
15#include "arcane/utils/Enumerator.h"
16#include "arcane/utils/Iostream.h"
17#include "arcane/utils/ScopedPtr.h"
18#include "arcane/utils/StringBuilder.h"
19#include "arcane/utils/CheckedConvert.h"
20#include "arcane/utils/JSONWriter.h"
21#include "arcane/utils/IOException.h"
23#include "arcane/core/PostProcessorWriterBase.h"
24#include "arcane/core/Directory.h"
25#include "arcane/core/FactoryService.h"
26#include "arcane/core/IDataWriter.h"
27#include "arcane/core/IData.h"
28#include "arcane/core/IItemFamily.h"
29#include "arcane/core/VariableCollection.h"
30#include "arcane/core/IParallelMng.h"
31#include "arcane/core/IMesh.h"
32#include "arcane/core/internal/VtkCellTypes.h"
34#include "arcane/hdf5/Hdf5Utils.h"
35#include "arcane/hdf5/VtkHdfPostProcessor_axl.h"
using namespace Hdf5Utils;
  void endWrite() override;
  bool m_is_parallel = false; //!< true when running on more than one rank
  bool m_is_master_io = false; //!< true on the rank in charge of the IO
  bool m_is_collective_io = false; //!< true when HDF5 collective MPI-IO is used
  void _addStringAttribute(Hid& hid, const char* name, const String& value);
  // Templated helpers used to write the HDF5 datasets; the definitions appear below.
  template <typename DataType> void
  _writeDataSet1DCollectiveWithCollectiveIO(HGroup& group, const String& name, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet2DCollectiveWithCollectiveIO(HGroup& group, const String& name, Span2<const DataType> values);
  template <typename DataType> void
  _writeDataSet1D(HGroup& group, const String& name, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet1DCollective(HGroup& group, const String& name, Span<const DataType> values);
  template <typename DataType> void
  _writeDataSet2D(HGroup& group, const String& name, Span2<const DataType> values);
  template <typename DataType> void
  _writeDataSet2DCollective(HGroup& group, const String& name, Span2<const DataType> values);
  template <typename DataType> void
  String _getFileNameForTimeIndex(Int32 index)
  {
    // ... (builds the output file name for the given time index in a StringBuilder 'sb')
    return sb.toString();
  }
163: TraceAccessor(mesh->traceMng())
void VtkHdfDataWriter::
beginWrite(const VariableCollection& vars)
178 const Int32 nb_rank = pm->commSize();
179 m_is_parallel = nb_rank > 1;
180 m_is_master_io = pm->isMasterIO();
183 const bool is_first_call = (time_index < 2);
185 pwarning() <<
"L'implémentation au format 'VtkHdf' est expérimentale";
187 String filename = _getFileNameForTimeIndex(time_index);
189 Directory dir(m_directory_name);
191 m_full_filename = dir.file(filename);
192 info(4) <<
"VtkHdfDataWriter::beginWrite() file=" << m_full_filename;
203 if (pm->isHybridImplementation() || pm->isThreadImplementation())
204 m_is_collective_io =
false;
206 info() <<
"VtkHdfDataWriter: using collective MPI/IO ?=" << m_is_collective_io;
209 if (m_is_collective_io)
210 plist_id.createFilePropertyMPIIO(pm);
212 if (time_index <= 1) {
213 if (m_is_master_io) {
214 dir.createDirectory();
218 if (m_is_collective_io)
221 if (m_is_collective_io || m_is_master_io) {
222 m_file_id.openTruncate(m_full_filename, plist_id.id());
224 top_group.create(m_file_id,
"VTKHDF");
226 m_cell_data_group.create(top_group,
"CellData");
227 m_node_data_group.create(top_group,
"PointData");
229 std::array<Int64, 2> version = { 1, 0 };
230 _addInt64ArrayAttribute(top_group,
"Version", version);
231 _addStringAttribute(top_group,
"Type",
"UnstructuredGrid");
  // all_cells / all_nodes: groups of all the mesh cells and nodes
  const Int32 nb_cell = all_cells.size();
  const Int32 nb_node = all_nodes.size();

  Int32 total_nb_connected_node = 0;
  ENUMERATE_CELL (icell, all_cells) {
    Cell cell = *icell;
    total_nb_connected_node += cell.nodeIds().size();
  }

  // Fill the connectivity, offset, type and ghost information of every cell.
  UniqueArray<Int64> cells_connectivity(total_nb_connected_node);
  UniqueArray<Int64> cells_offset(nb_cell + 1);
  UniqueArray<unsigned char> cells_ghost_type(nb_cell);
  UniqueArray<unsigned char> cells_type(nb_cell);
  UniqueArray<Int64> cells_uid(nb_cell);
  cells_offset[0] = 0;
  Int32 connected_node_index = 0;
  ENUMERATE_CELL (icell, all_cells) {
    Int32 index = icell.index();
    Cell cell = *icell;
    cells_uid[index] = icell->uniqueId();

    bool is_ghost = !cell.isOwn();
    unsigned char ghost_type = 0;
    if (is_ghost)
      ghost_type = VtkUtils::CellGhostTypes::DUPLICATECELL;
    cells_ghost_type[index] = ghost_type;

    unsigned char vtk_type = VtkUtils::arcaneToVtkCellType(cell.type());
    cells_type[index] = vtk_type;
    for (NodeLocalId node : cell.nodeIds()) {
      cells_connectivity[connected_node_index] = node;
      ++connected_node_index;
    }
    cells_offset[index + 1] = connected_node_index;
  }
280 _writeDataSet1DCollective<Int64>(top_group,
"Offsets", cells_offset);
282 _writeDataSet1DCollective<Int64>(top_group,
"Connectivity", cells_connectivity);
283 _writeDataSet1DCollective<unsigned char>(top_group,
"Types", cells_type);
286 UniqueArray<Int64> nb_cell_by_ranks(1);
287 nb_cell_by_ranks[0] = nb_cell;
288 _writeDataSet1DCollective<Int64>(top_group,
"NumberOfCells", nb_cell_by_ranks);
290 UniqueArray<Int64> nb_node_by_ranks(1);
291 nb_node_by_ranks[0] = nb_node;
292 _writeDataSet1DCollective<Int64>(top_group,
"NumberOfPoints", nb_node_by_ranks);
294 UniqueArray<Int64> number_of_connectivity_ids(1);
295 number_of_connectivity_ids[0] = cells_connectivity.size();
296 _writeDataSet1DCollective<Int64>(top_group,
"NumberOfConnectivityIds", number_of_connectivity_ids);
301 UniqueArray<Int64> nodes_uid(nb_node);
302 UniqueArray<unsigned char> nodes_ghost_type(nb_node);
304 UniqueArray2<Real> points;
305 points.resize(nb_node, 3);
307 Int32 index = inode.index();
310 nodes_uid[index] = inode->uniqueId();
313 bool is_ghost = !node.isOwn();
315 ghost_type = VtkUtils::PointGhostTypes::DUPLICATEPOINT;
316 nodes_ghost_type[index] = ghost_type;
318 Real3 pos = nodes_coordinates[inode];
319 points[index][0] = pos.x;
320 points[index][1] = pos.y;
321 points[index][2] = pos.z;
325 _writeDataSet1DCollective<Int64>(m_node_data_group,
"GlobalNodeId", nodes_uid);
328 _writeDataSet1DCollective<unsigned char>(m_node_data_group,
"vtkGhostType", nodes_ghost_type);
331 _writeDataSet2DCollective<Real>(top_group,
"Points", points);
335 _writeDataSet1DCollective<Int64>(m_cell_data_group,
"GlobalCellId", cells_uid);
339 _writeDataSet1DCollective<unsigned char>(m_cell_data_group,
"vtkGhostType", cells_ghost_type);
// Maps a C++ data type to the corresponding native HDF5 type identifier.
template <typename DataType>
class HDFTraits;

template <>
class HDFTraits<Int64>
{
 public:
  static hid_t hdfType() { return H5T_NATIVE_INT64; }
};

template <>
class HDFTraits<Int32>
{
 public:
  static hid_t hdfType() { return H5T_NATIVE_INT32; }
};

template <>
class HDFTraits<double>
{
 public:
  static hid_t hdfType() { return H5T_NATIVE_DOUBLE; }
};

template <>
class HDFTraits<unsigned char>
{
 public:
  static hid_t hdfType() { return H5T_NATIVE_UINT8; }
};
template <typename DataType> void VtkHdfDataWriter::
_writeDataSet1DCollectiveWithCollectiveIO(HGroup& group, const String& name, Span<const DataType> values)
{
  // Gather every rank's local size to compute the global dataset size and
  // this rank's offset inside it. 'pm' is the parallel manager of the mesh.
  Int64 size = values.size();
  Int32 nb_rank = pm->commSize();
  Int32 my_rank = pm->commRank();
  UniqueArray<Int64> all_sizes(nb_rank);
  pm->allGather(ConstArrayView<Int64>(1, &size), all_sizes);

  Int64 total_size = 0;
  for (Integer i = 0; i < nb_rank; ++i)
    total_size += all_sizes[i];
  Int64 my_index = 0;
  for (Integer i = 0; i < my_rank; ++i)
    my_index += all_sizes[i];

  hsize_t offset[1];
  offset[0] = my_index;
  hsize_t count[1];
  count[0] = size;
  hsize_t dims[1];
  dims[0] = total_size;

  // The file dataspace covers the whole dataset, the memory dataspace only the local part.
  HSpace filespace_id;
  filespace_id.createSimple(1, dims);
  HSpace memspace_id;
  memspace_id.createSimple(1, count);

  const hid_t hdf_type = HDFTraits<DataType>::hdfType();
  HDataset dataset_id;
  dataset_id.create(group, name.localstr(), hdf_type, filespace_id, H5P_DEFAULT);

  // Select this rank's slice of the file dataspace and write it with collective MPI-IO.
  H5Sselect_hyperslab(filespace_id.id(), H5S_SELECT_SET, offset, NULL, count, NULL);

  HProperty write_plist_id;
  write_plist_id.createDatasetTransfertCollectiveMPIIO();
  herr_t herr = dataset_id.write(hdf_type, values.data(), memspace_id, filespace_id, write_plist_id);
  if (herr < 0)
    ARCANE_THROW(IOException, "Can not write dataset '{0}'", name);
}
template <typename DataType> void VtkHdfDataWriter::
_writeDataSet2DCollectiveWithCollectiveIO(HGroup& group, const String& name, Span2<const DataType> values)
{
  // Only the first dimension is distributed across ranks; dim2_size is the same everywhere.
  Int64 dim1_size = values.dim1Size();
  Int64 dim2_size = values.dim2Size();
  Int32 nb_rank = pm->commSize();
  Int32 my_rank = pm->commRank();
  UniqueArray<Int64> all_sizes(nb_rank);
  pm->allGather(ConstArrayView<Int64>(1, &dim1_size), all_sizes);

  Int64 total_size = 0;
  for (Integer i = 0; i < nb_rank; ++i)
    total_size += all_sizes[i];
  Int64 my_index = 0;
  for (Integer i = 0; i < my_rank; ++i)
    my_index += all_sizes[i];

  hsize_t offset[2];
  offset[0] = my_index;
  offset[1] = 0;
  hsize_t count[2];
  count[0] = dim1_size;
  count[1] = dim2_size;
  hsize_t dims[2];
  dims[0] = total_size;
  dims[1] = dim2_size;

  HSpace filespace_id;
  filespace_id.createSimple(2, dims);
  HSpace memspace_id;
  memspace_id.createSimple(2, count);

  const hid_t hdf_type = HDFTraits<DataType>::hdfType();
  HDataset dataset_id;
  dataset_id.create(group, name.localstr(), hdf_type, filespace_id, H5P_DEFAULT);

  H5Sselect_hyperslab(filespace_id.id(), H5S_SELECT_SET, offset, NULL, count, NULL);

  HProperty write_plist_id;
  write_plist_id.createDatasetTransfertCollectiveMPIIO();
  herr_t herr = dataset_id.write(hdf_type, values.data(), memspace_id, filespace_id, write_plist_id);
  if (herr < 0)
    ARCANE_THROW(IOException, "Can not write dataset '{0}'", name);
}
template <typename DataType> void VtkHdfDataWriter::
_writeDataSet1D(HGroup& group, const String& name, Span<const DataType> values)
{
  // Simple local write: the dataset has exactly the size of the local values.
  hsize_t dims[1];
  dims[0] = values.size();
  HSpace hspace;
  hspace.createSimple(1, dims);

  const hid_t hdf_type = HDFTraits<DataType>::hdfType();
  HDataset dataset;
  dataset.create(group, name.localstr(), hdf_type, hspace, H5P_DEFAULT);
  herr_t herr = dataset.write(hdf_type, values.data());
  if (herr < 0)
    ARCANE_THROW(IOException, "Can not write dataset '{0}'", name);
}
template <typename DataType> void VtkHdfDataWriter::
_writeDataSet1DCollective(HGroup& group, const String& name, Span<const DataType> values)
{
  // Sequential run: plain local write.
  if (!m_is_parallel) {
    _writeDataSet1D(group, name, values);
    return;
  }
  // Parallel run with collective MPI-IO: every rank writes its own slice.
  if (m_is_collective_io) {
    _writeDataSet1DCollectiveWithCollectiveIO(group, name, values);
    return;
  }
  // Fallback: gather everything on the master IO rank, which writes alone.
  UniqueArray<DataType> all_values;
  pm->gatherVariable(values.smallView(), all_values, pm->masterIORank());
  if (m_is_master_io)
    _writeDataSet1D<DataType>(group, name, all_values);
}
template <typename DataType> void VtkHdfDataWriter::
_writeDataSet2D(HGroup& group, const String& name, Span2<const DataType> values)
{
  hsize_t dims[2];
  dims[0] = values.dim1Size();
  dims[1] = values.dim2Size();
  HSpace hspace;
  hspace.createSimple(2, dims);

  const hid_t hdf_type = HDFTraits<DataType>::hdfType();
  HDataset dataset;
  dataset.create(group, name.localstr(), hdf_type, hspace, H5P_DEFAULT);
  herr_t herr = dataset.write(hdf_type, values.data());
  if (herr < 0)
    ARCANE_THROW(IOException, "Can not write dataset '{0}'", name);
}
template <typename DataType> void VtkHdfDataWriter::
_writeDataSet2DCollective(HGroup& group, const String& name, Span2<const DataType> values)
{
  Int64 dim2_size = values.dim2Size();

  if (!m_is_parallel) {
    _writeDataSet2D(group, name, values);
    return;
  }
  if (m_is_collective_io) {
    _writeDataSet2DCollectiveWithCollectiveIO(group, name, values);
    return;
  }
  // Gather the values as a flat 1D array on the master IO rank and rebuild
  // the 2D view there before writing.
  UniqueArray<DataType> all_values;
  Span<const DataType> values_1d(values.data(), values.totalNbElement());
  pm->gatherVariable(values_1d.smallView(), all_values, pm->masterIORank());
  if (m_is_master_io) {
    Int64 dim1_size = all_values.size();
    if (dim2_size != 0)
      dim1_size = dim1_size / dim2_size;
    Span2<const DataType> span2(all_values.data(), dim1_size, dim2_size);
    _writeDataSet2D<DataType>(group, name, span2);
  }
}
void VtkHdfDataWriter::
_addInt64ArrayAttribute(Hid& hid, const char* name, Span<const Int64> values)
{
  hsize_t len = values.size();
  hid_t aid = H5Screate_simple(1, &len, 0);
  hid_t attr = H5Acreate2(hid.id(), name, H5T_NATIVE_INT64, aid, H5P_DEFAULT, H5P_DEFAULT);
  int ret = H5Awrite(attr, H5T_NATIVE_INT64, values.data());
  // error checks and H5Aclose()/H5Sclose() cleanup not shown
}
void VtkHdfDataWriter::
_addStringAttribute(Hid& hid, const char* name, const String& value)
{
  hid_t aid = H5Screate(H5S_SCALAR);
  // Fixed-size C string type with exactly the length of the value.
  hid_t attr_type = H5Tcopy(H5T_C_S1);
  H5Tset_size(attr_type, value.length());
  hid_t attr = H5Acreate2(hid.id(), name, attr_type, aid, H5P_DEFAULT, H5P_DEFAULT);
  int ret = H5Awrite(attr, attr_type, value.localstr());
  ret = H5Tclose(attr_type);
  // remaining error checks and handle cleanup not shown
}
void VtkHdfDataWriter::
endWrite()
{
  m_cell_data_group.close();
  m_node_data_group.close();
  // Write a ParaView '.series' file referencing every dump done so far.
  JSONWriter writer(JSONWriter::FormatFlags::None);
  {
    JSONWriter::Object o(writer);
    writer.write("file-series-version", "1.0");
    writer.writeKey("files");
    // one entry per registered time, 'file_index' following the dump order
    for (Real v : m_times) {
      JSONWriter::Object o(writer);
      String filename = _getFileNameForTimeIndex(file_index);
      writer.write("name", filename);
      writer.write("time", v);
    }
  }

  Directory dir(m_directory_name);
  String fname = dir.file(_getFileNameForTimeIndex(-1) + ".series");
  std::ofstream ofile(fname.localstr());
  StringView buf = writer.getBuffer();
  ofile.write(reinterpret_cast<const char*>(buf.bytes().data()), buf.length());
680 info(4) <<
"Write VtkHdf var=" <<
var->name();
684 if (
var->dimension() != 1)
685 ARCANE_FATAL(
"Only export of scalar item variable is implemented (name={0})",
var->name());
690 group = &m_cell_data_group;
693 group = &m_node_data_group;
696 ARCANE_FATAL(
"Only export of 'Cell' or 'Node' variable is implemented (name={0})",
var->name());
712 _writeReal3Dataset(*group,
var, data);
715 _writeReal2Dataset(*group,
var, data);
718 warning() << String::format(
"Export for datatype '{0}' is not supported (var_name={1})",
data_type,
var->name());
template <typename DataType> void VtkHdfDataWriter::
void VtkHdfDataWriter::
_writeReal3Dataset(HGroup& group, IVariable* var, IData* data)
{
  auto* true_data = dynamic_cast<IArrayDataT<Real3>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real3> values(true_data->view());
  Int32 nb_value = values.size();
  // A Real3 variable is exported as a 2D dataset with 3 components per value.
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real3 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = v.z;
  }
  _writeDataSet2DCollective<Real>(group, var->name(), scalar_values);
}
void VtkHdfDataWriter::
_writeReal2Dataset(HGroup& group, IVariable* var, IData* data)
{
  // Real2 values are padded to 3 components (z = 0) so the viewer sees a 3D vector.
  auto* true_data = dynamic_cast<IArrayDataT<Real2>*>(data);
  ARCANE_CHECK_POINTER(true_data);
  SmallSpan<const Real2> values(true_data->view());
  Int32 nb_value = values.size();
  UniqueArray2<Real> scalar_values;
  scalar_values.resize(nb_value, 3);
  for (Int32 i = 0; i < nb_value; ++i) {
    Real2 v = values[i];
    scalar_values[i][0] = v.x;
    scalar_values[i][1] = v.y;
    scalar_values[i][2] = 0.0;
  }
  _writeDataSet2DCollective<Real>(group, var->name(), scalar_values);
}
class VtkHdfPostProcessor
: public ArcaneVtkHdfPostProcessorObject
{
 public:

  explicit VtkHdfPostProcessor(const ServiceBuildInfo& sbi)
  : ArcaneVtkHdfPostProcessorObject(sbi)
  {
  }

  IDataWriter* dataWriter() override { return m_writer.get(); }
  void notifyBeginWrite() override
  {
    auto w = std::make_unique<VtkHdfDataWriter>(mesh(), groups());
    w->setTimes(times());
    // 'dir' is the base output directory of the post-processor.
    w->setDirectoryName(dir.file("vtkhdf"));
    m_writer = std::move(w);
  }
  void notifyEndWrite() override
  {
    // ...
  }
  void close() override {}

 private:

  std::unique_ptr<IDataWriter> m_writer;
};