From 0f613246658b37a029d3e3edde7b45a3d881cc48 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 14 Nov 2022 23:42:34 -0600 Subject: [PATCH 01/82] [pre-commit.ci] pre-commit autoupdate (#1331) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/hadialqattan/pycln: v2.1.1 → v2.1.2](https://github.com/hadialqattan/pycln/compare/v2.1.1...v2.1.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e02ef0b083..77b2ef249d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -71,7 +71,7 @@ repos: # Autoremoves unused Python imports - repo: https://github.com/hadialqattan/pycln - rev: v2.1.1 + rev: v2.1.2 hooks: - id: pycln name: pycln (python) From 4ba28cd70f9c88b0b1e8768cbf15df7a8cc85cba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 15 Nov 2022 06:43:11 +0100 Subject: [PATCH 02/82] Variant constructor is explicit in icpc apparently (#1327) $ /trinity/shared/pkg/compiler/intel/20.4/bin/icpc --version icpc (ICC) 19.1.3.304 20200925 Copyright (C) 1985-2020 Intel Corporation. All rights reserved. 
--- include/openPMD/backend/Attribute.hpp | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/include/openPMD/backend/Attribute.hpp b/include/openPMD/backend/Attribute.hpp index 13c267b19f..b2cee727ae 100644 --- a/include/openPMD/backend/Attribute.hpp +++ b/include/openPMD/backend/Attribute.hpp @@ -145,7 +145,7 @@ namespace detail (void)pv; if constexpr (std::is_convertible_v) { - return static_cast(*pv); + return {static_cast(*pv)}; } else if constexpr (auxiliary::IsVector_v && auxiliary::IsVector_v) { @@ -156,11 +156,12 @@ namespace detail U res{}; res.reserve(pv->size()); std::copy(pv->begin(), pv->end(), std::back_inserter(res)); - return res; + return {res}; } else { - return std::runtime_error("getCast: no vector cast possible."); + return { + std::runtime_error("getCast: no vector cast possible.")}; } } // conversion cast: array to vector @@ -175,12 +176,12 @@ namespace detail U res{}; res.reserve(pv->size()); std::copy(pv->begin(), pv->end(), std::back_inserter(res)); - return res; + return {res}; } else { - return std::runtime_error( - "getCast: no array to vector conversion possible."); + return {std::runtime_error( + "getCast: no array to vector conversion possible.")}; } } // conversion cast: vector to array @@ -204,12 +205,12 @@ namespace detail { res[i] = static_cast((*pv)[i]); } - return res; + return {res}; } else { - return std::runtime_error( - "getCast: no vector to array conversion possible."); + return {std::runtime_error( + "getCast: no vector to array conversion possible.")}; } } // conversion cast: turn a single value into a 1-element vector @@ -220,17 +221,17 @@ namespace detail U res{}; res.reserve(1); res.push_back(static_cast(*pv)); - return res; + return {res}; } else { - return std::runtime_error( - "getCast: no scalar to vector conversion possible."); + return {std::runtime_error( + "getCast: no scalar to vector conversion possible.")}; } } else { - return std::runtime_error("getCast: no 
cast possible."); + return {std::runtime_error("getCast: no cast possible.")}; } #if defined(__INTEL_COMPILER) /* From 36506551e46b062badfbfef0db827eeeae9983dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 15 Nov 2022 12:09:25 +0100 Subject: [PATCH 03/82] Add IterationIndex_t to replace hardcoded uint64_t to index iterations (#1285) * Replace uint64_t with IterationIndex_t Todo: 1) Replace Container with Iterations_t or sth 2) Search for instances of decltype(Series::iterations)::key_type * Replace instances of decltype(Series::iterations)::key_type * Introduce IterationsContainer_t --- include/openPMD/Iteration.hpp | 8 ++-- include/openPMD/Series.hpp | 30 ++++++++++---- include/openPMD/WriteIterations.hpp | 19 +++++---- .../openPMD/benchmark/mpi/MPIBenchmark.hpp | 29 ++++++------- .../benchmark/mpi/MPIBenchmarkReport.hpp | 10 ++--- src/Iteration.cpp | 8 ++-- src/Series.cpp | 41 ++++++++++--------- src/WriteIterations.cpp | 5 ++- src/binding/python/Container.cpp | 3 +- src/binding/python/Series.cpp | 4 +- 10 files changed, 86 insertions(+), 71 deletions(-) diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index 2d14313cfa..2393b93322 100644 --- a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -134,6 +134,8 @@ class Iteration : public Attributable Iteration(Iteration const &) = default; Iteration &operator=(Iteration const &) = default; + using IterationIndex_t = uint64_t; + /** * @tparam T Floating point type of user-selected precision (e.g. float, * double). 
@@ -255,9 +257,9 @@ class Iteration : public Attributable } void flushFileBased( - std::string const &, uint64_t, internal::FlushParams const &); - void flushGroupBased(uint64_t, internal::FlushParams const &); - void flushVariableBased(uint64_t, internal::FlushParams const &); + std::string const &, IterationIndex_t, internal::FlushParams const &); + void flushGroupBased(IterationIndex_t, internal::FlushParams const &); + void flushVariableBased(IterationIndex_t, internal::FlushParams const &); void flush(internal::FlushParams const &); void deferParseAccess(internal::DeferredParseAccess); /* diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp index 362001605e..eb79199b38 100644 --- a/include/openPMD/Series.hpp +++ b/include/openPMD/Series.hpp @@ -37,7 +37,7 @@ #include #endif -#include +#include // uint64_t #include #include #include @@ -76,7 +76,9 @@ namespace internal SeriesData &operator=(SeriesData const &) = delete; SeriesData &operator=(SeriesData &&) = delete; - Container iterations{}; + using IterationIndex_t = Iteration::IterationIndex_t; + using IterationsContainer_t = Container; + IterationsContainer_t iterations{}; /** * For each instance of Series, there is only one instance @@ -90,7 +92,7 @@ namespace internal * currently active output step. Use this later when writing the * snapshot attribute. */ - std::set m_currentlyActiveIterations; + std::set m_currentlyActiveIterations; /** * Needed if reading a single iteration of a file-based series. * Users may specify the concrete filename of one iteration instead of @@ -210,7 +212,15 @@ class Series : public Attributable virtual ~Series() = default; - Container iterations; + /** + * An unsigned integer type, used to identify Iterations in a Series. + */ + using IterationIndex_t = Iteration::IterationIndex_t; + /** + * Type for a container of Iterations indexed by IterationIndex_t. 
+ */ + using IterationsContainer_t = internal::SeriesData::IterationsContainer_t; + IterationsContainer_t iterations; /** * @brief Is this a usable Series object? @@ -588,9 +598,10 @@ OPENPMD_private * If series.iterations contains the attribute `snapshot`, returns its * value. */ - std::optional > readGorVBased(bool init = true); + std::optional > + readGorVBased(bool init = true); void readBase(); - std::string iterationFilename(uint64_t i); + std::string iterationFilename(IterationIndex_t i); enum class IterationOpened : bool { @@ -603,14 +614,15 @@ OPENPMD_private * Only open if the iteration is dirty and if it is not in deferred * parse state. */ - IterationOpened openIterationIfDirty(uint64_t index, Iteration iteration); + IterationOpened + openIterationIfDirty(IterationIndex_t index, Iteration iteration); /* * Open an iteration. Ensures that the iteration's m_closed status * is set properly and that any files pertaining to the iteration * is opened. * Does not create files when called in CREATE mode. */ - void openIteration(uint64_t index, Iteration iteration); + void openIteration(IterationIndex_t index, Iteration iteration); /** * Find the given iteration in Series::iterations and return an iterator @@ -653,7 +665,7 @@ OPENPMD_private * Returns the current content of the /data/snapshot attribute. 
* (We could also add this to the public API some time) */ - std::optional > currentSnapshot() const; + std::optional > currentSnapshot() const; }; // Series } // namespace openPMD diff --git a/include/openPMD/WriteIterations.hpp b/include/openPMD/WriteIterations.hpp index 414dd1c9e4..134abe0519 100644 --- a/include/openPMD/WriteIterations.hpp +++ b/include/openPMD/WriteIterations.hpp @@ -49,25 +49,26 @@ class WriteIterations friend class Series; private: - using iterations_t = Container; + using IterationsContainer_t = + Container; public: - using key_type = typename iterations_t::key_type; - using mapped_type = typename iterations_t::mapped_type; - using value_type = typename iterations_t::value_type; - using reference = typename iterations_t::reference; + using key_type = IterationsContainer_t::key_type; + using mapped_type = IterationsContainer_t::mapped_type; + using value_type = IterationsContainer_t::value_type; + using reference = IterationsContainer_t::reference; private: struct SharedResources { - iterations_t iterations; - std::optional currentlyOpen; + IterationsContainer_t iterations; + std::optional currentlyOpen; - SharedResources(iterations_t); + SharedResources(IterationsContainer_t); ~SharedResources(); }; - WriteIterations(iterations_t); + WriteIterations(IterationsContainer_t); explicit WriteIterations() = default; //! 
Index of the last opened iteration std::shared_ptr shared; diff --git a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp index 14a260496e..3d6d78c7e4 100644 --- a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp +++ b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp @@ -105,7 +105,7 @@ class MPIBenchmark std::string jsonConfig, std::string backend, Datatype dt, - typename decltype(Series::iterations)::key_type iterations, + Series::IterationIndex_t iterations, int threadSize); /** @@ -123,7 +123,7 @@ class MPIBenchmark std::string jsonConfig, std::string backend, Datatype dt, - typename decltype(Series::iterations)::key_type iterations); + Series::IterationIndex_t iterations); void resetConfigurations(); @@ -146,7 +146,7 @@ class MPIBenchmark std::string, int, Datatype, - typename decltype(Series::iterations)::key_type>> + Series::IterationIndex_t>> m_configurations; enum Config @@ -194,7 +194,7 @@ class MPIBenchmark Extent &extent, std::string const &extension, std::shared_ptr> datasetFiller, - typename decltype(Series::iterations)::key_type iterations); + Series::IterationIndex_t iterations); /** * Execute a single read benchmark. 
@@ -210,7 +210,7 @@ class MPIBenchmark Offset &offset, Extent &extent, std::string extension, - typename decltype(Series::iterations)::key_type iterations); + Series::IterationIndex_t iterations); template static void call( @@ -278,7 +278,7 @@ void MPIBenchmark::addConfiguration( std::string jsonConfig, std::string backend, Datatype dt, - typename decltype(Series::iterations)::key_type iterations, + Series::IterationIndex_t iterations, int threadSize) { this->m_configurations.emplace_back( @@ -290,7 +290,7 @@ void MPIBenchmark::addConfiguration( std::string jsonConfig, std::string backend, Datatype dt, - typename decltype(Series::iterations)::key_type iterations) + Series::IterationIndex_t iterations) { int size; MPI_Comm_size(communicator, &size); @@ -313,7 +313,7 @@ MPIBenchmark::BenchmarkExecution::writeBenchmark( Extent &extent, std::string const &extension, std::shared_ptr> datasetFiller, - typename decltype(Series::iterations)::key_type iterations) + Series::IterationIndex_t iterations) { MPI_Barrier(m_benchmark->communicator); auto start = Clock::now(); @@ -325,8 +325,7 @@ MPIBenchmark::BenchmarkExecution::writeBenchmark( m_benchmark->communicator, jsonConfig); - for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; - i++) + for (Series::IterationIndex_t i = 0; i < iterations; i++) { auto writeData = datasetFiller->produceData(); @@ -348,8 +347,7 @@ MPIBenchmark::BenchmarkExecution::writeBenchmark( auto end = Clock::now(); // deduct the time needed for data generation - for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; - i++) + for (Series::IterationIndex_t i = 0; i < iterations; i++) { datasetFiller->produceData(); } @@ -366,7 +364,7 @@ MPIBenchmark::BenchmarkExecution::readBenchmark( Offset &offset, Extent &extent, std::string extension, - typename decltype(Series::iterations)::key_type iterations) + Series::IterationIndex_t iterations) { MPI_Barrier(m_benchmark->communicator); // let every thread measure 
time @@ -377,8 +375,7 @@ MPIBenchmark::BenchmarkExecution::readBenchmark( Access::READ_ONLY, m_benchmark->communicator); - for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; - i++) + for (Series::IterationIndex_t i = 0; i < iterations; i++) { MeshRecordComponent id = series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; @@ -409,7 +406,7 @@ void MPIBenchmark::BenchmarkExecution::call( std::string backend; int size; Datatype dt2; - typename decltype(Series::iterations)::key_type iterations; + Series::IterationIndex_t iterations; std::tie(jsonConfig, backend, size, dt2, iterations) = config; if (dt != dt2) diff --git a/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp b/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp index aab21e6ea7..7ad18714f9 100644 --- a/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp +++ b/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp @@ -57,7 +57,7 @@ struct MPIBenchmarkReport std::string, // extension int, // thread size Datatype, - typename decltype(Series::iterations)::key_type>, + Series::IterationIndex_t>, std::pair > durations; @@ -89,7 +89,7 @@ struct MPIBenchmarkReport std::string extension, int threadSize, Datatype dt, - typename decltype(Series::iterations)::key_type iterations, + Series::IterationIndex_t iterations, std::pair const &report); /** Retrieve the time measured for a certain compression strategy. 
@@ -108,7 +108,7 @@ struct MPIBenchmarkReport std::string extension, int threadSize, Datatype dt, - typename decltype(Series::iterations)::key_type iterations); + Series::IterationIndex_t iterations); private: template @@ -189,7 +189,7 @@ void MPIBenchmarkReport::addReport( std::string extension, int threadSize, Datatype dt, - typename decltype(Series::iterations)::key_type iterations, + Series::IterationIndex_t iterations, std::pair const &report) { using rep = typename Duration::rep; @@ -257,7 +257,7 @@ std::pair MPIBenchmarkReport::getReport( std::string extension, int threadSize, Datatype dt, - typename decltype(Series::iterations)::key_type iterations) + Series::IterationIndex_t iterations) { auto it = this->durations.find(std::make_tuple( rank, jsonConfig, extension, threadSize, dt, iterations)); diff --git a/src/Iteration.cpp b/src/Iteration.cpp index 0a7de3e6bb..179bf0f97d 100644 --- a/src/Iteration.cpp +++ b/src/Iteration.cpp @@ -79,7 +79,7 @@ Iteration &Iteration::setTimeUnitSI(double newTimeUnitSI) return *this; } -using iterator_t = Container::iterator; +using iterator_t = Container::iterator; Iteration &Iteration::close(bool _flush) { @@ -194,7 +194,7 @@ bool Iteration::closedByWriter() const void Iteration::flushFileBased( std::string const &filename, - uint64_t i, + IterationIndex_t i, internal::FlushParams const &flushParams) { /* Find the root point [Series] of this file, @@ -251,7 +251,7 @@ void Iteration::flushFileBased( } void Iteration::flushGroupBased( - uint64_t i, internal::FlushParams const &flushParams) + IterationIndex_t i, internal::FlushParams const &flushParams) { if (!written()) { @@ -274,7 +274,7 @@ void Iteration::flushGroupBased( } void Iteration::flushVariableBased( - uint64_t i, internal::FlushParams const &flushParams) + IterationIndex_t i, internal::FlushParams const &flushParams) { if (!written()) { diff --git a/src/Series.cpp b/src/Series.cpp index 478aea5d1c..6439f67e29 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ 
-71,12 +71,13 @@ namespace { bool isContained{}; //! pattern match successful int padding{}; //! number of zeros used for padding of iteration - uint64_t iteration{}; //! iteration found in regex pattern (default: 0) + Series::IterationIndex_t + iteration{}; //! iteration found in regex pattern (default: 0) // support for std::tie - operator std::tuple() + operator std::tuple() { - return std::tuple{ + return std::tuple{ isContained, padding, iteration}; } }; @@ -164,7 +165,7 @@ Series &Series::setMeshesPath(std::string const &mp) if (std::any_of( series.iterations.begin(), series.iterations.end(), - [](Container::value_type const &i) { + [](Container::value_type const &i) { return i.second.meshes.written(); })) throw std::runtime_error( @@ -190,7 +191,7 @@ Series &Series::setParticlesPath(std::string const &pp) if (std::any_of( series.iterations.begin(), series.iterations.end(), - [](Container::value_type const &i) { + [](Container::value_type const &i) { return i.second.particles.written(); })) throw std::runtime_error( @@ -500,7 +501,7 @@ namespace { bool isContained; int padding; - uint64_t iterationIndex; + Series::IterationIndex_t iterationIndex; std::set paddings; if (auxiliary::directory_exists(directory)) { @@ -531,7 +532,7 @@ namespace return autoDetectPadding( std::move(isPartOfSeries), directory, - [](uint64_t index, std::string const &filename) { + [](Series::IterationIndex_t index, std::string const &filename) { (void)index; (void)filename; }); @@ -999,7 +1000,7 @@ void Series::readFileBased() std::move(isPartOfSeries), IOHandler()->directory, // foreach found file with `filename` and `index`: - [&series](uint64_t index, std::string const &filename) { + [&series](IterationIndex_t index, std::string const &filename) { Iteration &i = series.iterations[index]; i.deferParseAccess( {std::to_string(index), @@ -1139,7 +1140,8 @@ void Series::readOneIterationFileBased(std::string const &filePath) series.iterations.readAttributes(ReadMode::OverrideExisting); } 
-std::optional> Series::readGorVBased(bool do_init) +auto Series::readGorVBased(bool do_init) + -> std::optional> { auto &series = get(); Parameter fOpen; @@ -1240,7 +1242,7 @@ std::optional> Series::readGorVBased(bool do_init) IOHandler()->flush(internal::defaultFlushParams); auto readSingleIteration = [&series, &pOpen, this, withRWAccess]( - uint64_t index, + IterationIndex_t index, std::string path, bool guardAgainstRereading, bool beginStep) { @@ -1292,7 +1294,7 @@ std::optional> Series::readGorVBased(bool do_init) case IterationEncoding::fileBased: { for (auto const &it : *pList.paths) { - uint64_t index = std::stoull(it); + IterationIndex_t index = std::stoull(it); /* * For now: parse a Series in RandomAccess mode. * (beginStep = false) @@ -1304,15 +1306,15 @@ std::optional> Series::readGorVBased(bool do_init) if (currentSteps.has_value()) { auto const &vec = currentSteps.value(); - return std::deque{vec.begin(), vec.end()}; + return std::deque{vec.begin(), vec.end()}; } else { - return std::optional>(); + return std::optional>(); } } case IterationEncoding::variableBased: { - std::deque res = {0}; + std::deque res = {0}; if (currentSteps.has_value() && !currentSteps.value().empty()) { res = {currentSteps.value().begin(), currentSteps.value().end()}; @@ -1418,7 +1420,7 @@ void Series::readBase() } } -std::string Series::iterationFilename(uint64_t i) +std::string Series::iterationFilename(IterationIndex_t i) { /* * The filename might have been overridden at the Series level or at the @@ -1675,7 +1677,7 @@ void Series::flushStep(bool doFlush) } } -auto Series::openIterationIfDirty(uint64_t index, Iteration iteration) +auto Series::openIterationIfDirty(IterationIndex_t index, Iteration iteration) -> IterationOpened { /* @@ -1737,7 +1739,7 @@ auto Series::openIterationIfDirty(uint64_t index, Iteration iteration) return IterationOpened::RemainsClosed; } -void Series::openIteration(uint64_t index, Iteration iteration) +void Series::openIteration(IterationIndex_t 
index, Iteration iteration) { auto oldStatus = iteration.get().m_closed; switch (oldStatus) @@ -2045,9 +2047,10 @@ WriteIterations Series::writeIterations() return series.m_writeIterations.value(); } -std::optional> Series::currentSnapshot() const +auto Series::currentSnapshot() const + -> std::optional> { - using vec_t = std::vector; + using vec_t = std::vector; auto &series = get(); /* * In variable-based iteration encoding, iterations have no distinct diff --git a/src/WriteIterations.cpp b/src/WriteIterations.cpp index 597298b80e..872342dfbe 100644 --- a/src/WriteIterations.cpp +++ b/src/WriteIterations.cpp @@ -25,7 +25,8 @@ namespace openPMD { -WriteIterations::SharedResources::SharedResources(iterations_t _iterations) +WriteIterations::SharedResources::SharedResources( + IterationsContainer_t _iterations) : iterations(std::move(_iterations)) {} @@ -43,7 +44,7 @@ WriteIterations::SharedResources::~SharedResources() } } -WriteIterations::WriteIterations(iterations_t iterations) +WriteIterations::WriteIterations(IterationsContainer_t iterations) : shared{std::make_shared(std::move(iterations))} {} diff --git a/src/binding/python/Container.cpp b/src/binding/python/Container.cpp index 357cb0bba2..137260d9f1 100644 --- a/src/binding/python/Container.cpp +++ b/src/binding/python/Container.cpp @@ -33,6 +33,7 @@ #include "openPMD/ParticlePatches.hpp" #include "openPMD/ParticleSpecies.hpp" #include "openPMD/Record.hpp" +#include "openPMD/Series.hpp" #include "openPMD/backend/BaseRecord.hpp" #include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/Container.hpp" @@ -135,7 +136,7 @@ bind_container(py::handle scope, std::string const &name, Args &&...args) } } // namespace detail -using PyIterationContainer = Container; +using PyIterationContainer = Series::IterationsContainer_t; using PyMeshContainer = Container; using PyPartContainer = Container; using PyPatchContainer = Container; diff --git a/src/binding/python/Series.cpp 
b/src/binding/python/Series.cpp index 1c8ddd3df7..f403eedaf6 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -54,12 +54,10 @@ using openPMD_PyMPIIntracommObject = openPMD_PyMPICommObject; void init_Series(py::module &m) { - - using iterations_key_t = decltype(Series::iterations)::key_type; py::class_(m, "WriteIterations") .def( "__getitem__", - [](WriteIterations writeIterations, iterations_key_t key) { + [](WriteIterations writeIterations, Series::IterationIndex_t key) { return writeIterations[key]; }, // keep container alive while iterator exists From bbf4bff395b420cdd15032758c20a9e4d86a901d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 29 Nov 2022 01:03:49 +0000 Subject: [PATCH 04/82] [pre-commit.ci] pre-commit autoupdate (#1334) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/mirrors-clang-format: v14.0.6 → v15.0.4](https://github.com/pre-commit/mirrors-clang-format/compare/v14.0.6...v15.0.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 77b2ef249d..bd09b4bf07 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ exclude: '^share/openPMD/thirdParty' # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.3.0 + rev: v4.4.0 hooks: - id: trailing-whitespace args: [--markdown-linebreak-ext=md] @@ -65,7 +65,7 @@ repos: # clang-format v13 # to run manually, use .github/workflows/clang-format/clang-format.sh - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v14.0.6 + rev: v15.0.4 hooks: - id: clang-format From 70941a21960a99479d097239a1aa1cef545bf062 Mon Sep 17 00:00:00 2001 
From: Axel Huebl Date: Wed, 7 Dec 2022 02:42:28 -0800 Subject: [PATCH 05/82] Catch2 v2.13.10 (#1344) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update to the latest Catch2 release. Mostly small MSVC bug fixes. Co-authored-by: Martin Hořeňovský Co-authored-by: Martin Hořeňovský --- CHANGELOG.rst | 2 +- CMakeLists.txt | 4 +-- NEWS.rst | 2 +- README.md | 14 +++++----- docs/source/dev/buildoptions.rst | 2 +- docs/source/dev/dependencies.rst | 2 +- .../catch2/include/catch2/catch.hpp | 28 +++++++++++-------- 7 files changed, 30 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 57b7c3a90c..70cff2439f 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -26,7 +26,7 @@ Bug Fixes Other """"" -- Catch2: updated to 2.13.9 #1299 +- Catch2: updated to 2.13.10 #1299 #... 0.14.3 diff --git a/CMakeLists.txt b/CMakeLists.txt index d5d30fa074..06c046a0bf 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -532,9 +532,9 @@ if(openPMD_BUILD_TESTING) target_include_directories(openPMD::thirdparty::Catch2 SYSTEM INTERFACE $ ) - message(STATUS "Catch2: Using INTERNAL version '2.13.9'") + message(STATUS "Catch2: Using INTERNAL version '2.13.10'") else() - find_package(Catch2 2.13.9 REQUIRED CONFIG) + find_package(Catch2 2.13.10 REQUIRED CONFIG) target_link_libraries(openPMD::thirdparty::Catch2 INTERFACE Catch2::Catch2) message(STATUS "Catch2: Found version '${Catch2_VERSION}'") diff --git a/NEWS.rst b/NEWS.rst index 7eb4dafaef..707d1cb712 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -12,7 +12,7 @@ Building openPMD-api now requires a compiler that supports C++17 or newer. Python 3.10 & 3.11 are now supported, Python 3.6 is removed. openPMD-api now depends on `toml11 `__ 3.7.1+. pybind11 2.10.1 is now the minimally supported version for Python support. -Catch2 2.13.9 is now the minimally supported version for tests. +Catch2 2.13.10 is now the minimally supported version for tests. 
The following backend-specific members of the ``Dataset`` class have been removed: ``Dataset::setChunkSize()``, ``Dataset::setCompression()``, ``Dataset::setCustomTransform()``, ``Dataset::chunkSize``, ``Dataset::compression``, ``Dataset::transform``. They are replaced by backend-specific options in the JSON-based backend configuration. diff --git a/README.md b/README.md index 968e90b65c..520486d6a0 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ Required: * C++17 capable compiler, e.g., g++ 7+, clang 7+, MSVC 19.15+, icpc 19+, icpx Shipped internally in `share/openPMD/thirdParty/`: -* [Catch2](https://github.com/catchorg/Catch2) 2.13.9+ ([BSL-1.0](https://github.com/catchorg/Catch2/blob/master/LICENSE.txt)) +* [Catch2](https://github.com/catchorg/Catch2) 2.13.10+ ([BSL-1.0](https://github.com/catchorg/Catch2/blob/master/LICENSE.txt)) * [pybind11](https://github.com/pybind/pybind11) 2.10.1+ ([new BSD](https://github.com/pybind/pybind11/blob/master/LICENSE)) * [NLohmann-JSON](https://github.com/nlohmann/json) 3.9.1+ ([MIT](https://github.com/nlohmann/json/blob/develop/LICENSE.MIT)) * [toml11](https://github.com/ToruNiina/toml11) 3.7.1+ ([MIT](https://github.com/ToruNiina/toml11/blob/master/LICENSE)) @@ -269,12 +269,12 @@ CMake controls options with prefixed `-D`, e.g. `-DopenPMD_USE_MPI=OFF`: Additionally, the following libraries are shipped internally. 
The following options allow to switch to external installs: -| CMake Option | Values | Library | Version | -|---------------------------------|------------|---------------|---------| -| `openPMD_USE_INTERNAL_CATCH` | **ON**/OFF | Catch2 | 2.13.9+ | -| `openPMD_USE_INTERNAL_PYBIND11` | **ON**/OFF | pybind11 | 2.10.1+ | -| `openPMD_USE_INTERNAL_JSON` | **ON**/OFF | NLohmann-JSON | 3.9.1+ | -| `openPMD_USE_INTERNAL_TOML11` | **ON**/OFF | toml11 | 3.7.1+ | +| CMake Option | Values | Library | Version | +|---------------------------------|------------|---------------|----------| +| `openPMD_USE_INTERNAL_CATCH` | **ON**/OFF | Catch2 | 2.13.10+ | +| `openPMD_USE_INTERNAL_PYBIND11` | **ON**/OFF | pybind11 | 2.10.1+ | +| `openPMD_USE_INTERNAL_JSON` | **ON**/OFF | NLohmann-JSON | 3.9.1+ | +| `openPMD_USE_INTERNAL_TOML11` | **ON**/OFF | toml11 | 3.7.1+ | By default, this will build as a shared library (`libopenPMD.[so|dylib|dll]`) and installs also its headers. In order to build a static library, append `-DBUILD_SHARED_LIBS=OFF` to the `cmake` command. 
diff --git a/docs/source/dev/buildoptions.rst b/docs/source/dev/buildoptions.rst index e63c1c8e9d..ccd0614966 100644 --- a/docs/source/dev/buildoptions.rst +++ b/docs/source/dev/buildoptions.rst @@ -68,7 +68,7 @@ The following options allow to switch to external installs of dependencies: ================================= =========== ======== ============= ======== CMake Option Values Installs Library Version ================================= =========== ======== ============= ======== -``openPMD_USE_INTERNAL_CATCH`` **ON**/OFF No Catch2 2.13.9+ +``openPMD_USE_INTERNAL_CATCH`` **ON**/OFF No Catch2 2.13.10+ ``openPMD_USE_INTERNAL_PYBIND11`` **ON**/OFF No pybind11 2.10.1+ ``openPMD_USE_INTERNAL_JSON`` **ON**/OFF No NLohmann-JSON 3.9.1+ ``openPMD_USE_INTERNAL_TOML11`` **ON**/OFF No toml11 3.7.1+ diff --git a/docs/source/dev/dependencies.rst b/docs/source/dev/dependencies.rst index 823e8f9949..df2a835f98 100644 --- a/docs/source/dev/dependencies.rst +++ b/docs/source/dev/dependencies.rst @@ -17,7 +17,7 @@ Shipped internally The following libraries are shipped internally in ``share/openPMD/thirdParty/`` for convenience: -* `Catch2 `_ 2.13.9+ (`BSL-1.0 `__) +* `Catch2 `_ 2.13.10+ (`BSL-1.0 `__) * `pybind11 `_ 2.10.1+ (`new BSD `_) * `NLohmann-JSON `_ 3.9.1+ (`MIT `_) * `toml11 `_ 3.7.1+ (`MIT `__) diff --git a/share/openPMD/thirdParty/catch2/include/catch2/catch.hpp b/share/openPMD/thirdParty/catch2/include/catch2/catch.hpp index d2a12427b2..9b309bddc6 100644 --- a/share/openPMD/thirdParty/catch2/include/catch2/catch.hpp +++ b/share/openPMD/thirdParty/catch2/include/catch2/catch.hpp @@ -1,6 +1,6 @@ /* - * Catch v2.13.9 - * Generated: 2022-04-12 22:37:23.260201 + * Catch v2.13.10 + * Generated: 2022-10-16 11:01:23.452308 * ---------------------------------------------------------- * This file has been merged from multiple headers. Please don't edit it directly * Copyright (c) 2022 Two Blue Cubes Ltd. All rights reserved. 
@@ -15,7 +15,7 @@ #define CATCH_VERSION_MAJOR 2 #define CATCH_VERSION_MINOR 13 -#define CATCH_VERSION_PATCH 9 +#define CATCH_VERSION_PATCH 10 #ifdef __clang__ # pragma clang system_header @@ -7395,8 +7395,6 @@ namespace Catch { template struct ObjectStorage { - using TStorage = typename std::aligned_storage::value>::type; - ObjectStorage() : data() {} ObjectStorage(const ObjectStorage& other) @@ -7439,7 +7437,7 @@ namespace Catch { return *static_cast(static_cast(&data)); } - TStorage data; + struct { alignas(T) unsigned char data[sizeof(T)]; } data; }; } @@ -7949,7 +7947,7 @@ namespace Catch { #if defined(__i386__) || defined(__x86_64__) #define CATCH_TRAP() __asm__("int $3\n" : : ) /* NOLINT */ #elif defined(__aarch64__) - #define CATCH_TRAP() __asm__(".inst 0xd4200000") + #define CATCH_TRAP() __asm__(".inst 0xd43e0000") #endif #elif defined(CATCH_PLATFORM_IPHONE) @@ -13558,7 +13556,7 @@ namespace Catch { // Handle list request if( Option listed = list( m_config ) ) - return static_cast( *listed ); + return (std::min) (MaxExitCode, static_cast(*listed)); TestGroup tests { m_config }; auto const totals = tests.execute(); @@ -15391,7 +15389,7 @@ namespace Catch { } Version const& libraryVersion() { - static Version version( 2, 13, 9, "", 0 ); + static Version version( 2, 13, 10, "", 0 ); return version; } @@ -17526,12 +17524,20 @@ namespace Catch { #ifndef __OBJC__ +#ifndef CATCH_INTERNAL_CDECL +#ifdef _MSC_VER +#define CATCH_INTERNAL_CDECL __cdecl +#else +#define CATCH_INTERNAL_CDECL +#endif +#endif + #if defined(CATCH_CONFIG_WCHAR) && defined(CATCH_PLATFORM_WINDOWS) && defined(_UNICODE) && !defined(DO_NOT_USE_WMAIN) // Standard C/C++ Win32 Unicode wmain entry point -extern "C" int wmain (int argc, wchar_t * argv[], wchar_t * []) { +extern "C" int CATCH_INTERNAL_CDECL wmain (int argc, wchar_t * argv[], wchar_t * []) { #else // Standard C/C++ main entry point -int main (int argc, char * argv[]) { +int CATCH_INTERNAL_CDECL main (int argc, char * argv[]) { #endif 
return Catch::Session().run( argc, argv ); From a23154ab20f330487ddac0e9a1a42a3d8c98f8ae Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 7 Dec 2022 11:43:03 +0100 Subject: [PATCH 06/82] [pre-commit.ci] pre-commit autoupdate (#1343) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/mgedmin/check-manifest: 0.48 → 0.49](https://github.com/mgedmin/check-manifest/compare/0.48...0.49) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bd09b4bf07..a11d29e3b0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -109,7 +109,7 @@ repos: # Checks the manifest for missing files (native support) - repo: https://github.com/mgedmin/check-manifest - rev: "0.48" + rev: "0.49" hooks: - id: check-manifest # This is a slow hook, so only run this if --hook-stage manual is passed From f6fc4194ce4584ed8dc26920c9bbd46f747d1e6f Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Dec 2022 09:49:16 -0800 Subject: [PATCH 07/82] CI: Style w/ Ubuntu 22.04 (#1346) Update the base image for CI. --- .github/workflows/source.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index bad03d9dec..2e7f0a9b9a 100644 --- a/.github/workflows/source.yml +++ b/.github/workflows/source.yml @@ -8,7 +8,7 @@ concurrency: jobs: style: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - name: Non-ASCII Characters @@ -23,7 +23,7 @@ jobs: python3 -m flake8 --exclude=thirdParty . 
static-analysis: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v2 @@ -33,7 +33,7 @@ jobs: python3 -m pyflakes docs/ examples/ test/ setup.py documentation: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 steps: - uses: actions/checkout@v2 - uses: s-weigand/setup-conda@v1.1.1 From 268843b1fb471a6452e96bce8dc655413c126bfe Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Dec 2022 11:22:27 -0800 Subject: [PATCH 08/82] Docs: Latest Sphinx, Docutils, RTD (#1341) Update to the latest and now compatible versions. --- .readthedocs.yml | 6 +++--- docs/requirements.txt | 10 ++++------ docs/source/index.rst | 14 +++++++------- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index c98f336f1d..54a296c29d 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -5,9 +5,9 @@ python: - requirements: docs/requirements.txt formats: - - htmlzip - - pdf - - epub + - htmlzip + - pdf + - epub build: apt_packages: diff --git a/docs/requirements.txt b/docs/requirements.txt index 8fa51acf75..05ac0aa486 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -1,14 +1,12 @@ -breathe>=4.5,<4.15 -# docutils 0.17 breaks HTML tags & RTD theme -# https://github.com/sphinx-doc/sphinx/issues/9001 -docutils<=0.16 +breathe +docutils>=0.17.1 # generate plots matplotlib numpy>=1.15 pygments recommonmark scipy -sphinx<4.0 -sphinx_rtd_theme>=0.3.1 +sphinx>=5.3 +sphinx_rtd_theme>=1.1.1 sphinxcontrib-svg2pdfconverter sphinxcontrib.programoutput diff --git a/docs/source/index.rst b/docs/source/index.rst index cb315c52e5..1abdadebaf 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -16,13 +16,13 @@ Writing & reading through those backends and their associated files is supported /* front page: hide chapter titles * needed for consistent HTML-PDF-EPUB chapters */ - div#installation.section, - div#usage.section, - div#api-details.section, - 
div#utilities.section, - div#backends.section, - div#development.section, - div#maintenance.section { + section#installation, + section#usage, + section#api-details, + section#utilities, + section#backends, + section#development, + section#maintenance { display:none; } From b2edc4fb0c5cd4134cfcdae9f3b20c8ec181c70f Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Dec 2022 11:31:24 -0800 Subject: [PATCH 09/82] CMake: Fix Build & Install Option Names (#1326) * CMake: Fix Build & Install Option Names We should not overwrite `CMAKE_` variables but instead set our own. This corrects this. This also fixes that the cmake install path was unset in superbuilds failing the configure steps unless set by the calling project did set `CMAKE_INSTALL_CMAKEDIR`: ``` CMake Error at openpmd-src/CMakeLists.txt:1080 (install): install EXPORT given no DESTINATION! CMake Error at openpmd-src/CMakeLists.txt:1085 (install): install FILES given no DESTINATION! ``` * Win: LIBDIR changed from `Lib` to `lib` * Examples & CLI: Also Set Lib Output --- CMakeLists.txt | 264 ++++++++++++++++++++++++----------- cmake/openPMDFunctions.cmake | 12 +- 2 files changed, 191 insertions(+), 85 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 06c046a0bf..c52373361e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -43,22 +43,88 @@ endif() # Project structure ########################################################### # # temporary build directories -if(NOT CMAKE_ARCHIVE_OUTPUT_DIRECTORY) - set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") +if(NOT openPMD_ARCHIVE_OUTPUT_DIRECTORY) + if(CMAKE_ARCHIVE_OUTPUT_DIRECTORY) + set(openPMD_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}) + else() + set(openPMD_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") + endif() +endif() +if(NOT openPMD_LIBRARY_OUTPUT_DIRECTORY) + if(CMAKE_LIBRARY_OUTPUT_DIRECTORY) + set(openPMD_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}) + else() + 
set(openPMD_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") + endif() +endif() +if(NOT openPMD_RUNTIME_OUTPUT_DIRECTORY) + if(CMAKE_RUNTIME_OUTPUT_DIRECTORY) + set(openPMD_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + else() + set(openPMD_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin") + endif() endif() -if(NOT CMAKE_LIBRARY_OUTPUT_DIRECTORY) - set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib") +if(NOT openPMD_PDB_OUTPUT_DIRECTORY) + if(CMAKE_PDB_OUTPUT_DIRECTORY) + set(openPMD_PDB_OUTPUT_DIRECTORY ${CMAKE_PDB_OUTPUT_DIRECTORY}) + else() + set(openPMD_PDB_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY}) + endif() endif() -if(NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY) - set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/bin") +if(NOT openPMD_COMPILE_PDB_OUTPUT_DIRECTORY) + if(CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY) + set(openPMD_COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_COMPILE_PDB_OUTPUT_DIRECTORY}) + else() + set(openPMD_COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY}) + endif() endif() + # install directories -if(CMAKE_SOURCE_DIR STREQUAL PROJECT_SOURCE_DIR) +if(NOT CMAKE_INSTALL_LIBDIR AND NOT WIN32) include(GNUInstallDirs) - set(CMAKE_INSTALL_CMAKEDIR "${CMAKE_INSTALL_LIBDIR}/cmake/openPMD") - if(WIN32) - set(CMAKE_INSTALL_LIBDIR Lib) - set(CMAKE_INSTALL_CMAKEDIR "cmake") +endif() + +if(NOT openPMD_INSTALL_PREFIX) + if(CMAKE_INSTALL_PREFIX) + set(openPMD_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}") + else() + message(FATAL_ERROR "openPMD_INSTALL_PREFIX / CMAKE_INSTALL_PREFIX not set.") + endif() +endif() +if(NOT openPMD_INSTALL_BINDIR) + if(CMAKE_INSTALL_BINDIR) + set(openPMD_INSTALL_BINDIR "${CMAKE_INSTALL_BINDIR}") + else() + set(openPMD_INSTALL_BINDIR bin) + endif() +endif() +if(NOT openPMD_INSTALL_INCLUDEDIR) + if(CMAKE_INSTALL_INCLUDEDIR) + set(openPMD_INSTALL_INCLUDEDIR "${CMAKE_INSTALL_INCLUDEDIR}") + else() + set(openPMD_INSTALL_INCLUDEDIR include) + endif() +endif() +if(NOT openPMD_INSTALL_LIBDIR) + 
if(CMAKE_INSTALL_LIBDIR) + set(openPMD_INSTALL_LIBDIR "${CMAKE_INSTALL_LIBDIR}") + else() + if(WIN32) + set(openPMD_INSTALL_LIBDIR Lib) + else() + set(openPMD_INSTALL_LIBDIR lib) + endif() + endif() +endif() +if(NOT openPMD_INSTALL_CMAKEDIR) + if(CMAKE_INSTALL_CMAKEDIR) + set(openPMD_INSTALL_CMAKEDIR "${CMAKE_INSTALL_CMAKEDIR}/openPMD") + else() + if(WIN32) + set(openPMD_INSTALL_CMAKEDIR "cmake") + else() + set(openPMD_INSTALL_CMAKEDIR "${CMAKE_INSTALL_LIBDIR}/cmake/openPMD") + endif() endif() endif() @@ -101,18 +167,12 @@ include(CMakeDependentOption) # change CMake default (static libs): # build shared libs if supported by target platform get_property(SHARED_LIBS_SUPPORTED GLOBAL PROPERTY TARGET_SUPPORTS_SHARED_LIBS) -cmake_dependent_option(BUILD_SHARED_LIBS - "Build shared libraries (so/dylib/dll)." - ${SHARED_LIBS_SUPPORTED} - "SHARED_LIBS_SUPPORTED" OFF -) -mark_as_advanced(BUILD_SHARED_LIBS) if(DEFINED BUILD_SHARED_LIBS) set(openPMD_BUILD_SHARED_LIBS_DEFAULT ${BUILD_SHARED_LIBS}) else() set(openPMD_BUILD_SHARED_LIBS_DEFAULT ${SHARED_LIBS_SUPPORTED}) endif() -option(openPMD_BUILD_SHARED_LIBS "Build the openPMD tests" +option(openPMD_BUILD_SHARED_LIBS "Build shared libraries (so/dylib/dll)." 
${openPMD_BUILD_SHARED_LIBS_DEFAULT}) if(openPMD_BUILD_SHARED_LIBS AND NOT SHARED_LIBS_SUPPORTED) message(FATAL_ERROR "openPMD_BUILD_SHARED_LIBS requested but not supported by platform") @@ -506,6 +566,12 @@ add_library(openPMD::openPMD ALIAS openPMD) # properties openpmd_cxx_required(openPMD) set_target_properties(openPMD PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} + LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} + RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + PDB_OUTPUT_DIRECTORY ${openPMD_PDB_OUTPUT_DIRECTORY} + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY} + POSITION_INDEPENDENT_CODE ON WINDOWS_EXPORT_ALL_SYMBOLS ON ) @@ -580,6 +646,12 @@ if(openPMD_HAVE_ADIOS1) $) set_target_properties(openPMD.ADIOS1.Serial PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} + LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} + RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + PDB_OUTPUT_DIRECTORY ${openPMD_PDB_OUTPUT_DIRECTORY} + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY} + POSITION_INDEPENDENT_CODE ON CXX_VISIBILITY_PRESET hidden VISIBILITY_INLINES_HIDDEN ON @@ -605,6 +677,12 @@ if(openPMD_HAVE_ADIOS1) if(openPMD_HAVE_MPI) set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} + LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} + RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + PDB_OUTPUT_DIRECTORY ${openPMD_PDB_OUTPUT_DIRECTORY} + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY} + POSITION_INDEPENDENT_CODE ON CXX_VISIBILITY_PRESET hidden VISIBILITY_INLINES_HIDDEN 1 @@ -739,29 +817,29 @@ if(openPMD_HAVE_PYTHON) endif() if(WIN32) - set(CMAKE_INSTALL_PYTHONDIR_DEFAULT + set(openPMD_INSTALL_PYTHONDIR_DEFAULT "${CMAKE_INSTALL_LIBDIR}/site-packages") else() - set(CMAKE_INSTALL_PYTHONDIR_DEFAULT + 
set(openPMD_INSTALL_PYTHONDIR_DEFAULT "${CMAKE_INSTALL_LIBDIR}/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages" ) endif() # Location for installed python package - set(CMAKE_INSTALL_PYTHONDIR "${CMAKE_INSTALL_PYTHONDIR_DEFAULT}") + set(openPMD_INSTALL_PYTHONDIR "${openPMD_INSTALL_PYTHONDIR_DEFAULT}") # Build directory for python modules - set(CMAKE_PYTHON_OUTPUT_DIRECTORY "${openPMD_BINARY_DIR}/${CMAKE_INSTALL_PYTHONDIR}") + set(openPMD_PYTHON_OUTPUT_DIRECTORY "${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}") set_target_properties(openPMD.py PROPERTIES ARCHIVE_OUTPUT_NAME openpmd_api_cxx LIBRARY_OUTPUT_NAME openpmd_api_cxx - ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/openpmd_api - LIBRARY_OUTPUT_DIRECTORY ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/openpmd_api - RUNTIME_OUTPUT_DIRECTORY ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/openpmd_api - PDB_OUTPUT_DIRECTORY ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/openpmd_api - COMPILE_PDB_OUTPUT_DIRECTORY ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/openpmd_api + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api + LIBRARY_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api + RUNTIME_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api + PDB_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api ) function(copy_aux_py) set(AUX_PY_SRC_DIR ${openPMD_SOURCE_DIR}/src/binding/python/openpmd_api/) - set(AUX_PY_DSR_DIR ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/openpmd_api/) + set(AUX_PY_DSR_DIR ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api/) foreach(src_name IN LISTS ARGN) configure_file(${AUX_PY_SRC_DIR}/${src_name} ${AUX_PY_DSR_DIR}/${src_name} COPYONLY) endforeach() @@ -842,6 +920,12 @@ if(openPMD_BUILD_TESTING) openpmd_cxx_required(CatchRunner) openpmd_cxx_required(CatchMain) set_target_properties(CatchRunner CatchMain PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} + 
LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} + RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + PDB_OUTPUT_DIRECTORY ${openPMD_PDB_OUTPUT_DIRECTORY} + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY} + POSITION_INDEPENDENT_CODE ON WINDOWS_EXPORT_ALL_SYMBOLS ON ) @@ -857,6 +941,13 @@ if(openPMD_BUILD_TESTING) foreach(testname ${openPMD_TEST_NAMES}) add_executable(${testname}Tests test/${testname}Test.cpp) openpmd_cxx_required(${testname}Tests) + set_target_properties(${testname}Tests PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} + LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} + RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + ) if(openPMD_USE_INVASIVE_TESTS) target_compile_definitions(${testname}Tests PRIVATE openPMD_USE_INVASIVE_TESTS=1) @@ -880,6 +971,14 @@ if(openPMD_BUILD_CLI_TOOLS) foreach(toolname ${openPMD_CLI_TOOL_NAMES}) add_executable(openpmd-${toolname} src/cli/${toolname}.cpp) openpmd_cxx_required(openpmd-${toolname}) + set_target_properties(openpmd-${toolname} PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} + LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} + RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + ) + target_link_libraries(openpmd-${toolname} PRIVATE openPMD) endforeach() endif() @@ -899,6 +998,13 @@ if(openPMD_BUILD_EXAMPLES) else() openpmd_cxx_required(${examplename}) endif() + set_target_properties(${examplename} PROPERTIES + ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} + LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} + RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + PDB_OUTPUT_DIRECTORY 
${openPMD_RUNTIME_OUTPUT_DIRECTORY} + COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} + ) target_link_libraries(${examplename} PRIVATE openPMD) endforeach() endif() @@ -1039,26 +1145,26 @@ if(openPMD_INSTALL) install(TARGETS ${openPMD_INSTALL_TARGET_NAMES} EXPORT openPMDTargets - LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} - ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} - RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} - INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + LIBRARY DESTINATION ${openPMD_INSTALL_LIBDIR} + ARCHIVE DESTINATION ${openPMD_INSTALL_LIBDIR} + RUNTIME DESTINATION ${openPMD_INSTALL_BINDIR} + INCLUDES DESTINATION ${openPMD_INSTALL_INCLUDEDIR} ) if(openPMD_HAVE_PYTHON) install( DIRECTORY ${openPMD_SOURCE_DIR}/src/binding/python/openpmd_api - DESTINATION ${CMAKE_INSTALL_PYTHONDIR} + DESTINATION ${openPMD_INSTALL_PYTHONDIR} PATTERN "*pyc" EXCLUDE PATTERN "__pycache__" EXCLUDE ) install(TARGETS openPMD.py - DESTINATION ${CMAKE_INSTALL_PYTHONDIR}/openpmd_api + DESTINATION ${openPMD_INSTALL_PYTHONDIR}/openpmd_api ) if(openPMD_BUILD_CLI_TOOLS) foreach(toolname ${openPMD_PYTHON_CLI_TOOL_NAMES}) install( FILES ${openPMD_SOURCE_DIR}/src/cli/${toolname}.py - DESTINATION ${CMAKE_INSTALL_BINDIR} + DESTINATION ${openPMD_INSTALL_BINDIR} RENAME openpmd-${toolname} PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ ) @@ -1066,32 +1172,32 @@ if(openPMD_INSTALL) endif() endif() install(DIRECTORY "${openPMD_SOURCE_DIR}/include/openPMD" - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} + DESTINATION ${openPMD_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.hpp" PATTERN "*.tpp" ) install( FILES ${openPMD_BINARY_DIR}/include/openPMD/config.hpp - DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}/openPMD + DESTINATION ${openPMD_INSTALL_INCLUDEDIR}/openPMD ) # CMake package file for find_package(openPMD::openPMD) in depending projects install(EXPORT openPMDTargets FILE openPMDTargets.cmake NAMESPACE openPMD:: - DESTINATION ${CMAKE_INSTALL_CMAKEDIR} + DESTINATION 
${openPMD_INSTALL_CMAKEDIR} ) install( FILES ${openPMD_BINARY_DIR}/openPMDConfig.cmake ${openPMD_BINARY_DIR}/openPMDConfigVersion.cmake - DESTINATION ${CMAKE_INSTALL_CMAKEDIR} + DESTINATION ${openPMD_INSTALL_CMAKEDIR} ) install( FILES ${openPMD_SOURCE_DIR}/share/openPMD/cmake/FindADIOS.cmake - DESTINATION ${CMAKE_INSTALL_CMAKEDIR}/Modules + DESTINATION ${openPMD_INSTALL_CMAKEDIR}/Modules ) # pkg-config .pc file for depending legacy projects # This is for projects that do not use a build file generator, e.g. @@ -1100,7 +1206,7 @@ if(openPMD_INSTALL) if(openPMD_HAVE_PKGCONFIG) install( FILES ${openPMD_BINARY_DIR}/openPMD.pc - DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig + DESTINATION ${openPMD_INSTALL_LIBDIR}/pkgconfig ) endif() endif() @@ -1166,14 +1272,14 @@ if(openPMD_BUILD_TESTING) if(${testname} MATCHES "^Parallel.*$") if(openPMD_HAVE_MPI) add_test(NAME MPI.${testname} - COMMAND ${MPI_TEST_EXE} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${testname}Tests - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + COMMAND ${MPI_TEST_EXE} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${testname}Tests + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() else() add_test(NAME Serial.${testname} - COMMAND ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${testname}Tests - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + COMMAND ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${testname}Tests + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() endforeach() @@ -1186,22 +1292,22 @@ if(openPMD_BUILD_TESTING) COMMAND ${Python_EXECUTABLE} ${openPMD_SOURCE_DIR}/test/python/unittest/Test.py -v WORKING_DIRECTORY - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) if(WIN32) string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BASEDIR ${openPMD_BINARY_DIR}) - string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) string(REPLACE ";" "\\;" WIN_PATH "$ENV{PATH}") 
string(REPLACE ";" "\\;" WIN_PYTHONPATH "$ENV{PYTHONPATH}") set_property(TEST Unittest.py PROPERTY ENVIRONMENT "PATH=${WIN_BUILD_BINDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PATH}\n" - "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${CMAKE_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" + "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${openPMD_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" ) else() set_tests_properties(Unittest.py PROPERTIES ENVIRONMENT - "PYTHONPATH=${openPMD_BINARY_DIR}/${CMAKE_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" + "PYTHONPATH=${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" ) endif() endif() @@ -1220,14 +1326,14 @@ if(openPMD_BUILD_TESTING) elseif(${examplename} MATCHES "^.*_parallel$") if(openPMD_HAVE_MPI) add_test(NAME MPI.${examplename} - COMMAND ${MPI_TEST_EXE} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${examplename} - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + COMMAND ${MPI_TEST_EXE} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${examplename} + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() else() add_test(NAME Serial.${examplename} COMMAND ${examplename} - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() endforeach() @@ -1236,7 +1342,7 @@ if(openPMD_BUILD_TESTING) if(openPMD_HAVE_ADIOS2) add_test(NAME Asynchronous.10_streaming COMMAND sh -c "$ & sleep 1; $" - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) endif() endif() @@ -1246,13 +1352,13 @@ if(openPMD_BUILD_TESTING) foreach(toolname ${openPMD_CLI_TOOL_NAMES}) add_test(NAME CLI.help.${toolname} COMMAND openpmd-${toolname} --help - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endforeach() if(openPMD_HAVE_HDF5 AND EXAMPLE_DATA_FOUND) add_test(NAME CLI.ls COMMAND openpmd-ls ../samples/git-sample/data%08T.h5 - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + 
WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() endif() @@ -1262,12 +1368,12 @@ if(openPMD_BUILD_TESTING) set_property(TEST ${test_name} PROPERTY ENVIRONMENT "PATH=${WIN_BUILD_BINDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PATH}\n" - "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${CMAKE_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" + "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${openPMD_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" ) else() set_tests_properties(${test_name} PROPERTIES ENVIRONMENT - "PYTHONPATH=${openPMD_BINARY_DIR}/${CMAKE_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" + "PYTHONPATH=${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" ) endif() endfunction() @@ -1279,7 +1385,7 @@ if(openPMD_BUILD_TESTING) foreach(pymodulename ${openPMD_PYTHON_CLI_MODULE_NAMES}) add_test(NAME CLI.py.help.${pymodulename} COMMAND ${Python_EXECUTABLE} -m openpmd_api.${pymodulename} --help - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) test_set_pythonpath(CLI.py.help.${pymodulename}) endforeach() @@ -1291,13 +1397,13 @@ if(openPMD_BUILD_TESTING) foreach(toolname ${openPMD_PYTHON_CLI_TOOL_NAMES}) configure_file( ${openPMD_SOURCE_DIR}/src/cli/${toolname}.py - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-${toolname} + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-${toolname} COPYONLY ) add_test(NAME CLI.help.${toolname}.py COMMAND ${Python_EXECUTABLE} - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-${toolname} --help - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-${toolname} --help + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) test_set_pythonpath(CLI.help.${toolname}.py) endforeach() @@ -1317,41 +1423,41 @@ if(openPMD_BUILD_TESTING) add_test(NAME CLI.pipe.py COMMAND sh -c "${MPI_TEST_EXE} ${Python_EXECUTABLE} \ - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ --infile 
../samples/git-sample/data%T.h5 \ --outfile ../samples/git-sample/data%T.bp && \ \ ${MPI_TEST_EXE} ${Python_EXECUTABLE} \ - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ --infile ../samples/git-sample/thetaMode/data%T.h5 \ --outfile ../samples/git-sample/thetaMode/data.bp && \ \ ${Python_EXECUTABLE} \ - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ --infile ../samples/git-sample/thetaMode/data.bp \ --outfile ../samples/git-sample/thetaMode/data%T.json \ " - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) else() add_test(NAME CLI.pipe.py COMMAND sh -c "${Python_EXECUTABLE} \ - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ --infile ../samples/git-sample/data%T.h5 \ --outfile ../samples/git-sample/data%T.bp && \ \ ${Python_EXECUTABLE} \ - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ --infile ../samples/git-sample/thetaMode/data%T.h5 \ --outfile ../samples/git-sample/thetaMode/data%T.bp && \ \ ${Python_EXECUTABLE} \ - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/openpmd-pipe \ --infile ../samples/git-sample/thetaMode/data%T.bp \ --outfile ../samples/git-sample/thetaMode/data%T.json \ " - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() test_set_pythonpath(CLI.pipe.py) @@ -1361,18 +1467,18 @@ if(openPMD_BUILD_TESTING) function(configure_python_test testname) if(WIN32) string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BASEDIR ${openPMD_BINARY_DIR}) - string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) string(REPLACE ";" "\\;" WIN_PATH "$ENV{PATH}") string(REPLACE ";" 
"\\;" WIN_PYTHONPATH "$ENV{PYTHONPATH}") set_property(TEST ${testname} PROPERTY ENVIRONMENT "PATH=${WIN_BUILD_BINDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PATH}\n" - "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${CMAKE_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" + "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${openPMD_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" ) else() set_tests_properties(${testname} PROPERTIES ENVIRONMENT - "PYTHONPATH=${openPMD_BINARY_DIR}/${CMAKE_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" + "PYTHONPATH=${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" ) endif() endfunction() @@ -1385,7 +1491,7 @@ if(openPMD_BUILD_TESTING) foreach(examplename ${openPMD_PYTHON_EXAMPLE_NAMES}) configure_file( ${openPMD_SOURCE_DIR}/examples/${examplename}.py - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${examplename}.py + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${examplename}.py COPYONLY ) if(openPMD_BUILD_TESTING) @@ -1397,8 +1503,8 @@ if(openPMD_BUILD_TESTING) # see https://mpi4py.readthedocs.io/en/stable/mpi4py.run.html add_test(NAME Example.py.${examplename} COMMAND ${MPI_TEST_EXE} ${Python_EXECUTABLE} -m mpi4py - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${examplename}.py - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${examplename}.py + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) else() continue() @@ -1406,9 +1512,9 @@ if(openPMD_BUILD_TESTING) else() add_test(NAME Example.py.${examplename} COMMAND ${Python_EXECUTABLE} - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${examplename}.py + ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${examplename}.py WORKING_DIRECTORY - ${CMAKE_RUNTIME_OUTPUT_DIRECTORY} + ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() configure_python_test(Example.py.${examplename}) @@ -1416,8 +1522,8 @@ if(openPMD_BUILD_TESTING) endforeach() if(openPMD_HAVE_ADIOS2 AND openPMD_BUILD_TESTING AND NOT WIN32) add_test(NAME Asynchronous.10_streaming.py - COMMAND sh -c "${Python_EXECUTABLE} 
${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/10_streaming_write.py & sleep 1; ${Python_EXECUTABLE} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/10_streaming_read.py" - WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}) + COMMAND sh -c "${Python_EXECUTABLE} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/10_streaming_write.py & sleep 1; ${Python_EXECUTABLE} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/10_streaming_read.py" + WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) configure_python_test(Asynchronous.10_streaming.py) endif() endif() diff --git a/cmake/openPMDFunctions.cmake b/cmake/openPMDFunctions.cmake index 7a6db52bc4..82453df88e 100644 --- a/cmake/openPMDFunctions.cmake +++ b/cmake/openPMDFunctions.cmake @@ -12,13 +12,13 @@ function(openpmd_print_summary) message("") if(openPMD_INSTALL) message(" Install with RPATHs: ${openPMD_INSTALL_RPATH}") - message(" Installation prefix: ${CMAKE_INSTALL_PREFIX}") - message(" bin: ${CMAKE_INSTALL_BINDIR}") - message(" lib: ${CMAKE_INSTALL_LIBDIR}") - message(" include: ${CMAKE_INSTALL_INCLUDEDIR}") - message(" cmake: ${CMAKE_INSTALL_CMAKEDIR}") + message(" Installation prefix: ${openPMD_INSTALL_PREFIX}") + message(" bin: ${openPMD_INSTALL_BINDIR}") + message(" lib: ${openPMD_INSTALL_LIBDIR}") + message(" include: ${openPMD_INSTALL_INCLUDEDIR}") + message(" cmake: ${openPMD_INSTALL_CMAKEDIR}") if(openPMD_HAVE_PYTHON) - message(" python: ${CMAKE_INSTALL_PYTHONDIR}") + message(" python: ${openPMD_INSTALL_PYTHONDIR}") endif() else() message(" Installation: OFF") From fa190783b0836075611712e6bcc6f5011d38a65c Mon Sep 17 00:00:00 2001 From: "lgtm-com[bot]" <43144390+lgtm-com[bot]@users.noreply.github.com> Date: Fri, 9 Dec 2022 20:17:28 +0000 Subject: [PATCH 10/82] Add CodeQL workflow for GitHub code scanning (#1345) * Add CodeQL workflow for GitHub code scanning * Improve, Cleanup Co-authored-by: LGTM Migrator Co-authored-by: Axel Huebl --- .github/workflows/codeql.yml | 65 ++++++++++++++++++++++++++++++++++++ .lgtm.yml | 23 ------------- 2 files changed, 65 
insertions(+), 23 deletions(-) create mode 100644 .github/workflows/codeql.yml delete mode 100644 .lgtm.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 0000000000..847a9733ad --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,65 @@ +name: "CodeQL" + +on: + push: + branches: [ "dev" ] + pull_request: + branches: [ "dev" ] + schedule: + - cron: "14 6 * * 6" + +concurrency: + group: ${{ github.ref }}-${{ github.head_ref }}-codeql + cancel-in-progress: true + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + strategy: + fail-fast: false + matrix: + language: [ python, cpp ] + + steps: + - name: Checkout + uses: actions/checkout@v3 + + - name: Install Packages + run: | + sudo apt-get update + sudo apt-get install --yes cmake openmpi-bin libopenmpi-dev libhdf5-openmpi-dev libadios-openmpi-dev + + python -m pip install --upgrade pip + python -m pip install --upgrade wheel + python -m pip install --upgrade cmake + export CMAKE="$HOME/.local/bin/cmake" && echo "CMAKE=$CMAKE" >> $GITHUB_ENV + python -m pip install --upgrade numpy + python -m pip install --upgrade mpi4py + python -m pip install --upgrade pytest + + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: ${{ matrix.language }} + queries: +security-and-quality + + - name: Build (Py) + uses: github/codeql-action/autobuild@v2 + if: ${{ matrix.language == 'python' }} + + - name: Build (C++) + if: ${{ matrix.language == 'cpp' }} + run: | + $CMAKE -S . 
-B build + $CMAKE --build build -j 2 + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 + with: + category: "/language:${{ matrix.language }}" diff --git a/.lgtm.yml b/.lgtm.yml deleted file mode 100644 index 8f1ac78e25..0000000000 --- a/.lgtm.yml +++ /dev/null @@ -1,23 +0,0 @@ -# docs: -# https://help.semmle.com/lgtm-enterprise/user/help/lgtm.yml-configuration-file.html#example-of-complete-lgtmyml-file -extraction: - cpp: - prepare: - packages: - - cmake - - openmpi-bin - - libopenmpi-dev - - libhdf5-openmpi-dev - - libadios-openmpi-dev - after_prepare: # make sure lgtm.com doesn't call setup.py (for which they use a python2 atm) - - rm -f setup.py - - python -m pip install --upgrade pip - - python -m pip install --upgrade wheel - - python -m pip install --upgrade cmake - - export CMAKE="$HOME/.local/bin/cmake" - configure: - command: - - $CMAKE -S . -B build - index: - build_command: - - $CMAKE --build build -j 2 From 651b236da1411e54847c4d5fb56322f4c019b2b8 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Dec 2022 12:59:42 -0800 Subject: [PATCH 11/82] CodeQL: Split Config & Build --- .github/workflows/codeql.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 847a9733ad..ec84979959 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -43,6 +43,10 @@ jobs: python -m pip install --upgrade mpi4py python -m pip install --upgrade pytest + - name: Configure (C++) + if: ${{ matrix.language == 'cpp' }} + run: $CMAKE -S . -B build + - name: Initialize CodeQL uses: github/codeql-action/init@v2 with: @@ -55,9 +59,7 @@ jobs: - name: Build (C++) if: ${{ matrix.language == 'cpp' }} - run: | - $CMAKE -S . 
-B build - $CMAKE --build build -j 2 + run: $CMAKE --build build -j 2 - name: Perform CodeQL Analysis uses: github/codeql-action/analyze@v2 From e73d3d88d743b756b2b9a7baff7432c73e08b55b Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 9 Dec 2022 17:23:43 -0800 Subject: [PATCH 12/82] =?UTF-8?q?CI:=20CodeQL=20w/=20=F0=9F=94=8D?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .github/workflows/codeql.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index ec84979959..1925663362 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -1,4 +1,4 @@ -name: "CodeQL" +name: 🔍 CodeQL on: push: From ea3d7f8123498ef6ebbbfb815ab357ee42e0990c Mon Sep 17 00:00:00 2001 From: Bernhard Manfred Gruber Date: Tue, 13 Dec 2022 03:27:19 +0100 Subject: [PATCH 13/82] Prefer parallel HDF5 in find_package in downstream use (#1340) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Prefer parallel HDF5 in find_package in downstream use This aligns different behavior in finding HDF5 during cmake configure and use of openPMD in a downstream project. In both cases, the parallel HDF5 version is preferred now. 
Fixes: #1339 * Prefer parallel HDF5 only when using MPI * Update Docs Co-authored-by: Franz Pöschel Co-authored-by: Axel Huebl --- .rodare.json | 6 ++++++ CHANGELOG.rst | 4 ++++ README.md | 2 ++ openPMDConfig.cmake.in | 1 + 4 files changed, 13 insertions(+) diff --git a/.rodare.json b/.rodare.json index f2324fc670..5ceaf51f24 100644 --- a/.rodare.json +++ b/.rodare.json @@ -114,6 +114,12 @@ "name": "Bez, Jean Luca", "orcid": "0000-0002-3915-1135", "type": "Other" + }, + { + "affiliation": "CERN", + "name": "Gruber, Bernhard Manfred", + "orcid": "0000-0001-7848-1690", + "type": "Other" } ], "title": "C++ & Python API for Scientific I/O with openPMD", diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 70cff2439f..86a08b082c 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -24,6 +24,10 @@ Features Bug Fixes """"""""" +- CMake: + + - MPI: prefer HDF5 in Config package, too #1340 + Other """"" - Catch2: updated to 2.13.10 #1299 #... diff --git a/README.md b/README.md index 520486d6a0..df45936b70 100644 --- a/README.md +++ b/README.md @@ -420,6 +420,8 @@ Further thanks go to improvements and contributions from: C++ API bug fixes * [Jean Luca Bez (LBNL)](https://github.com/jeanbez): HDF5 performance tuning +* [Bernhard Manfred Gruber (CERN)](https://github.com/bernhardmgruber): + CMake fix for parallel HDF5 ### Grants diff --git a/openPMDConfig.cmake.in b/openPMDConfig.cmake.in index d22a595fae..eea485f2b8 100644 --- a/openPMDConfig.cmake.in +++ b/openPMDConfig.cmake.in @@ -24,6 +24,7 @@ set(openPMD_MPI_FOUND ${openPMD_HAVE_MPI}) set(openPMD_HAVE_HDF5 @openPMD_HAVE_HDF5@) if(openPMD_HAVE_HDF5) + set(HDF5_PREFER_PARALLEL ${openPMD_HAVE_MPI}) find_dependency(HDF5) endif() set(openPMD_HDF5_FOUND ${openPMD_HAVE_HDF5}) From 5fce7364640d8a2c94a58cbd39c45109096baf1a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 12 Dec 2022 18:28:06 -0800 Subject: [PATCH 14/82] [pre-commit.ci] pre-commit autoupdate (#1347) 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pycqa/isort: 5.10.1 → 5.11.1](https://github.com/pycqa/isort/compare/5.10.1...5.11.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index a11d29e3b0..f1b8f25eca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -79,7 +79,7 @@ repos: # Sorts Python imports according to PEP8 # https://www.python.org/dev/peps/pep-0008/#imports - repo: https://github.com/pycqa/isort - rev: 5.10.1 + rev: 5.11.1 hooks: - id: isort name: isort (python) From c373b970f6877024e1fc86671120cb1a79282a57 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Mon, 19 Dec 2022 12:40:13 +0100 Subject: [PATCH 15/82] Add Series::close() call (#1324) * Add Series::close() * Python bindings * Fix invasive tests * Fix shadowing in SerialIOTests * Reset shared ptrs when using Series::close() * Add doxygen documentation * Adapt examples to use Series::close() * Python tests Use mostly Series.close(), but keep an instance of `del series` to have this still tested. 
* Test and document status of operator bool after closing the series --- examples/2_read_serial.cpp | 2 + examples/2_read_serial.py | 11 +- examples/2a_read_thetaMode_serial.cpp | 2 + examples/2a_read_thetaMode_serial.py | 11 +- examples/3_write_serial.cpp | 2 + examples/3_write_serial.py | 11 +- examples/3a_write_thetaMode_serial.cpp | 2 + examples/3a_write_thetaMode_serial.py | 11 +- examples/3b_write_resizable_particles.cpp | 2 + examples/3b_write_resizable_particles.py | 11 +- examples/4_read_parallel.cpp | 68 ++++++------ examples/4_read_parallel.py | 11 +- examples/5_write_parallel.cpp | 100 +++++++++--------- examples/5_write_parallel.py | 11 +- examples/6_dump_filebased_series.cpp | 2 + examples/7_extended_write_serial.cpp | 4 +- examples/7_extended_write_serial.py | 11 +- examples/9_particle_write_serial.py | 2 +- .../openPMD/IO/AbstractIOHandlerHelper.hpp | 6 +- include/openPMD/Series.hpp | 16 ++- include/openPMD/backend/Attributable.hpp | 10 +- include/openPMD/backend/Writable.hpp | 13 ++- src/IO/AbstractIOHandlerHelper.cpp | 10 +- src/Series.cpp | 71 ++++++++----- src/binding/python/Series.cpp | 9 ++ test/AuxiliaryTest.cpp | 12 ++- test/SerialIOTest.cpp | 69 ++++++------ test/python/unittest/API/APITest.py | 51 ++++++--- 28 files changed, 321 insertions(+), 220 deletions(-) mode change 100755 => 100644 examples/2_read_serial.py mode change 100755 => 100644 examples/2a_read_thetaMode_serial.py mode change 100755 => 100644 examples/3_write_serial.py mode change 100755 => 100644 examples/3a_write_thetaMode_serial.py mode change 100755 => 100644 examples/3b_write_resizable_particles.py mode change 100755 => 100644 examples/4_read_parallel.py mode change 100755 => 100644 examples/5_write_parallel.py mode change 100755 => 100644 examples/9_particle_write_serial.py diff --git a/examples/2_read_serial.cpp b/examples/2_read_serial.cpp index 5cbea17ed2..e944ef12bf 100644 --- a/examples/2_read_serial.cpp +++ b/examples/2_read_serial.cpp @@ -100,6 +100,8 @@ int 
main() /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. */ return 0; } diff --git a/examples/2_read_serial.py b/examples/2_read_serial.py old mode 100755 new mode 100644 index 9b68db4809..d24841775a --- a/examples/2_read_serial.py +++ b/examples/2_read_serial.py @@ -65,8 +65,9 @@ print("Full E/x is of shape {0} and starts with:".format(all_data.shape)) print(all_data[0, 0, :5]) - # The files in 'series' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. - # One can delete the object explicitly (or let it run out of scope) to - # trigger this. - del series + # The files in 'series' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. + series.close() diff --git a/examples/2a_read_thetaMode_serial.cpp b/examples/2a_read_thetaMode_serial.cpp index 473a6e7d0f..8085e242b2 100644 --- a/examples/2a_read_thetaMode_serial.cpp +++ b/examples/2a_read_thetaMode_serial.cpp @@ -72,6 +72,8 @@ int main() /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. 
*/ return 0; } diff --git a/examples/2a_read_thetaMode_serial.py b/examples/2a_read_thetaMode_serial.py old mode 100755 new mode 100644 index b2ec25bc20..907f6634aa --- a/examples/2a_read_thetaMode_serial.py +++ b/examples/2a_read_thetaMode_serial.py @@ -51,8 +51,9 @@ # E_z_yz = toCartesianSliceYZ(E_z_modes)[:, :] # (y, z) # series.flush() - # The files in 'series' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. - # One can delete the object explicitly (or let it run out of scope) to - # trigger this. - del series + # The files in 'series' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. + series.close() diff --git a/examples/3_write_serial.cpp b/examples/3_write_serial.cpp index 54f9c384b9..155425eaaa 100644 --- a/examples/3_write_serial.cpp +++ b/examples/3_write_serial.cpp @@ -73,6 +73,8 @@ int main(int argc, char *argv[]) /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. */ return 0; } diff --git a/examples/3_write_serial.py b/examples/3_write_serial.py old mode 100755 new mode 100644 index 5aabd2998a..320acd027e --- a/examples/3_write_serial.py +++ b/examples/3_write_serial.py @@ -50,8 +50,9 @@ series.flush() print("Dataset content has been fully written") - # The files in 'series' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. 
- # One can delete the object explicitly (or let it run out of scope) to - # trigger this. - del series + # The files in 'series' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. + series.close() diff --git a/examples/3a_write_thetaMode_serial.cpp b/examples/3a_write_thetaMode_serial.cpp index df7134a9f7..56fd703799 100644 --- a/examples/3a_write_thetaMode_serial.cpp +++ b/examples/3a_write_thetaMode_serial.cpp @@ -89,6 +89,8 @@ int main() /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. */ return 0; } diff --git a/examples/3a_write_thetaMode_serial.py b/examples/3a_write_thetaMode_serial.py old mode 100755 new mode 100644 index 8570383c42..e5c4419505 --- a/examples/3a_write_thetaMode_serial.py +++ b/examples/3a_write_thetaMode_serial.py @@ -64,8 +64,9 @@ series.flush() - # The files in 'series' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. - # One can delete the object explicitly (or let it run out of scope) to - # trigger this. - del series + # The files in 'series' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. 
+ series.close() diff --git a/examples/3b_write_resizable_particles.cpp b/examples/3b_write_resizable_particles.cpp index 78c752313e..7cd424ee2a 100644 --- a/examples/3b_write_resizable_particles.cpp +++ b/examples/3b_write_resizable_particles.cpp @@ -86,6 +86,8 @@ int main() /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. */ return 0; } diff --git a/examples/3b_write_resizable_particles.py b/examples/3b_write_resizable_particles.py old mode 100755 new mode 100644 index eb604deb71..227ce06977 --- a/examples/3b_write_resizable_particles.py +++ b/examples/3b_write_resizable_particles.py @@ -65,8 +65,9 @@ # rinse and repeat as needed :) - # The files in 'series' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. - # One can delete the object explicitly (or let it run out of scope) to - # trigger this. - del series + # The files in 'series' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. 
+ series.close() diff --git a/examples/4_read_parallel.cpp b/examples/4_read_parallel.cpp index 530b00fe5d..75f19f4be1 100644 --- a/examples/4_read_parallel.cpp +++ b/examples/4_read_parallel.cpp @@ -39,56 +39,48 @@ int main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* note: this scope is intentional to destruct the openPMD::Series object - * prior to MPI_Finalize(); - */ - { - Series series = Series( - "../samples/git-sample/data%T.h5", - Access::READ_ONLY, - MPI_COMM_WORLD); - if (0 == mpi_rank) - cout << "Read a series in parallel with " << mpi_size - << " MPI ranks\n"; + Series series = Series( + "../samples/git-sample/data%T.h5", Access::READ_ONLY, MPI_COMM_WORLD); + if (0 == mpi_rank) + cout << "Read a series in parallel with " << mpi_size << " MPI ranks\n"; - MeshRecordComponent E_x = series.iterations[100].meshes["E"]["x"]; + MeshRecordComponent E_x = series.iterations[100].meshes["E"]["x"]; - Offset chunk_offset = { - static_cast(mpi_rank) + 1, 1, 1}; - Extent chunk_extent = {2, 2, 1}; + Offset chunk_offset = {static_cast(mpi_rank) + 1, 1, 1}; + Extent chunk_extent = {2, 2, 1}; - auto chunk_data = E_x.loadChunk(chunk_offset, chunk_extent); + auto chunk_data = E_x.loadChunk(chunk_offset, chunk_extent); - if (0 == mpi_rank) - cout << "Queued the loading of a single chunk per MPI rank from " - "disk, " - "ready to execute\n"; - series.flush(); + if (0 == mpi_rank) + cout << "Queued the loading of a single chunk per MPI rank from " + "disk, " + "ready to execute\n"; + series.flush(); - if (0 == mpi_rank) - cout << "Chunks have been read from disk\n"; + if (0 == mpi_rank) + cout << "Chunks have been read from disk\n"; - for (int i = 0; i < mpi_size; ++i) + for (int i = 0; i < mpi_size; ++i) + { + if (i == mpi_rank) { - if (i == mpi_rank) + cout << "Rank " << mpi_rank << " - Read chunk contains:\n"; + for (size_t row = 0; row < chunk_extent[0]; ++row) { - cout << "Rank " << mpi_rank << " - Read 
chunk contains:\n"; - for (size_t row = 0; row < chunk_extent[0]; ++row) - { - for (size_t col = 0; col < chunk_extent[1]; ++col) - cout << "\t" << '(' << row + chunk_offset[0] << '|' - << col + chunk_offset[1] << '|' << 1 << ")\t" - << chunk_data.get()[row * chunk_extent[1] + col]; - cout << std::endl; - } + for (size_t col = 0; col < chunk_extent[1]; ++col) + cout << "\t" << '(' << row + chunk_offset[0] << '|' + << col + chunk_offset[1] << '|' << 1 << ")\t" + << chunk_data.get()[row * chunk_extent[1] + col]; + cout << std::endl; } - - // this barrier is not necessary but structures the example output - MPI_Barrier(MPI_COMM_WORLD); } + + // this barrier is not necessary but structures the example output + MPI_Barrier(MPI_COMM_WORLD); } + series.close(); - // openPMD::Series MUST be destructed at this point + // openPMD::Series MUST be destructed or closed at this point MPI_Finalize(); return 0; diff --git a/examples/4_read_parallel.py b/examples/4_read_parallel.py old mode 100755 new mode 100644 index 35efc9a4f9..f30d6ffa2d --- a/examples/4_read_parallel.py +++ b/examples/4_read_parallel.py @@ -56,10 +56,11 @@ # this barrier is not necessary but structures the example output comm.Barrier() - # The files in 'series' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. - # One can delete the object explicitly (or let it run out of scope) to - # trigger this. + # The files in 'series' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. # In any case, this must happen before MPI_Finalize() is called # (usually in the mpi4py exit hook). 
- del series + series.close() diff --git a/examples/5_write_parallel.cpp b/examples/5_write_parallel.cpp index b8875504a5..666de4a3cd 100644 --- a/examples/5_write_parallel.cpp +++ b/examples/5_write_parallel.cpp @@ -39,58 +39,54 @@ int main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* note: this scope is intentional to destruct the openPMD::Series object - * prior to MPI_Finalize(); - */ - { - // global data set to write: [MPI_Size * 10, 300] - // each rank writes a 10x300 slice with its MPI rank as values - auto const value = float(mpi_size); - std::vector local_data(10 * 300, value); - if (0 == mpi_rank) - cout << "Set up a 2D array with 10x300 elements per MPI rank (" - << mpi_size << "x) that will be written to disk\n"; - - // open file for writing - Series series = Series( - "../samples/5_parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); - if (0 == mpi_rank) - cout << "Created an empty series in parallel with " << mpi_size - << " MPI ranks\n"; - - MeshRecordComponent mymesh = - series.iterations[1].meshes["mymesh"][MeshRecordComponent::SCALAR]; - - // example 1D domain decomposition in first index - Datatype datatype = determineDatatype(); - Extent global_extent = {10ul * mpi_size, 300}; - Dataset dataset = Dataset(datatype, global_extent); - - if (0 == mpi_rank) - cout << "Prepared a Dataset of size " << dataset.extent[0] << "x" - << dataset.extent[1] << " and Datatype " << dataset.dtype - << '\n'; - - mymesh.resetDataset(dataset); - if (0 == mpi_rank) - cout << "Set the global Dataset properties for the scalar field " - "mymesh in iteration 1\n"; - - // example shows a 1D domain decomposition in first index - Offset chunk_offset = {10ul * mpi_rank, 0}; - Extent chunk_extent = {10, 300}; - mymesh.storeChunk(local_data, chunk_offset, chunk_extent); - if (0 == mpi_rank) - cout << "Registered a single chunk per MPI rank containing its " - "contribution, " - "ready to write content to 
disk\n"; - - series.flush(); - if (0 == mpi_rank) - cout << "Dataset content has been fully written to disk\n"; - } - - // openPMD::Series MUST be destructed at this point + // global data set to write: [MPI_Size * 10, 300] + // each rank writes a 10x300 slice with its MPI rank as values + auto const value = float(mpi_size); + std::vector local_data(10 * 300, value); + if (0 == mpi_rank) + cout << "Set up a 2D array with 10x300 elements per MPI rank (" + << mpi_size << "x) that will be written to disk\n"; + + // open file for writing + Series series = Series( + "../samples/5_parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); + if (0 == mpi_rank) + cout << "Created an empty series in parallel with " << mpi_size + << " MPI ranks\n"; + + MeshRecordComponent mymesh = + series.iterations[1].meshes["mymesh"][MeshRecordComponent::SCALAR]; + + // example 1D domain decomposition in first index + Datatype datatype = determineDatatype(); + Extent global_extent = {10ul * mpi_size, 300}; + Dataset dataset = Dataset(datatype, global_extent); + + if (0 == mpi_rank) + cout << "Prepared a Dataset of size " << dataset.extent[0] << "x" + << dataset.extent[1] << " and Datatype " << dataset.dtype << '\n'; + + mymesh.resetDataset(dataset); + if (0 == mpi_rank) + cout << "Set the global Dataset properties for the scalar field " + "mymesh in iteration 1\n"; + + // example shows a 1D domain decomposition in first index + Offset chunk_offset = {10ul * mpi_rank, 0}; + Extent chunk_extent = {10, 300}; + mymesh.storeChunk(local_data, chunk_offset, chunk_extent); + if (0 == mpi_rank) + cout << "Registered a single chunk per MPI rank containing its " + "contribution, " + "ready to write content to disk\n"; + + series.flush(); + if (0 == mpi_rank) + cout << "Dataset content has been fully written to disk\n"; + + series.close(); + + // openPMD::Series MUST be destructed or closed at this point MPI_Finalize(); return 0; diff --git a/examples/5_write_parallel.py b/examples/5_write_parallel.py old 
mode 100755 new mode 100644 index 8000f6867f..d925251834 --- a/examples/5_write_parallel.py +++ b/examples/5_write_parallel.py @@ -63,8 +63,9 @@ if 0 == comm.rank: print("Dataset content has been fully written to disk") - # The files in 'series' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. - # One can delete the object explicitly (or let it run out of scope) to - # trigger this. - del series + # The files in 'series' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. + series.close() diff --git a/examples/6_dump_filebased_series.cpp b/examples/6_dump_filebased_series.cpp index 99b2b0939d..1b2964a5d4 100644 --- a/examples/6_dump_filebased_series.cpp +++ b/examples/6_dump_filebased_series.cpp @@ -172,6 +172,8 @@ int main() /* The files in 'o' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. */ return 0; } diff --git a/examples/7_extended_write_serial.cpp b/examples/7_extended_write_serial.cpp index 62d8752a6e..da866eac65 100644 --- a/examples/7_extended_write_serial.cpp +++ b/examples/7_extended_write_serial.cpp @@ -229,7 +229,9 @@ int main() /* The files in 'f' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is - * called. + * called. Alternatively, one can call `series.close()` to the same + * effect as calling the destructor, including the release of file + * handles. 
*/ } // namespace ; diff --git a/examples/7_extended_write_serial.py b/examples/7_extended_write_serial.py index 8a8a077058..884311f92d 100755 --- a/examples/7_extended_write_serial.py +++ b/examples/7_extended_write_serial.py @@ -206,8 +206,9 @@ # constant records mesh["y"].make_constant(constant_value) - # The files in 'f' are still open until the object is destroyed, on - # which it cleanly flushes and closes all open file handles. - # One can delete the object explicitly (or let it run out of scope) to - # trigger this. - del f + # The files in 'f' are still open until the series is closed, at which + # time it cleanly flushes and closes all open file handles. + # One can close the object explicitly to trigger this. + # Alternatively, this will automatically happen once the garbage collector + # claims (every copy of) the series object. + f.close() diff --git a/examples/9_particle_write_serial.py b/examples/9_particle_write_serial.py old mode 100755 new mode 100644 index 659ca846d1..5dc842918e --- a/examples/9_particle_write_serial.py +++ b/examples/9_particle_write_serial.py @@ -69,4 +69,4 @@ f.flush() # now the file is closed - del f + f.close() diff --git a/include/openPMD/IO/AbstractIOHandlerHelper.hpp b/include/openPMD/IO/AbstractIOHandlerHelper.hpp index 0d1fe4c8da..95d2d5537e 100644 --- a/include/openPMD/IO/AbstractIOHandlerHelper.hpp +++ b/include/openPMD/IO/AbstractIOHandlerHelper.hpp @@ -45,7 +45,7 @@ namespace openPMD * @return Smart pointer to created IOHandler. */ template -std::shared_ptr createIOHandler( +std::unique_ptr createIOHandler( std::string path, Access access, Format format, @@ -70,7 +70,7 @@ std::shared_ptr createIOHandler( * @return Smart pointer to created IOHandler. 
*/ template -std::shared_ptr createIOHandler( +std::unique_ptr createIOHandler( std::string path, Access access, Format format, @@ -78,7 +78,7 @@ std::shared_ptr createIOHandler( JSON options = JSON()); // version without configuration to use in AuxiliaryTest -std::shared_ptr createIOHandler( +std::unique_ptr createIOHandler( std::string path, Access access, Format format, diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp index eb79199b38..f3c40582fb 100644 --- a/include/openPMD/Series.hpp +++ b/include/openPMD/Series.hpp @@ -158,6 +158,8 @@ namespace internal * The destructor will only attempt flushing again if this is true. */ bool m_lastFlushSuccessful = false; + + void close(); }; // SeriesData class SeriesInternal; @@ -500,6 +502,18 @@ class Series : public Attributable */ WriteIterations writeIterations(); + /** + * @brief Close the Series and release the data storage/transport backends. + * + * This is an explicit API call for what the Series::~Series() destructor + * would do otherwise. + * All backends are closed after calling this method. + * The Series should be treated as destroyed after calling this method. + * The Series will be evaluated as false in boolean contexts after calling + * this method. + */ + void close(); + // clang-format off OPENPMD_private // clang-format on @@ -552,7 +566,7 @@ OPENPMD_private void parseJsonOptions(TracingJSON &options, ParsedInput &); bool hasExpansionPattern(std::string filenameWithExtension); bool reparseExpansionPattern(std::string filenameWithExtension); - void init(std::shared_ptr, std::unique_ptr); + void init(std::unique_ptr, std::unique_ptr); void initDefaults(IterationEncoding, bool initAll = false); /** * @brief Internal call for flushing a Series. 
diff --git a/include/openPMD/backend/Attributable.hpp b/include/openPMD/backend/Attributable.hpp index 8d34ee7935..995923151c 100644 --- a/include/openPMD/backend/Attributable.hpp +++ b/include/openPMD/backend/Attributable.hpp @@ -375,11 +375,17 @@ OPENPMD_protected * through m_writable-> */ AbstractIOHandler *IOHandler() { - return m_attri->m_writable.IOHandler.get(); + return const_cast( + static_cast(this)->IOHandler()); } AbstractIOHandler const *IOHandler() const { - return m_attri->m_writable.IOHandler.get(); + auto &opt = m_attri->m_writable.IOHandler; + if (!opt || !opt->has_value()) + { + return nullptr; + } + return &*opt->value(); } Writable *&parent() { diff --git a/include/openPMD/backend/Writable.hpp b/include/openPMD/backend/Writable.hpp index 81d83955f9..b83d80331a 100644 --- a/include/openPMD/backend/Writable.hpp +++ b/include/openPMD/backend/Writable.hpp @@ -48,7 +48,8 @@ class Span; namespace internal { class AttributableData; -} + class SeriesData; +} // namespace internal /** @brief Layer to mirror structure of logical data and persistent data in * file. @@ -63,6 +64,7 @@ namespace internal class Writable final { friend class internal::AttributableData; + friend class internal::SeriesData; friend class Attributable; template friend class BaseRecord; @@ -121,7 +123,14 @@ OPENPMD_private * Writable may share them. */ std::shared_ptr abstractFilePosition = nullptr; - std::shared_ptr IOHandler = nullptr; + /* + * shared_ptr since the IOHandler is shared by multiple Writable instances. + * optional to make it possible to release the IOHandler, without first + * having to destroy every single Writable. + * unique_ptr since AbstractIOHandler is an abstract class. 
+ */ + std::shared_ptr>> + IOHandler = nullptr; internal::AttributableData *attributable = nullptr; Writable *parent = nullptr; bool dirty = true; diff --git a/src/IO/AbstractIOHandlerHelper.cpp b/src/IO/AbstractIOHandlerHelper.cpp index 0a0a32f53f..ff9471c5dd 100644 --- a/src/IO/AbstractIOHandlerHelper.cpp +++ b/src/IO/AbstractIOHandlerHelper.cpp @@ -39,12 +39,12 @@ namespace openPMD namespace { template - std::shared_ptr + std::unique_ptr constructIOHandler(std::string const &backendName, Args &&...args) { if constexpr (enabled) { - return std::make_shared(std::forward(args)...); + return std::make_unique(std::forward(args)...); } else { @@ -59,7 +59,7 @@ namespace #if openPMD_HAVE_MPI template <> -std::shared_ptr createIOHandler( +std::unique_ptr createIOHandler( std::string path, Access access, Format format, @@ -130,7 +130,7 @@ std::shared_ptr createIOHandler( #endif template <> -std::shared_ptr createIOHandler( +std::unique_ptr createIOHandler( std::string path, Access access, Format format, @@ -195,7 +195,7 @@ std::shared_ptr createIOHandler( } } -std::shared_ptr createIOHandler( +std::unique_ptr createIOHandler( std::string path, Access access, Format format, diff --git a/src/Series.cpp b/src/Series.cpp index 6439f67e29..c25f0f173c 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -540,11 +540,13 @@ namespace } // namespace void Series::init( - std::shared_ptr ioHandler, + std::unique_ptr ioHandler, std::unique_ptr input) { auto &series = get(); - writable().IOHandler = ioHandler; + writable().IOHandler = + std::make_shared>>( + std::move(ioHandler)); series.iterations.linkHierarchy(writable()); series.iterations.writable().ownKeyWithinParent = {"iterations"}; @@ -1941,26 +1943,7 @@ namespace internal // we must not throw in a destructor try { - // WriteIterations gets the first shot at flushing - this->m_writeIterations = std::optional(); - /* - * Scenario: A user calls `Series::flush()` but does not check for - * thrown exceptions. 
The exception will propagate further up, - * usually thereby popping the stack frame that holds the `Series` - * object. `Series::~Series()` will run. This check avoids that the - * `Series` is needlessly flushed a second time. Otherwise, error - * messages can get very confusing. - */ - if (this->m_lastFlushSuccessful) - { - Series impl{{this, [](auto const *) {}}}; - impl.flush(); - impl.flushStep(/* doFlush = */ true); - } - if (m_writeIterations.has_value()) - { - m_writeIterations = std::optional(); - } + close(); } catch (std::exception const &ex) { @@ -1972,6 +1955,39 @@ namespace internal std::cerr << "[~Series] An error occurred." << std::endl; } } + + void SeriesData::close() + { + // WriteIterations gets the first shot at flushing + this->m_writeIterations = std::optional(); + /* + * Scenario: A user calls `Series::flush()` but does not check for + * thrown exceptions. The exception will propagate further up, + * usually thereby popping the stack frame that holds the `Series` + * object. `Series::~Series()` will run. This check avoids that the + * `Series` is needlessly flushed a second time. Otherwise, error + * messages can get very confusing. 
+ */ + if (this->m_lastFlushSuccessful && m_writable.IOHandler && + m_writable.IOHandler->has_value()) + { + Series impl{{this, [](auto const *) {}}}; + impl.flush(); + impl.flushStep(/* doFlush = */ true); + } + if (m_writeIterations.has_value()) + { + m_writeIterations = std::optional(); + } + // Not strictly necessary, but clear the map of iterations + // This releases the openPMD hierarchy + iterations.container().clear(); + // Release the IO Handler + if (m_writable.IOHandler) + { + *m_writable.IOHandler = std::nullopt; + } + } } // namespace internal Series::Series() : Attributable{nullptr}, iterations{} @@ -2004,7 +2020,7 @@ Series::Series( input->filenameExtension, comm, optionsJson); - init(handler, std::move(input)); + init(std::move(handler), std::move(input)); json::warnGlobalUnusedOptions(optionsJson); } #endif @@ -2021,7 +2037,7 @@ Series::Series( parseJsonOptions(optionsJson, *input); auto handler = createIOHandler( input->path, at, input->format, input->filenameExtension, optionsJson); - init(handler, std::move(input)); + init(std::move(handler), std::move(input)); json::warnGlobalUnusedOptions(optionsJson); } @@ -2047,6 +2063,13 @@ WriteIterations Series::writeIterations() return series.m_writeIterations.value(); } +void Series::close() +{ + get().close(); + m_series.reset(); + m_attri.reset(); +} + auto Series::currentSnapshot() const -> std::optional> { diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index f403eedaf6..a85462261c 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -151,6 +151,15 @@ void init_Series(py::module &m) py::arg("mpi_communicator"), py::arg("options") = "{}") #endif + .def("__bool__", &Series::operator bool) + .def("close", &Series::close, R"( +Closes the Series and release the data storage/transport backends. + +All backends are closed after calling this method. +The Series should be treated as destroyed after calling this method. 
+The Series will be evaluated as false in boolean contexts after calling +this method. + )") .def_property("openPMD", &Series::openPMD, &Series::setOpenPMD) .def_property( diff --git a/test/AuxiliaryTest.cpp b/test/AuxiliaryTest.cpp index 3af7b3741f..491bb37794 100644 --- a/test/AuxiliaryTest.cpp +++ b/test/AuxiliaryTest.cpp @@ -34,7 +34,8 @@ struct TestHelper : public Attributable TestHelper() { writable().IOHandler = - createIOHandler(".", Access::CREATE, Format::JSON, ".json"); + std::make_shared>>( + createIOHandler(".", Access::CREATE, Format::JSON, ".json")); } }; } // namespace openPMD::test @@ -147,7 +148,8 @@ TEST_CASE("container_default_test", "[auxiliary]") #if openPMD_USE_INVASIVE_TESTS Container c = Container(); c.writable().IOHandler = - createIOHandler(".", Access::CREATE, Format::JSON, ".json"); + std::make_shared>>( + createIOHandler(".", Access::CREATE, Format::JSON, ".json")); REQUIRE(c.empty()); REQUIRE(c.erase("nonExistentKey") == false); @@ -185,7 +187,8 @@ TEST_CASE("container_retrieve_test", "[auxiliary]") using structure = openPMD::test::structure; Container c = Container(); c.writable().IOHandler = - createIOHandler(".", Access::CREATE, Format::JSON, ".json"); + std::make_shared>>( + createIOHandler(".", Access::CREATE, Format::JSON, ".json")); structure s; std::string text = @@ -258,7 +261,8 @@ TEST_CASE("container_access_test", "[auxiliary]") using Widget = openPMD::test::Widget; Container c = Container(); c.writable().IOHandler = - createIOHandler(".", Access::CREATE, Format::JSON, ".json"); + std::make_shared>>( + createIOHandler(".", Access::CREATE, Format::JSON, ".json")); c["1firstWidget"] = Widget(0); REQUIRE(c.size() == 1); diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 2f445c5a50..0ded4eaa17 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -6198,50 +6198,49 @@ void chaotic_stream(std::string filename, bool variableBased) bool weirdOrderWhenReading{}; + Series series(filename, Access::CREATE, 
jsonConfig); + /* + * When using ADIOS2 steps, iterations are read not by logical order + * (iteration index), but by order of writing. + */ + weirdOrderWhenReading = series.backend() == "ADIOS2" && + series.iterationEncoding() != IterationEncoding::fileBased; + if (variableBased) { - Series series(filename, Access::CREATE, jsonConfig); - /* - * When using ADIOS2 steps, iterations are read not by logical order - * (iteration index), but by order of writing. - */ - weirdOrderWhenReading = series.backend() == "ADIOS2" && - series.iterationEncoding() != IterationEncoding::fileBased; - if (variableBased) - { - if (series.backend() != "ADIOS2") - { - return; - } - series.setIterationEncoding(IterationEncoding::variableBased); - } - for (auto currentIteration : iterations) + if (series.backend() != "ADIOS2") { - auto dataset = - series.writeIterations()[currentIteration] - .meshes["iterationOrder"][MeshRecordComponent::SCALAR]; - dataset.resetDataset({determineDatatype(), {10}}); - dataset.storeChunk(iterations, {0}, {10}); - // series.writeIterations()[ currentIteration ].close(); + return; } + series.setIterationEncoding(IterationEncoding::variableBased); + } + for (auto currentIteration : iterations) + { + auto dataset = + series.writeIterations()[currentIteration] + .meshes["iterationOrder"][MeshRecordComponent::SCALAR]; + dataset.resetDataset({determineDatatype(), {10}}); + dataset.storeChunk(iterations, {0}, {10}); + // series.writeIterations()[ currentIteration ].close(); } + REQUIRE(series.operator bool()); + series.close(); + REQUIRE(!series.operator bool()); + Series read(filename, Access::READ_ONLY); + size_t index = 0; + for (const auto &iteration : read.readIterations()) { - Series series(filename, Access::READ_ONLY); - size_t index = 0; - for (const auto &iteration : series.readIterations()) + if (weirdOrderWhenReading) { - if (weirdOrderWhenReading) - { - REQUIRE(iteration.iterationIndex == iterations[index]); - } - else - { - 
REQUIRE(iteration.iterationIndex == index); - } - ++index; + REQUIRE(iteration.iterationIndex == iterations[index]); + } + else + { + REQUIRE(iteration.iterationIndex == index); } - REQUIRE(index == iterations.size()); + ++index; } + REQUIRE(index == iterations.size()); } TEST_CASE("chaotic_stream", "[serial]") diff --git a/test/python/unittest/API/APITest.py b/test/python/unittest/API/APITest.py index 93093626a4..94cdd062f7 100644 --- a/test/python/unittest/API/APITest.py +++ b/test/python/unittest/API/APITest.py @@ -252,7 +252,9 @@ def attributeRoundTrip(self, file_ending): # TODO init of > e304 ? series.set_attribute("longdouble_c", ctypes.c_longdouble(6.e200).value) - del series + self.assertTrue(series) + series.close() + self.assertFalse(series) # read back series = io.Series( @@ -461,7 +463,9 @@ def makeConstantRoundTrip(self, file_ending): np.clongdouble(1.23456789 + 2.34567890j)) # flush and close file - del series + self.assertTrue(series) + series.close() + self.assertFalse(series) # read back series = io.Series( @@ -609,7 +613,9 @@ def makeDataRoundTrip(self, file_ending): np.clongdouble(1.23456789 + 2.34567890j)) # flush and close file - del series + self.assertTrue(series) + series.close() + self.assertFalse(series) # read back series = io.Series( @@ -690,7 +696,9 @@ def makeEmptyRoundTrip(self, file_ending): ms["np_double"][SCALAR].make_empty(np.dtype("double"), 21) # flush and close file - del series + self.assertTrue(series) + series.close() + self.assertFalse(series) # read back series = io.Series( @@ -1607,7 +1615,9 @@ def backend_particle_patches(self, file_ending): e.particle_patches["extent"]["y"].store(1, np.single(123.)) # read back - del series + self.assertTrue(series) + series.close() + self.assertFalse(series) series = io.Series( "unittest_py_particle_patches." 
+ file_ending, @@ -1715,7 +1725,9 @@ def makeCloseIterationRoundTrip(self, file_ending): for i in range(len(data)): self.assertEqual(data[i], chunk[i]) - del read + self.assertTrue(read) + read.close() + self.assertFalse(read) it1 = series.iterations[1] E_x = it1.meshes["E"]["x"] @@ -1737,7 +1749,9 @@ def makeCloseIterationRoundTrip(self, file_ending): for i in range(len(data)): self.assertEqual(data[i], chunk[i]) - del read + self.assertTrue(read) + read.close() + self.assertFalse(read) def testCloseIteration(self): for ext in tested_file_extensions: @@ -1781,7 +1795,9 @@ def makeIteratorRoundTrip(self, backend, file_ending): it.close() del it - del series + self.assertTrue(series) + series.close() + self.assertFalse(series) # read @@ -1804,7 +1820,9 @@ def makeIteratorRoundTrip(self, backend, file_ending): self.assertEqual(chunk2[0, 1], 1) self.assertEqual(chunk2[1, 0], 2) self.assertEqual(chunk2[1, 1], 3) - del read + self.assertTrue(read) + read.close() + self.assertFalse(read) self.assertEqual(lastIterationIndex, 9) def testIterator(self): @@ -1838,6 +1856,9 @@ def makeAvailableChunksRoundTrip(self, ext): data3 = np.array([[2], [4], [6], [8]], dtype=np.dtype("int")) E_x.store_chunk(data3, [6, 0], [4, 1]) + # Cleaner: write.close() + # But let's keep this instance to test that that workflow stays + # functional. 
del write read = io.Series( @@ -1899,7 +1920,9 @@ def writeFromTemporary(self, ext): self.writeFromTemporaryStore(E_x) gc.collect() # trigger removal of temporary data to check its copied - del write + self.assertTrue(write) + write.close() + self.assertFalse(write) read = io.Series( name, @@ -1995,7 +2018,9 @@ def testJsonConfigADIOS2(self): E_y.reset_dataset(DS(np.dtype("double"), [1000], local_config)) E_y.store_chunk(data, [0], [1000]) - del series + self.assertTrue(series) + series.close() + self.assertFalse(series) read = io.Series( "../samples/unittest_jsonConfiguredBP3.bp", @@ -2051,7 +2076,9 @@ def testCustomGeometries(self): e_chargeDensity_x.reset_dataset(DS(DT.LONG, [10])) e_chargeDensity_x[:] = sample_data - del write + self.assertTrue(write) + write.close() + self.assertFalse(write) read = io.Series("../samples/custom_geometries_python.json", io.Access.read_only) From df097bbc0a2d251c82db8f86e3306b51b07ea682 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 19 Dec 2022 06:12:45 -0800 Subject: [PATCH 16/82] Python Tests: Fix `long` Numpy Type (#1348) Ref.: https://numpy.org/doc/stable/user/basics.types.html --- test/python/unittest/API/APITest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/python/unittest/API/APITest.py b/test/python/unittest/API/APITest.py index 94cdd062f7..db1948ef3a 100644 --- a/test/python/unittest/API/APITest.py +++ b/test/python/unittest/API/APITest.py @@ -2048,7 +2048,7 @@ def testError(self): def testCustomGeometries(self): DS = io.Dataset DT = io.Datatype - sample_data = np.ones([10], dtype=np.long) + sample_data = np.ones([10], dtype=np.int_) write = io.Series("../samples/custom_geometries_python.json", io.Access.create) From a1671958552e3f5a9d404d3659a9f4fb195e570a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 20 Dec 2022 11:53:48 +0100 Subject: [PATCH 17/82] [pre-commit.ci] pre-commit autoupdate (#1349) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/mirrors-clang-format: v15.0.4 → v15.0.6](https://github.com/pre-commit/mirrors-clang-format/compare/v15.0.4...v15.0.6) - [github.com/pycqa/isort: 5.11.1 → v5.11.3](https://github.com/pycqa/isort/compare/5.11.1...v5.11.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index f1b8f25eca..ce69f36ff7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -65,7 +65,7 @@ repos: # clang-format v13 # to run manually, use .github/workflows/clang-format/clang-format.sh - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v15.0.4 + rev: v15.0.6 hooks: - id: clang-format @@ -79,7 +79,7 @@ repos: # Sorts Python imports according to PEP8 # https://www.python.org/dev/peps/pep-0008/#imports - repo: https://github.com/pycqa/isort - rev: 5.11.1 + rev: v5.11.3 hooks: - id: isort name: isort (python) From e0a74bd1df40499f2e719b286edb3a61d0106cce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 20 Dec 2022 11:55:29 +0100 Subject: [PATCH 18/82] Add switches --mpi and --no-mpi to openpmd-pipe (#1336) * Add switches --mpi and --no-mpi to openpmd-pipe * Fix order of MPI checking --- .../python/openpmd_api/pipe/__main__.py | 86 ++++++++++++------- 1 file changed, 54 insertions(+), 32 deletions(-) diff --git a/src/binding/python/openpmd_api/pipe/__main__.py b/src/binding/python/openpmd_api/pipe/__main__.py index f517ae6991..d7f0590567 100755 --- a/src/binding/python/openpmd_api/pipe/__main__.py +++ b/src/binding/python/openpmd_api/pipe/__main__.py @@ -14,29 +14,6 @@ from .. 
import openpmd_api_cxx as io -# MPI is an optional dependency -if io.variants['mpi']: - try: - from mpi4py import MPI - HAVE_MPI = True - except (ImportError, ModuleNotFoundError): - print(""" -openPMD-api was built with support for MPI, -but mpi4py Python package was not found. -Will continue in serial mode.""", - file=sys.stderr) - HAVE_MPI = False -else: - HAVE_MPI = False - -debug = False - - -class FallbackMPICommunicator: - def __init__(self): - self.size = 1 - self.rank = 0 - def parse_args(program_name): parser = argparse.ArgumentParser( @@ -51,8 +28,19 @@ def parse_args(program_name): or multiplexing the data path in streaming setups. Parallelization with MPI is optionally possible and is done automatically as soon as the mpi4py package is found and this tool is called in an MPI -context. In that case, each dataset will be equally sliced along the dimension -with the largest extent. +context. +Parallelization with MPI is optionally possible and can be switched on with +the --mpi switch, resp. switched off with the --no-mpi switch. +By default, openpmd-pipe will use MPI if all of the following conditions +are fulfilled: +1) The mpi4py package can be imported. +2) The openPMD-api has been built with support for MPI. +3) The MPI size is greater than 1. + By default, the openPMD-api will be initialized without an MPI communicator + if the MPI size is 1. This is to simplify the use of the JSON backend + which is only available in serial openPMD. +With parallelization enabled, each dataset will be equally sliced along +the dimension with the largest extent. 
Examples: {0} --infile simData.h5 --outfile simData_%T.bp @@ -72,10 +60,45 @@ def parse_args(program_name): type=str, default='{}', help='JSON config for the out file') + # MPI, default: Import mpi4py if available and openPMD is parallel, + # but don't use if MPI size is 1 (this makes it easier to interact with + # JSON, since that backend is unavailable in parallel) + if io.variants['mpi']: + parser.add_argument('--mpi', action='store_true') + parser.add_argument('--no-mpi', dest='mpi', action='store_false') + parser.set_defaults(mpi=None) return parser.parse_args() +args = parse_args(sys.argv[0]) +# MPI is an optional dependency +if io.variants['mpi'] and (args.mpi is None or args.mpi): + try: + from mpi4py import MPI + HAVE_MPI = True + except (ImportError, ModuleNotFoundError): + if args.mpi: + raise + else: + print(""" + openPMD-api was built with support for MPI, + but mpi4py Python package was not found. + Will continue in serial mode.""", + file=sys.stderr) + HAVE_MPI = False +else: + HAVE_MPI = False + +debug = False + + +class FallbackMPICommunicator: + def __init__(self): + self.size = 1 + self.rank = 0 + + class Chunk: """ A Chunk is an n-dimensional hypercube, defined by an offset and an extent. 
@@ -178,7 +201,7 @@ def __init__(self, infile, outfile, inconfig, outconfig, comm): self.comm = comm def run(self): - if self.comm.size == 1: + if not HAVE_MPI or (args.mpi is None and self.comm.size == 1): print("Opening data source") sys.stdout.flush() inseries = io.Series(self.infile, io.Access.read_only, @@ -320,16 +343,15 @@ def __copy(self, src, dest, current_path="/data/"): def main(): - args = parse_args(sys.argv[0]) if not args.infile or not args.outfile: print("Please specify parameters --infile and --outfile.") sys.exit(1) - if (HAVE_MPI): - run_pipe = pipe(args.infile, args.outfile, args.inconfig, - args.outconfig, MPI.COMM_WORLD) + if HAVE_MPI: + communicator = MPI.COMM_WORLD else: - run_pipe = pipe(args.infile, args.outfile, args.inconfig, - args.outconfig, FallbackMPICommunicator()) + communicator = FallbackMPICommunicator() + run_pipe = pipe(args.infile, args.outfile, args.inconfig, args.outconfig, + communicator) run_pipe.run() From 19f5f883e81138179bd131d482e2011f021686f7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 20 Dec 2022 11:57:59 +0100 Subject: [PATCH 19/82] Document why m_backendAccess and m_frontendAccess are distinguished (#1337) --- include/openPMD/IO/AbstractIOHandler.hpp | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp index 7627b66524..c3ac9d91e5 100644 --- a/include/openPMD/IO/AbstractIOHandler.hpp +++ b/include/openPMD/IO/AbstractIOHandler.hpp @@ -211,7 +211,21 @@ class AbstractIOHandler virtual std::string backendName() const = 0; std::string const directory; - // why do these need to be separate? + /* + * Originally, the reason for distinguishing these two was that during + * parsing in reading access modes, the access type would be temporarily + * const_cast'ed to an access type that would support modifying + * the openPMD object model. 
Then, it would be const_cast'ed back to + * READ_ONLY, to disable further modifications. + * Due to this approach's tendency to cause subtle bugs, and due to its + * difficult debugging properties, this was replaced by the SeriesStatus + * enum, defined in this file. + * The distinction of backendAccess and frontendAccess stays relevant, since + * the frontend can use it in order to pretend to the backend that another + * access type is being used. This is used by the file-based append mode, + * which is entirely implemented by the frontend, which internally uses + * the backend in CREATE mode. + */ Access const m_backendAccess; Access const m_frontendAccess; internal::SeriesStatus m_seriesStatus = internal::SeriesStatus::Default; From babddfba7f99b67612434191c3325d97406b5709 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 20 Dec 2022 11:59:16 +0100 Subject: [PATCH 20/82] Python: grid_spacing & time_offset `double` (#1290) In `pybind11`, overloads on types are order-dependent (first wins). https://github.com/pybind/pybind11/issues/1512 We specialize `double` here generically and cast in read if needed (see #345 #1137). Later on, we could add support for 1D numpy arrays with distinct type. 
--- src/binding/python/Mesh.cpp | 22 ++++++---------------- 1 file changed, 6 insertions(+), 16 deletions(-) diff --git a/src/binding/python/Mesh.cpp b/src/binding/python/Mesh.cpp index 43b078b9d8..744712b05e 100644 --- a/src/binding/python/Mesh.cpp +++ b/src/binding/python/Mesh.cpp @@ -71,35 +71,25 @@ void init_Mesh(py::module &m) [](Mesh &mesh, char d) { mesh.setDataOrder(Mesh::DataOrder(d)); }, "Data Order of the Mesh (deprecated and set to C in openPMD 2)") .def_property("axis_labels", &Mesh::axisLabels, &Mesh::setAxisLabels) - .def_property( - "grid_spacing", - &Mesh::gridSpacing, - &Mesh::setGridSpacing) + + // note: overloads on types are order-dependent (first wins) + // https://github.com/pybind/pybind11/issues/1512 + // We specialize `double` here generically and cast in read if needed. + // Later on, we could add support for 1D numpy arrays with distinct + // type. .def_property( "grid_spacing", &Mesh::gridSpacing, &Mesh::setGridSpacing) - .def_property( - "grid_spacing", - &Mesh::gridSpacing, - &Mesh::setGridSpacing) .def_property( "grid_global_offset", &Mesh::gridGlobalOffset, &Mesh::setGridGlobalOffset) .def_property("grid_unit_SI", &Mesh::gridUnitSI, &Mesh::setGridUnitSI) - .def_property( - "time_offset", - &Mesh::timeOffset, - &Mesh::setTimeOffset) .def_property( "time_offset", &Mesh::timeOffset, &Mesh::setTimeOffset) - .def_property( - "time_offset", - &Mesh::timeOffset, - &Mesh::setTimeOffset) // TODO remove in future versions (deprecated) .def("set_unit_dimension", &Mesh::setUnitDimension) From 990ac95ddd08572b72abfbf4bff9d154c1e2ca78 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Jan 2023 11:37:08 +0100 Subject: [PATCH 21/82] [pre-commit.ci] pre-commit autoupdate (#1353) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pycqa/isort: v5.11.3 → 5.11.4](https://github.com/pycqa/isort/compare/v5.11.3...5.11.4) 
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ce69f36ff7..4c90802507 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -79,7 +79,7 @@ repos: # Sorts Python imports according to PEP8 # https://www.python.org/dev/peps/pep-0008/#imports - repo: https://github.com/pycqa/isort - rev: v5.11.3 + rev: 5.11.4 hooks: - id: isort name: isort (python) From 599ac5ae8d4a8ee9dc5f1fdb10796251e5ed77c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Wed, 4 Jan 2023 14:13:13 +0100 Subject: [PATCH 22/82] Parsing logic: fail gracefully on unexpected input (#1237) * Add ReadError error type * Move error throwing outside of ADIOS1 library Some weird compiler configurations don't understand error type symbols across libraries, so keep the symbols entirely to the main library. * Backend additions: ADIOS2 * Backend additions: ADIOS1 * Backend additions: HDF5 * Backend additions: JSON * Fully clear task queue upon failure * Error handling at Series level This is the commit with the most complicated logic changes. It implements skipping broken iterations at the Series level. This includes the Streaming API (Series::readIterations()), as well as our three iteration encodings. On the positive side: The most complex logic changes have already been prepared in the topic-adios2-append PR * Error handling throughout the openPMD object model Nothing too complicated, just tedious * Testing, and adapt tests to new Error types * Replace no_such_file_error by ReadError `using no_such_file_error = error::ReadError` for backwards compatibility in user code. 
* Forward errors instead of creating new ones @todo: Check if this needs to be done elsewhere * Test opening single broken filebased iterations * Use std::stable_sort in ADIOS2 destructor * Add removed .cpp files back to ADIOS1 backend implementation * Code review * Add export attribute to functions in ThrowError.hpp --- CMakeLists.txt | 9 +- include/openPMD/Error.hpp | 41 +++ include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp | 7 +- include/openPMD/IO/AbstractIOHandler.hpp | 52 ++- include/openPMD/IO/AbstractIOHandlerImpl.hpp | 9 +- include/openPMD/Iteration.hpp | 2 + include/openPMD/ReadIterations.hpp | 14 +- include/openPMD/Series.hpp | 8 +- include/openPMD/ThrowError.hpp | 70 ++++ include/openPMD/auxiliary/JSON_internal.hpp | 2 - include/openPMD/backend/Container.hpp | 1 + src/Error.cpp | 83 +++++ src/IO/ADIOS/ADIOS1IOHandler.cpp | 19 +- src/IO/ADIOS/ADIOS2IOHandler.cpp | 91 ++++- src/IO/ADIOS/CommonADIOS1IOHandler.cpp | 184 ++++++---- src/IO/ADIOS/ParallelADIOS1IOHandler.cpp | 18 +- src/IO/HDF5/HDF5IOHandler.cpp | 248 +++++++++---- src/IO/JSON/JSONIOHandlerImpl.cpp | 31 +- src/Iteration.cpp | 239 +++++++++---- src/Mesh.cpp | 84 ++++- src/ParticlePatches.cpp | 48 ++- src/ParticleSpecies.cpp | 34 +- src/ReadIterations.cpp | 110 +++++- src/Record.cpp | 37 +- src/RecordComponent.cpp | 24 +- src/Series.cpp | 355 +++++++++++++++---- src/auxiliary/JSON.cpp | 1 + src/backend/Attributable.cpp | 17 +- src/backend/MeshRecordComponent.cpp | 9 +- src/backend/PatchRecord.cpp | 24 +- src/backend/PatchRecordComponent.cpp | 8 +- test/JSONTest.cpp | 1 + test/ParallelIOTest.cpp | 20 +- test/SerialIOTest.cpp | 243 +++++++++++-- 34 files changed, 1710 insertions(+), 433 deletions(-) create mode 100644 include/openPMD/ThrowError.hpp diff --git a/CMakeLists.txt b/CMakeLists.txt index c52373361e..b7a3425633 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -540,14 +540,12 @@ set(IO_SOURCE src/IO/ADIOS/ADIOS2PreloadAttributes.cpp src/IO/InvalidatableFile.cpp) 
set(IO_ADIOS1_SEQUENTIAL_SOURCE - src/Error.cpp src/auxiliary/Filesystem.cpp src/ChunkInfo.cpp src/IO/ADIOS/CommonADIOS1IOHandler.cpp src/IO/ADIOS/ADIOS1IOHandler.cpp src/IO/IOTask.cpp) set(IO_ADIOS1_SOURCE - src/Error.cpp src/auxiliary/Filesystem.cpp src/ChunkInfo.cpp src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -674,6 +672,9 @@ if(openPMD_HAVE_ADIOS1) target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE openPMD_HAVE_ADIOS1=1) target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE openPMD_HAVE_MPI=0) target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE _NOMPI) # ADIOS header + # This ensures that the ADIOS1 targets don't ever include Error.hpp + # To avoid incompatible error types in weird compile configurations + target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE OPENPMD_ADIOS1_IMPLEMENTATION) if(openPMD_HAVE_MPI) set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES @@ -711,6 +712,9 @@ if(openPMD_HAVE_ADIOS1) target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE openPMD_HAVE_MPI=0) target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE _NOMPI) # ADIOS header endif() + # This ensures that the ADIOS1 targets don't ever include Error.hpp + # To avoid incompatible error types in weird compile configurations + target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE OPENPMD_ADIOS1_IMPLEMENTATION) # Runtime parameter and API status checks ("asserts") if(openPMD_USE_VERIFY) @@ -909,6 +913,7 @@ if(openPMD_USE_INVASIVE_TESTS) message(WARNING "Invasive tests that redefine class signatures are " "known to fail on Windows!") endif() + target_compile_definitions(openPMD PRIVATE openPMD_USE_INVASIVE_TESTS=1) endif() if(openPMD_BUILD_TESTING) diff --git a/include/openPMD/Error.hpp b/include/openPMD/Error.hpp index e172670bcc..9845cdcdf0 100644 --- a/include/openPMD/Error.hpp +++ b/include/openPMD/Error.hpp @@ -1,10 +1,17 @@ #pragma once +#include "openPMD/ThrowError.hpp" + #include +#include #include #include #include +#if 
defined(OPENPMD_ADIOS1_IMPLEMENTATION) +static_assert(false, "ADIOS1 implementation must not include Error.hpp"); +#endif + namespace openPMD { /** @@ -80,5 +87,39 @@ namespace error public: Internal(std::string const &what); }; + + /* + * Read error concerning a specific object. + */ + class ReadError : public Error + { + public: + AffectedObject affectedObject; + Reason reason; + // If empty, then the error is thrown by the frontend + std::optional backend; + std::string description; // object path, further details, ... + + ReadError( + AffectedObject, + Reason, + std::optional backend_in, + std::string description_in); + }; + + /* + * Inrecoverable parse error from the frontend. + */ + class ParseError : public Error + { + public: + ParseError(std::string what); + }; } // namespace error + +/** + * @brief Backward-compatibility alias for no_such_file_error. + * + */ +using no_such_file_error = error::ReadError; } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index bc8ea80ad5..7c0dd1a44e 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -1070,6 +1070,9 @@ namespace detail template void enqueue(BA &&ba, decltype(m_buffer) &); + template + void flush(Args &&...args); + struct ADIOS2FlushParams { /* @@ -1103,7 +1106,7 @@ namespace detail * deferred IO tasks had been queued. */ template - void flush( + void flush_impl( ADIOS2FlushParams flushParams, F &&performPutsGets, bool writeAttributes, @@ -1114,7 +1117,7 @@ namespace detail * and does not flush unconditionally. * */ - void flush(ADIOS2FlushParams, bool writeAttributes = false); + void flush_impl(ADIOS2FlushParams, bool writeAttributes = false); /** * @brief Begin or end an ADIOS step. 
diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp index c3ac9d91e5..f444996c66 100644 --- a/include/openPMD/IO/AbstractIOHandler.hpp +++ b/include/openPMD/IO/AbstractIOHandler.hpp @@ -34,18 +34,10 @@ #include #include #include +#include namespace openPMD { -class no_such_file_error : public std::runtime_error -{ -public: - no_such_file_error(std::string const &what_arg) - : std::runtime_error(what_arg) - {} - virtual ~no_such_file_error() - {} -}; class unsupported_data_error : public std::runtime_error { @@ -143,6 +135,48 @@ namespace internal ///< Special state only active while internal routines are ///< running. }; + + // @todo put this somewhere else + template + auto withRWAccess(SeriesStatus &status, Functor &&functor, Args &&...args) + -> decltype(std::forward(functor)(std::forward(args)...)) + { + using Res = decltype(std::forward(functor)( + std::forward(args)...)); + if constexpr (std::is_void_v) + { + auto oldStatus = status; + status = internal::SeriesStatus::Parsing; + try + { + std::forward(functor)(); + } + catch (...) + { + status = oldStatus; + throw; + } + status = oldStatus; + return; + } + else + { + auto oldStatus = status; + status = internal::SeriesStatus::Parsing; + Res res; + try + { + res = std::forward(functor)(); + } + catch (...) + { + status = oldStatus; + throw; + } + status = oldStatus; + return res; + } + } } // namespace internal /** Interface for communicating between logical and physically persistent data. diff --git a/include/openPMD/IO/AbstractIOHandlerImpl.hpp b/include/openPMD/IO/AbstractIOHandlerImpl.hpp index 04820a28d4..79e9b35739 100644 --- a/include/openPMD/IO/AbstractIOHandlerImpl.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImpl.hpp @@ -208,10 +208,13 @@ class AbstractIOHandlerImpl { std::cerr << "[AbstractIOHandlerImpl] IO Task " << internal::operationAsString(i.operation) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." 
+ << " failed with exception. Clearing IO queue and " + "passing on the exception." << std::endl; - (*m_handler).m_work.pop(); + while (!m_handler->m_work.empty()) + { + m_handler->m_work.pop(); + } throw; } (*m_handler).m_work.pop(); diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index 2393b93322..179ade29fe 100644 --- a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -286,6 +286,8 @@ class Iteration : public Attributable std::string filePath, std::string const &groupPath, bool beginStep); void readGorVBased(std::string const &groupPath, bool beginStep); void read_impl(std::string const &groupPath); + void readMeshes(std::string const &meshesPath); + void readParticles(std::string const &particlesPath); /** * Status after beginning an IO step. Currently includes: diff --git a/include/openPMD/ReadIterations.hpp b/include/openPMD/ReadIterations.hpp index c5b2720dce..7d6266e4f0 100644 --- a/include/openPMD/ReadIterations.hpp +++ b/include/openPMD/ReadIterations.hpp @@ -104,9 +104,21 @@ class SeriesIterator std::optional nextIterationInStep(); - std::optional nextStep(); + /* + * When a step cannot successfully be opened, the method nextStep() calls + * itself again recursively. + * (Recursion massively simplifies the logic here, and it only happens + * in case of error.) + * After successfully beginning a step, this methods needs to remember, how + * many broken steps have been skipped. In case the Series does not use + * the /data/snapshot attribute, this helps figuring out which iteration + * is now active. Hence, recursion_depth. + */ + std::optional nextStep(size_t recursion_depth); std::optional loopBody(); + + void deactivateDeadIteration(iteration_index_t); }; /** diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp index f3c40582fb..30b618db40 100644 --- a/include/openPMD/Series.hpp +++ b/include/openPMD/Series.hpp @@ -611,9 +611,15 @@ OPENPMD_private * Iterations/Records/Record Components etc. 
* If series.iterations contains the attribute `snapshot`, returns its * value. + * If do_always_throw_errors is false, this method will try to handle errors + * and turn them into a warning (useful when parsing a Series, since parsing + * should succeed without issue). + * If true, the error will always be re-thrown (useful when using + * ReadIterations since those methods should be aware when the current step + * is broken). */ std::optional > - readGorVBased(bool init = true); + readGorVBased(bool do_always_throw_errors, bool init); void readBase(); std::string iterationFilename(IterationIndex_t i); diff --git a/include/openPMD/ThrowError.hpp b/include/openPMD/ThrowError.hpp new file mode 100644 index 0000000000..eae561aff7 --- /dev/null +++ b/include/openPMD/ThrowError.hpp @@ -0,0 +1,70 @@ +/* Copyright 2022 Franz Poeschel + * + * This file is part of openPMD-api. + * + * openPMD-api is free software: you can redistribute it and/or modify + * it under the terms of of either the GNU General Public License or + * the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * openPMD-api is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with openPMD-api. + * If not, see . + */ + +/* + * For objects that must not include Error.hpp but still need to throw errors. + * In some exotic compiler configurations (clang-6 with libc++), + * including Error.hpp into the ADIOS1 backend leads to incompatible error type + * symbols. + * So, use only the functions defined in here in the ADIOS1 backend. 
+ * Definitions are in Error.cpp. + */ + +#pragma once + +#include "openPMD/auxiliary/Export.hpp" + +#include +#include +#include + +namespace openPMD::error +{ +enum class AffectedObject +{ + Attribute, + Dataset, + File, + Group, + Other +}; + +enum class Reason +{ + NotFound, + CannotRead, + UnexpectedContent, + Inaccessible, + Other +}; + +[[noreturn]] OPENPMDAPI_EXPORT void +throwBackendConfigSchema(std::vector jsonPath, std::string what); + +[[noreturn]] OPENPMDAPI_EXPORT void +throwOperationUnsupportedInBackend(std::string backend, std::string what); + +[[noreturn]] OPENPMDAPI_EXPORT void throwReadError( + AffectedObject affectedObject, + Reason reason_in, + std::optional backend, + std::string description_in); +} // namespace openPMD::error diff --git a/include/openPMD/auxiliary/JSON_internal.hpp b/include/openPMD/auxiliary/JSON_internal.hpp index e2dc838423..e865ddc7de 100644 --- a/include/openPMD/auxiliary/JSON_internal.hpp +++ b/include/openPMD/auxiliary/JSON_internal.hpp @@ -23,8 +23,6 @@ #include "openPMD/config.hpp" -#include "openPMD/Error.hpp" - #include #include diff --git a/include/openPMD/backend/Container.hpp b/include/openPMD/backend/Container.hpp index 8db82c69f0..c697593a12 100644 --- a/include/openPMD/backend/Container.hpp +++ b/include/openPMD/backend/Container.hpp @@ -141,6 +141,7 @@ class Container : public Attributable friend class Series; template friend class internal::EraseStaleEntries; + friend class SeriesIterator; protected: using ContainerData = internal::ContainerData; diff --git a/src/Error.cpp b/src/Error.cpp index a0331948c8..99096bd54e 100644 --- a/src/Error.cpp +++ b/src/Error.cpp @@ -17,6 +17,13 @@ namespace error , backend{std::move(backend_in)} {} + void + throwOperationUnsupportedInBackend(std::string backend, std::string what) + { + throw OperationUnsupportedInBackend( + std::move(backend), std::move(what)); + } + WrongAPIUsage::WrongAPIUsage(std::string what) : Error("Wrong API usage: " + what) {} @@ -46,11 +53,87 @@ 
namespace error , errorLocation(std::move(errorLocation_in)) {} + void throwBackendConfigSchema( + std::vector jsonPath, std::string what) + { + throw BackendConfigSchema(std::move(jsonPath), std::move(what)); + } + Internal::Internal(std::string const &what) : Error( "Internal error: " + what + "\nThis is a bug. Please report at ' " "https://github.com/openPMD/openPMD-api/issues'.") {} + + namespace + { + std::string asString(AffectedObject obj) + { + switch (obj) + { + using AO = AffectedObject; + case AO::Attribute: + return "Attribute"; + case AO::Dataset: + return "Dataset"; + case AO::File: + return "File"; + case AO::Group: + return "Group"; + case AO::Other: + return "Other"; + } + return "Unreachable"; + } + std::string asString(Reason obj) + { + switch (obj) + { + using Re = Reason; + case Re::NotFound: + return "NotFound"; + case Re::CannotRead: + return "CannotRead"; + case Re::UnexpectedContent: + return "UnexpectedContent"; + case Re::Inaccessible: + return "Inaccessible"; + case Re::Other: + return "Other"; + } + return "Unreachable"; + } + } // namespace + + ReadError::ReadError( + AffectedObject affectedObject_in, + Reason reason_in, + std::optional backend_in, + std::string description_in) + : Error( + (backend_in ? 
("Read Error in backend " + *backend_in) + : "Read Error in frontend ") + + "\nObject type:\t" + asString(affectedObject_in) + + "\nError type:\t" + asString(reason_in) + + "\nFurther description:\t" + description_in) + , affectedObject(affectedObject_in) + , reason(reason_in) + , backend(std::move(backend_in)) + , description(std::move(description_in)) + {} + + void throwReadError( + AffectedObject affectedObject, + Reason reason, + std::optional backend, + std::string description) + { + throw ReadError( + affectedObject, reason, std::move(backend), std::move(description)); + } + + ParseError::ParseError(std::string what) : Error("Parse Error: " + what) + {} } // namespace error } // namespace openPMD diff --git a/src/IO/ADIOS/ADIOS1IOHandler.cpp b/src/IO/ADIOS/ADIOS1IOHandler.cpp index 019e5a8078..662bf1ec69 100644 --- a/src/IO/ADIOS/ADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS1IOHandler.cpp @@ -18,6 +18,7 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . */ + #include "openPMD/IO/ADIOS/ADIOS1IOHandler.hpp" #include "openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp" @@ -157,10 +158,13 @@ std::future ADIOS1IOHandlerImpl::flush() { std::cerr << "[AbstractIOHandlerImpl] IO Task " << internal::operationAsString(i.operation) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." + << " failed with exception. Clearing IO queue and " + "passing on the exception." << std::endl; - handler->m_setup.pop(); + while (!m_handler->m_work.empty()) + { + m_handler->m_work.pop(); + } throw; } handler->m_setup.pop(); @@ -289,10 +293,13 @@ std::future ADIOS1IOHandlerImpl::flush() { std::cerr << "[AbstractIOHandlerImpl] IO Task " << internal::operationAsString(i.operation) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." + << " failed with exception. Clearing IO queue and " + "passing on the exception." 
<< std::endl; - m_handler->m_work.pop(); + while (!m_handler->m_work.empty()) + { + m_handler->m_work.pop(); + } throw; } handler->m_work.pop(); diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index 95362b2687..d768ec5b6b 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -112,7 +112,17 @@ ADIOS2IOHandlerImpl::~ADIOS2IOHandlerImpl() sorted.push_back(std::move(pair.second)); } m_fileData.clear(); - std::sort( + /* + * Technically, std::sort() is sufficient here, since file names are unique. + * Use std::stable_sort() for two reasons: + * 1) On some systems (clang 13.0.1, libc++ 13.0.1), std::sort() leads to + * weird inconsistent segfaults here. + * 2) Robustness against future changes. stable_sort() might become needed + * in future, and debugging this can be hard. + * 3) It does not really matter, this is just the destructor, so we can take + * the extra time. + */ + std::stable_sort( sorted.begin(), sorted.end(), [](auto const &left, auto const &right) { return left->m_file <= right->m_file; }); @@ -685,9 +695,11 @@ void ADIOS2IOHandlerImpl::openFile( { if (!auxiliary::directory_exists(m_handler->directory)) { - throw no_such_file_error( - "[ADIOS2] Supplied directory is not valid: " + - m_handler->directory); + throw error::ReadError( + error::AffectedObject::File, + error::Reason::Inaccessible, + "ADIOS2", + "Supplied directory is not valid: " + m_handler->directory); } std::string name = parameters.name + fileSuffix(); @@ -2305,9 +2317,11 @@ namespace detail if (type == Datatype::UNDEFINED) { - throw std::runtime_error( - "[ADIOS2] Requested attribute (" + name + - ") not found in backend."); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::NotFound, + "ADIOS2", + name); } Datatype ret = switchType( @@ -2325,9 +2339,11 @@ namespace detail if (type == Datatype::UNDEFINED) { - throw std::runtime_error( - "[ADIOS2] Requested attribute (" + name + - ") not found in 
backend."); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::NotFound, + "ADIOS2", + name); } Datatype ret = switchType( @@ -2844,8 +2860,49 @@ namespace detail std::unique_ptr(new _BA(std::forward(ba)))); } + template + void BufferedActions::flush(Args &&...args) + { + try + { + flush_impl(std::forward(args)...); + } + catch (error::ReadError const &) + { + /* + * We need to take actions out of the buffer, since an exception + * should reset everything from the current IOHandler->flush() call. + * However, we cannot simply clear the buffer, since tasks may have + * been enqueued to ADIOS2 already and we cannot undo that. + * So, we need to keep the memory alive for the benefit of ADIOS2. + * Luckily, we have m_alreadyEnqueued for exactly that purpose. + */ + for (auto &task : m_buffer) + { + m_alreadyEnqueued.emplace_back(std::move(task)); + } + m_buffer.clear(); + + // m_attributeWrites and m_attributeReads are for implementing the + // 2021 ADIOS2 schema which will go anyway. + // So, this ugliness here is temporary. + for (auto &task : m_attributeWrites) + { + m_alreadyEnqueued.emplace_back(std::unique_ptr{ + new BufferedAttributeWrite{std::move(task.second)}}); + } + m_attributeWrites.clear(); + /* + * An AttributeRead is not a deferred action, so we can clear it + * immediately. 
+ */ + m_attributeReads.clear(); + throw; + } + } + template - void BufferedActions::flush( + void BufferedActions::flush_impl( ADIOS2FlushParams flushParams, F &&performPutGets, bool writeAttributes, @@ -2947,8 +3004,8 @@ namespace detail } } - void - BufferedActions::flush(ADIOS2FlushParams flushParams, bool writeAttributes) + void BufferedActions::flush_impl( + ADIOS2FlushParams flushParams, bool writeAttributes) { auto decideFlushAPICall = [this, flushTarget = flushParams.flushTarget]( adios2::Engine &engine) { @@ -2984,7 +3041,7 @@ namespace detail #endif }; - flush( + flush_impl( flushParams, [decideFlushAPICall = std::move(decideFlushAPICall)]( BufferedActions &ba, adios2::Engine &eng) { @@ -3019,7 +3076,9 @@ namespace detail { m_IO.DefineAttribute( ADIOS2Defaults::str_usesstepsAttribute, 0); - flush({FlushLevel::UserFlush}, /* writeAttributes = */ false); + flush( + ADIOS2FlushParams{FlushLevel::UserFlush}, + /* writeAttributes = */ false); return AdvanceStatus::RANDOMACCESS; } @@ -3059,7 +3118,7 @@ namespace detail } } flush( - {FlushLevel::UserFlush}, + ADIOS2FlushParams{FlushLevel::UserFlush}, [](BufferedActions &, adios2::Engine &eng) { eng.EndStep(); }, /* writeAttributes = */ true, /* flushUnconditionally = */ true); diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp index 3a13dc7fc8..dded98d94c 100644 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -20,11 +20,10 @@ */ #include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" -#include "openPMD/Error.hpp" +#include "openPMD/ThrowError.hpp" #if openPMD_HAVE_ADIOS1 -#include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp" #include "openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" @@ -425,7 +424,7 @@ void CommonADIOS1IOHandlerImpl::createFile( if (m_handler->m_backendAccess == Access::APPEND && auxiliary::file_exists(name)) { - throw 
error::OperationUnsupportedInBackend( + error::throwOperationUnsupportedInBackend( "ADIOS1", "Appending to existing file on disk (use Access::CREATE to " "overwrite)"); @@ -515,7 +514,7 @@ static std::optional datasetTransform(json::TracingJSON config) } else { - throw error::BackendConfigSchema( + error::throwBackendConfigSchema( {"adios1", "dataset", "transform"}, "Key must convertible to type string."); } @@ -658,9 +657,11 @@ void CommonADIOS1IOHandlerImpl::openFile( Writable *writable, Parameter const ¶meters) { if (!auxiliary::directory_exists(m_handler->directory)) - throw no_such_file_error( - "[ADIOS1] Supplied directory is not valid: " + - m_handler->directory); + error::throwReadError( + error::AffectedObject::File, + error::Reason::Inaccessible, + "ADIOS1", + "Supplied directory is not valid: " + m_handler->directory); std::string name = m_handler->directory + parameters.name; if (!auxiliary::ends_with(name, ".bp")) @@ -1248,15 +1249,22 @@ void CommonADIOS1IOHandlerImpl::readAttribute( int status; status = adios_get_attr(f, attrname.c_str(), &datatype, &size, &data); - VERIFY( - status == 0, - "[ADIOS1] Internal error: Failed to get ADIOS1 attribute during " - "attribute read"); - VERIFY( - datatype != adios_unknown, - "[ADIOS1] Internal error: Read unknown ADIOS1 datatype during " - "attribute read"); - VERIFY(size != 0, "[ADIOS1] Internal error: ADIOS1 read 0-size attribute"); + if (status != 0) + { + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::NotFound, + "ADIOS1", + attrname); + } + if (datatype == adios_unknown) + { + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "ADIOS1", + "Unknown datatype: " + attrname); + } // size is returned in number of allocated bytes // note the ill-named fixed-byte adios_... 
types @@ -1307,9 +1315,11 @@ void CommonADIOS1IOHandlerImpl::readAttribute( break; default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + "' in size check"); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "ADIOS1", + "Unsupported datatype: " + attrname); } Datatype dtype; @@ -1345,9 +1355,12 @@ void CommonADIOS1IOHandlerImpl::readAttribute( a = Attribute(*reinterpret_cast(data)); } else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_short " - "found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type adios_short: " + + attrname); break; case adios_integer: if (sizeof(short) == 4u) @@ -1371,9 +1384,12 @@ void CommonADIOS1IOHandlerImpl::readAttribute( a = Attribute(*reinterpret_cast(data)); } else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_integer " - "found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type adios_integer: " + + attrname); break; case adios_long: if (sizeof(short) == 8u) @@ -1397,9 +1413,12 @@ void CommonADIOS1IOHandlerImpl::readAttribute( a = Attribute(*reinterpret_cast(data)); } else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_long " - "found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type adios_long: " + + attrname); break; case adios_unsigned_byte: dtype = DT::UCHAR; @@ -1427,9 +1446,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( a = Attribute(*reinterpret_cast(data)); } else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_short found."); + error::throwReadError( + 
error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type " + "adios_unsigned_short: " + + attrname); break; case adios_unsigned_integer: if (sizeof(unsigned short) == 4u) @@ -1453,9 +1476,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( a = Attribute(*reinterpret_cast(data)); } else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_integer found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type " + "adios_unsigned_integer: " + + attrname); break; case adios_unsigned_long: if (sizeof(unsigned short) == 8u) @@ -1479,9 +1506,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( a = Attribute(*reinterpret_cast(data)); } else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_long found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type " + "adios_unsigned_long: " + + attrname); break; case adios_real: dtype = DT::FLOAT; @@ -1527,10 +1558,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( break; } default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype " - "'" + - std::to_string(datatype) + "' in scalar branch"); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "Unsupported ADIOS1 attribute datatype '" + + std::to_string(datatype) + + "' in scalar branch: " + attrname); } } else @@ -1565,9 +1599,12 @@ void CommonADIOS1IOHandlerImpl::readAttribute( readVectorAttributeInternal(data, size), DT::VEC_LONGLONG); else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_short " - "found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type 
adios_short: " + + attrname); break; } case adios_integer: { @@ -1587,9 +1624,12 @@ void CommonADIOS1IOHandlerImpl::readAttribute( readVectorAttributeInternal(data, size), DT::VEC_LONGLONG); else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_integer " - "found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type adios_integer: " + + attrname); break; } case adios_long: { @@ -1609,9 +1649,12 @@ void CommonADIOS1IOHandlerImpl::readAttribute( readVectorAttributeInternal(data, size), DT::VEC_LONGLONG); else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_long " - "found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type adios_long: " + + attrname); break; } case adios_unsigned_byte: { @@ -1642,9 +1685,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( readVectorAttributeInternal(data, size), DT::VEC_ULONGLONG); else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_short found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type " + "adios_unsigned_short: " + + attrname); break; } case adios_unsigned_integer: { @@ -1665,9 +1712,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( readVectorAttributeInternal(data, size), DT::VEC_ULONGLONG); else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_integer found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type " + "adios_unsigned_integer: " + + attrname); break; } case adios_unsigned_long: { @@ -1688,9 +1739,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( readVectorAttributeInternal(data, size), 
DT::VEC_ULONGLONG); else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_long found."); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "No native equivalent found for type " + "adios_unsigned_long: " + + attrname); break; } case adios_real: { @@ -1750,10 +1805,13 @@ void CommonADIOS1IOHandlerImpl::readAttribute( } default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype " - "'" + - std::to_string(datatype) + "' in vector branch"); + error::throwReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + "ADIOS1", + "Unsupported ADIOS1 attribute datatype '" + + std::to_string(datatype) + + "' in vector branch: " + attrname); } } diff --git a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp index 20f571e980..fe628e48aa 100644 --- a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp @@ -179,10 +179,13 @@ std::future ParallelADIOS1IOHandlerImpl::flush() { std::cerr << "[AbstractIOHandlerImpl] IO Task " << internal::operationAsString(i.operation) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." + << " failed with exception. Clearing IO queue and " + "passing on the exception." << std::endl; - handler->m_setup.pop(); + while (!m_handler->m_work.empty()) + { + m_handler->m_work.pop(); + } throw; } handler->m_setup.pop(); @@ -309,10 +312,13 @@ std::future ParallelADIOS1IOHandlerImpl::flush() { std::cerr << "[AbstractIOHandlerImpl] IO Task " << internal::operationAsString(i.operation) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." + << " failed with exception. Clearing IO queue and " + "passing on the exception." 
<< std::endl; - m_handler->m_work.pop(); + while (!m_handler->m_work.empty()) + { + m_handler->m_work.pop(); + } throw; } handler->m_work.pop(); diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index 01979d8071..db26b2098d 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -774,8 +774,11 @@ void HDF5IOHandlerImpl::openFile( Writable *writable, Parameter const ¶meters) { if (!auxiliary::directory_exists(m_handler->directory)) - throw no_such_file_error( - "[HDF5] Supplied directory is not valid: " + m_handler->directory); + throw error::ReadError( + error::AffectedObject::File, + error::Reason::Inaccessible, + "HDF5", + "Supplied directory is not valid: " + m_handler->directory); std::string name = m_handler->directory + parameters.name; if (!auxiliary::ends_with(name, ".h5")) @@ -809,7 +812,11 @@ void HDF5IOHandlerImpl::openFile( hid_t file_id; file_id = H5Fopen(name.c_str(), flags, m_fileAccessProperty); if (file_id < 0) - throw no_such_file_error("[HDF5] Failed to open HDF5 file " + name); + throw error::ReadError( + error::AffectedObject::File, + error::Reason::Inaccessible, + "HDF5", + "Failed to open HDF5 file " + name); writable->written = true; writable->abstractFilePosition = std::make_shared("/"); @@ -853,9 +860,15 @@ void HDF5IOHandlerImpl::openPath( node_id = H5Gopen( file.id, concrete_h5_file_position(writable->parent).c_str(), gapl); - VERIFY( - node_id >= 0, - "[HDF5] Internal error: Failed to open HDF5 group during path opening"); + if (node_id < 0) + { + throw error::ReadError( + error::AffectedObject::Group, + error::Reason::NotFound, + "HDF5", + "[HDF5] Internal error: Failed to open HDF5 group during path " + "opening"); + } /* Sanitize path */ std::string path = parameters.path; @@ -866,30 +879,50 @@ void HDF5IOHandlerImpl::openPath( if (!auxiliary::ends_with(path, '/')) path += '/'; path_id = H5Gopen(node_id, path.c_str(), gapl); - VERIFY( - path_id >= 0, - "[HDF5] Internal error: Failed 
to open HDF5 group during path " - "opening"); + if (path_id < 0) + { + throw error::ReadError( + error::AffectedObject::Group, + error::Reason::NotFound, + "HDF5", + "[HDF5] Internal error: Failed to open HDF5 group during path " + "opening"); + } herr_t status; status = H5Gclose(path_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group during path " - "opening"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Group, + error::Reason::Other, + "HDF5", + "[HDF5] Internal error: Failed to close HDF5 group during path " + "opening"); + } } herr_t status; status = H5Gclose(node_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 group during path " - "opening"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Group, + error::Reason::Other, + "HDF5", + "[HDF5] Internal error: Failed to close HDF5 group during path " + "opening"); + } status = H5Pclose(gapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 property during path " - "opening"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Group, + error::Reason::Other, + "HDF5", + "[HDF5] Internal error: Failed to close HDF5 property during path " + "opening"); + } writable->written = true; writable->abstractFilePosition = std::make_shared(path); @@ -1771,18 +1804,30 @@ void HDF5IOHandlerImpl::readAttribute( obj_id = H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); - VERIFY( - obj_id >= 0, - std::string("[HDF5] Internal error: Failed to open HDF5 object '") + - concrete_h5_file_position(writable).c_str() + - "' during attribute read"); + if (obj_id < 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::NotFound, + "HDF5", + std::string("[HDF5] Internal error: Failed to open HDF5 object '") + + concrete_h5_file_position(writable).c_str() + + "' during attribute read"); + } std::string const &attr_name = 
parameters.name; attr_id = H5Aopen(obj_id, attr_name.c_str(), H5P_DEFAULT); - VERIFY( - attr_id >= 0, - std::string("[HDF5] Internal error: Failed to open HDF5 attribute '") + - attr_name + "' (" + concrete_h5_file_position(writable).c_str() + - ") during attribute read"); + if (attr_id < 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::NotFound, + "HDF5", + std::string( + "[HDF5] Internal error: Failed to open HDF5 attribute '") + + attr_name + "' (" + + concrete_h5_file_position(writable).c_str() + + ") during attribute read"); + } hid_t attr_type, attr_space; attr_type = H5Aget_type(attr_id); @@ -1793,10 +1838,15 @@ void HDF5IOHandlerImpl::readAttribute( std::vector maxdims(ndims, 0); status = H5Sget_simple_extent_dims(attr_space, dims.data(), maxdims.data()); - VERIFY( - status == ndims, - "[HDF5] Internal error: Failed to get dimensions during attribute " - "read"); + if (status != ndims) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to get dimensions during attribute " + "read"); + } H5S_class_t attr_class = H5Sget_simple_extent_type(attr_space); Attribute a(0); @@ -1929,7 +1979,10 @@ void HDF5IOHandlerImpl::readAttribute( a = Attribute(static_cast(enumVal)); } else - throw unsupported_data_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "HDF5", "[HDF5] Unsupported attribute enumeration"); } else if (H5Tget_class(attr_type) == H5T_COMPOUND) @@ -1997,21 +2050,33 @@ void HDF5IOHandlerImpl::readAttribute( a = Attribute(cld); } else - throw unsupported_data_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "HDF5", "[HDF5] Unknown complex type representation"); } else - throw unsupported_data_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "HDF5", "[HDF5] Compound 
attribute type not supported"); } else - throw std::runtime_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "HDF5", "[HDF5] Unsupported scalar attribute type"); } else if (attr_class == H5S_SIMPLE) { if (ndims != 1) - throw std::runtime_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "HDF5", "[HDF5] Unsupported attribute (array with ndims != 1)"); if (H5Tequal(attr_type, H5T_NATIVE_CHAR)) @@ -2126,11 +2191,16 @@ void HDF5IOHandlerImpl::readAttribute( { std::vector vc(dims[0]); status = H5Aread(attr_id, attr_type, vc.data()); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to read attribute " + - attr_name + " at " + - concrete_h5_file_position(writable)); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to read attribute " + + attr_name + " at " + + concrete_h5_file_position(writable)); + } for (auto const &val : vc) vs.push_back(auxiliary::strip(std::string(val), {'\0'})); status = H5Dvlen_reclaim( @@ -2148,26 +2218,45 @@ void HDF5IOHandlerImpl::readAttribute( a = Attribute(vs); } else - throw std::runtime_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "HDF5", "[HDF5] Unsupported simple attribute type"); } else throw std::runtime_error("[HDF5] Unsupported attribute class"); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to read attribute " + attr_name + - " at " + concrete_h5_file_position(writable)); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to read attribute " + attr_name + + " at " + concrete_h5_file_position(writable)); + } status = H5Tclose(attr_type); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close attribute datatype during " 
- "attribute read"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to close attribute datatype during " + "attribute read"); + } status = H5Sclose(attr_space); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close attribute file space during " - "attribute read"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to close attribute file space " + "during " + "attribute read"); + } auto dtype = parameters.dtype; *dtype = a.dtype; @@ -2175,21 +2264,36 @@ void HDF5IOHandlerImpl::readAttribute( *resource = a.getResource(); status = H5Aclose(attr_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close attribute " + attr_name + - " at " + concrete_h5_file_position(writable) + - " during attribute read"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to close attribute " + attr_name + + " at " + concrete_h5_file_position(writable) + + " during attribute read"); + } status = H5Oclose(obj_id); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close " + - concrete_h5_file_position(writable) + " during attribute read"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to close " + + concrete_h5_file_position(writable) + " during attribute read"); + } status = H5Pclose(fapl); - VERIFY( - status == 0, - "[HDF5] Internal error: Failed to close HDF5 attribute during " - "attribute read"); + if (status != 0) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::CannotRead, + "HDF5", + "[HDF5] Internal error: Failed to close HDF5 attribute during " + "attribute read"); + } } void 
HDF5IOHandlerImpl::listPaths( diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index 272478789b..8f25b56584 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -22,6 +22,7 @@ #include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp" #include "openPMD/Datatype.hpp" #include "openPMD/DatatypeHelpers.hpp" +#include "openPMD/Error.hpp" #include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/auxiliary/StringManip.hpp" @@ -529,8 +530,11 @@ void JSONIOHandlerImpl::openFile( { if (!auxiliary::directory_exists(m_handler->directory)) { - throw no_such_file_error( - "[JSON] Supplied directory is not valid: " + m_handler->directory); + throw error::ReadError( + error::AffectedObject::File, + error::Reason::Inaccessible, + "JSON", + "Supplied directory is not valid: " + m_handler->directory); } std::string name = parameter.name; @@ -857,11 +861,15 @@ void JSONIOHandlerImpl::readAttribute( auto &jsonLoc = obtainJsonContents(writable)["attributes"]; setAndGetFilePosition(writable); std::string error_msg("[JSON] No such attribute '"); - error_msg.append(name) - .append("' in the given location '") - .append(jsonLoc.dump()) - .append("'."); - VERIFY_ALWAYS(hasKey(jsonLoc, name), error_msg) + if (!hasKey(jsonLoc, name)) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::NotFound, + "JSON", + "Tried looking up attribute '" + name + + "' in object: " + jsonLoc.dump()); + } auto &j = jsonLoc[name]; try { @@ -871,9 +879,12 @@ void JSONIOHandlerImpl::readAttribute( } catch (json::type_error &) { - throw std::runtime_error( - "[JSON] The given location does not contain a properly formatted " - "attribute"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + "JSON", + "No properly formatted attribute with name '" + name + + "' found in object: " + jsonLoc.dump()); } } diff --git a/src/Iteration.cpp 
b/src/Iteration.cpp index 179bf0f97d..54d23589f7 100644 --- a/src/Iteration.cpp +++ b/src/Iteration.cpp @@ -430,7 +430,12 @@ void Iteration::read_impl(std::string const &groupPath) val.has_value()) setDt(val.value()); else - throw std::runtime_error("Unexpected Attribute datatype for 'dt'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'dt' (expected double, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "time"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -446,7 +451,13 @@ void Iteration::read_impl(std::string const &groupPath) val.has_value()) setTime(val.value()); else - throw std::runtime_error("Unexpected Attribute datatype for 'time'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'time' (expected double, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "timeUnitSI"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -455,8 +466,13 @@ void Iteration::read_impl(std::string const &groupPath) val.has_value()) setTimeUnitSI(val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'timeUnitSI'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'timeUnitSI' (expected double, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ @@ -489,97 +505,177 @@ void Iteration::read_impl(std::string const &groupPath) if (hasMeshes) { - pOpen.path = s.meshesPath(); - IOHandler()->enqueue(IOTask(&meshes, pOpen)); + try + { + readMeshes(s.meshesPath()); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read meshes in iteration " << groupPath + << " and will skip them 
due to read error:\n" + << err.what() << std::endl; + meshes = {}; + meshes.dirty() = false; + } + } + else + { + meshes.dirty() = false; + } + + if (hasParticles) + { + try + { + readParticles(s.particlesPath()); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read particles in iteration " << groupPath + << " and will skip them due to read error:\n" + << err.what() << std::endl; + particles = {}; + particles.dirty() = false; + } + } + else + { + particles.dirty() = false; + } - meshes.readAttributes(ReadMode::FullyReread); + readAttributes(ReadMode::FullyReread); +#ifdef openPMD_USE_INVASIVE_TESTS + if (containsAttribute("__openPMD_internal_fail")) + { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::Other, + {}, + "Deliberately failing this iteration for testing purposes"); + } +#endif +} - internal::EraseStaleEntries map{meshes}; +void Iteration::readMeshes(std::string const &meshesPath) +{ + Parameter pOpen; + Parameter pList; - /* obtain all non-scalar meshes */ - IOHandler()->enqueue(IOTask(&meshes, pList)); - IOHandler()->flush(internal::defaultFlushParams); + pOpen.path = meshesPath; + IOHandler()->enqueue(IOTask(&meshes, pOpen)); - Parameter aList; - for (auto const &mesh_name : *pList.paths) - { - Mesh &m = map[mesh_name]; - pOpen.path = mesh_name; - aList.attributes->clear(); - IOHandler()->enqueue(IOTask(&m, pOpen)); - IOHandler()->enqueue(IOTask(&m, aList)); - IOHandler()->flush(internal::defaultFlushParams); + meshes.readAttributes(ReadMode::FullyReread); - auto att_begin = aList.attributes->begin(); - auto att_end = aList.attributes->end(); - auto value = std::find(att_begin, att_end, "value"); - auto shape = std::find(att_begin, att_end, "shape"); - if (value != att_end && shape != att_end) - { - MeshRecordComponent &mrc = m[MeshRecordComponent::SCALAR]; - mrc.parent() = m.parent(); - IOHandler()->enqueue(IOTask(&mrc, pOpen)); - IOHandler()->flush(internal::defaultFlushParams); - 
mrc.get().m_isConstant = true; - } - m.read(); - } + internal::EraseStaleEntries map{meshes}; + + /* obtain all non-scalar meshes */ + IOHandler()->enqueue(IOTask(&meshes, pList)); + IOHandler()->flush(internal::defaultFlushParams); - /* obtain all scalar meshes */ - Parameter dList; - IOHandler()->enqueue(IOTask(&meshes, dList)); + Parameter aList; + for (auto const &mesh_name : *pList.paths) + { + Mesh &m = map[mesh_name]; + pOpen.path = mesh_name; + aList.attributes->clear(); + IOHandler()->enqueue(IOTask(&m, pOpen)); + IOHandler()->enqueue(IOTask(&m, aList)); IOHandler()->flush(internal::defaultFlushParams); - Parameter dOpen; - for (auto const &mesh_name : *dList.datasets) + auto att_begin = aList.attributes->begin(); + auto att_end = aList.attributes->end(); + auto value = std::find(att_begin, att_end, "value"); + auto shape = std::find(att_begin, att_end, "shape"); + if (value != att_end && shape != att_end) { - Mesh &m = map[mesh_name]; - dOpen.name = mesh_name; - IOHandler()->enqueue(IOTask(&m, dOpen)); - IOHandler()->flush(internal::defaultFlushParams); MeshRecordComponent &mrc = m[MeshRecordComponent::SCALAR]; mrc.parent() = m.parent(); - IOHandler()->enqueue(IOTask(&mrc, dOpen)); + IOHandler()->enqueue(IOTask(&mrc, pOpen)); IOHandler()->flush(internal::defaultFlushParams); - mrc.written() = false; - mrc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); - mrc.written() = true; + mrc.get().m_isConstant = true; + } + m.read(); + try + { m.read(); } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read mesh with name '" << mesh_name + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + map.forget(mesh_name); + } } - else + + /* obtain all scalar meshes */ + Parameter dList; + IOHandler()->enqueue(IOTask(&meshes, dList)); + IOHandler()->flush(internal::defaultFlushParams); + + Parameter dOpen; + for (auto const &mesh_name : *dList.datasets) { - meshes.dirty() = false; + Mesh &m = map[mesh_name]; + dOpen.name = 
mesh_name; + IOHandler()->enqueue(IOTask(&m, dOpen)); + IOHandler()->flush(internal::defaultFlushParams); + MeshRecordComponent &mrc = m[MeshRecordComponent::SCALAR]; + mrc.parent() = m.parent(); + IOHandler()->enqueue(IOTask(&mrc, dOpen)); + IOHandler()->flush(internal::defaultFlushParams); + mrc.written() = false; + mrc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); + mrc.written() = true; + try + { + m.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read mesh with name '" << mesh_name + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + map.forget(mesh_name); + } } +} - if (hasParticles) - { - pOpen.path = s.particlesPath(); - IOHandler()->enqueue(IOTask(&particles, pOpen)); +void Iteration::readParticles(std::string const &particlesPath) +{ + Parameter pOpen; + Parameter pList; - particles.readAttributes(ReadMode::FullyReread); + pOpen.path = particlesPath; + IOHandler()->enqueue(IOTask(&particles, pOpen)); - /* obtain all particle species */ - pList.paths->clear(); - IOHandler()->enqueue(IOTask(&particles, pList)); - IOHandler()->flush(internal::defaultFlushParams); + particles.readAttributes(ReadMode::FullyReread); + + /* obtain all particle species */ + pList.paths->clear(); + IOHandler()->enqueue(IOTask(&particles, pList)); + IOHandler()->flush(internal::defaultFlushParams); - internal::EraseStaleEntries map{particles}; - for (auto const &species_name : *pList.paths) + internal::EraseStaleEntries map{particles}; + for (auto const &species_name : *pList.paths) + { + ParticleSpecies &p = map[species_name]; + pOpen.path = species_name; + IOHandler()->enqueue(IOTask(&p, pOpen)); + IOHandler()->flush(internal::defaultFlushParams); + try { - ParticleSpecies &p = map[species_name]; - pOpen.path = species_name; - IOHandler()->enqueue(IOTask(&p, pOpen)); - IOHandler()->flush(internal::defaultFlushParams); p.read(); } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read particle species with 
name '" + << species_name + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + map.forget(species_name); + } } - else - { - particles.dirty() = false; - } - - readAttributes(ReadMode::FullyReread); } auto Iteration::beginStep(bool reread) -> BeginStepStatus @@ -660,7 +756,8 @@ auto Iteration::beginStep( IOHandl->m_seriesStatus = internal::SeriesStatus::Parsing; try { - res.iterationsInOpenedStep = series.readGorVBased(false); + res.iterationsInOpenedStep = series.readGorVBased( + /* do_always_throw_errors = */ true, /* init = */ false); } catch (...) { diff --git a/src/Mesh.cpp b/src/Mesh.cpp index 037e2ca1de..5b91dd26dc 100644 --- a/src/Mesh.cpp +++ b/src/Mesh.cpp @@ -19,6 +19,7 @@ * If not, see . */ #include "openPMD/Mesh.hpp" +#include "openPMD/Error.hpp" #include "openPMD/Series.hpp" #include "openPMD/auxiliary/DerefDynamicCast.hpp" #include "openPMD/auxiliary/StringManip.hpp" @@ -297,8 +298,13 @@ void Mesh::read() setGeometry(tmpGeometry); } else - throw std::runtime_error( - "Unexpected Attribute datatype for 'geometry'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'geometry' (expected a string, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "dataOrder"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -313,12 +319,20 @@ void Mesh::read() if (tmpDataOrder.size() == 1) setDataOrder(static_cast(tmpDataOrder[0])); else - throw std::runtime_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, "Unexpected Attribute value for 'dataOrder': " + tmpDataOrder); } else - throw std::runtime_error( - "Unexpected Attribute datatype for 'dataOrder'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'dataOrder' (expected char or " + "string, found " + + 
datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "axisLabels"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -327,8 +341,13 @@ void Mesh::read() setAxisLabels( Attribute(*aRead.resource).get >()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'axisLabels'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'axisLabels' (expected a vector " + "of string, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "gridSpacing"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -345,8 +364,13 @@ void Mesh::read() else if (auto val = a.getOptional >(); val.has_value()) setGridSpacing(val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'gridSpacing'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'gridSpacing' (expected a " + "vector of double, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "gridGlobalOffset"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -356,8 +380,13 @@ void Mesh::read() val.has_value()) setGridGlobalOffset(val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'gridGlobalOffset'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'gridGlobalOffset' (expected a " + "vector of double, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "gridUnitSI"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -366,8 +395,13 @@ void Mesh::read() val.has_value()) setGridUnitSI(val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'gridUnitSI'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected 
Attribute datatype for 'gridUnitSI' (expected double, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); if (scalar()) { @@ -387,7 +421,17 @@ void Mesh::read() pOpen.path = component; IOHandler()->enqueue(IOTask(&rc, pOpen)); rc.get().m_isConstant = true; - rc.read(); + try + { + rc.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read record component '" << component + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + map.forget(component); + } } Parameter dList; @@ -404,7 +448,17 @@ void Mesh::read() rc.written() = false; rc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); rc.written() = true; - rc.read(); + try + { + rc.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read record component '" << component + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + map.forget(component); + } } } diff --git a/src/ParticlePatches.cpp b/src/ParticlePatches.cpp index 76017bbf94..8cd48e212f 100644 --- a/src/ParticlePatches.cpp +++ b/src/ParticlePatches.cpp @@ -18,7 +18,11 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
*/ + #include "openPMD/ParticlePatches.hpp" +#include "openPMD/Error.hpp" + +#include namespace openPMD { @@ -43,7 +47,17 @@ void ParticlePatches::read() PatchRecord &pr = (*this)[record_name]; pOpen.path = record_name; IOHandler()->enqueue(IOTask(&pr, pOpen)); - pr.read(); + try + { + pr.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read patch record '" << record_name + << "' due to read error and will skip it:" << err.what() + << std::endl; + this->container().erase(record_name); + } } Parameter dList; @@ -55,9 +69,13 @@ void ParticlePatches::read() { if (!("numParticles" == component_name || "numParticlesOffset" == component_name)) - throw std::runtime_error( - "Unexpected record component" + component_name + - "in particlePatch"); + { + + std::cerr << "Unexpected record component" + component_name + + "in particlePatch. Will ignore it." + << std::endl; + continue; + } PatchRecord &pr = Container::operator[](component_name); PatchRecordComponent &prc = pr[RecordComponent::SCALAR]; @@ -68,8 +86,13 @@ void ParticlePatches::read() IOHandler()->flush(internal::defaultFlushParams); if (determineDatatype() != *dOpen.dtype) - throw std::runtime_error( - "Unexpected datatype for " + component_name); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected datatype for " + component_name + + "(expected uint64, found " + + datatypeToString(*dOpen.dtype) + ")"); /* allow all attributes to be set */ prc.written() = false; @@ -77,7 +100,18 @@ void ParticlePatches::read() prc.written() = true; pr.dirty() = false; - prc.read(); + try + { + prc.read(); + } + catch (error::ReadError const &err) + { + std::cerr + << "Cannot read record component '" << component_name + << "' in particle patch and will skip it due to read error:\n" + << err.what() << std::endl; + Container::container().erase(component_name); + } } } } // namespace openPMD diff --git a/src/ParticleSpecies.cpp 
b/src/ParticleSpecies.cpp index 24ebadadec..998c6c6317 100644 --- a/src/ParticleSpecies.cpp +++ b/src/ParticleSpecies.cpp @@ -52,7 +52,17 @@ void ParticleSpecies::read() hasParticlePatches = true; pOpen.path = "particlePatches"; IOHandler()->enqueue(IOTask(&particlePatches, pOpen)); - particlePatches.read(); + try + { + particlePatches.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read particle patches and will skip them " + "due to read error:\n" + << err.what() << std::endl; + hasParticlePatches = false; + } } else { @@ -76,7 +86,18 @@ void ParticleSpecies::read() IOHandler()->flush(internal::defaultFlushParams); rc.get().m_isConstant = true; } - r.read(); + try + { + r.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read particle record '" << record_name + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + + map.forget(record_name); + } } } @@ -111,12 +132,11 @@ void ParticleSpecies::read() rc.written() = true; r.read(); } - catch (std::runtime_error const &) + catch (error::ReadError const &err) { - std::cerr << "WARNING: Skipping invalid openPMD record '" - << record_name << "'" << std::endl; - while (!IOHandler()->m_work.empty()) - IOHandler()->m_work.pop(); + std::cerr << "Cannot read particle record '" << record_name + << "' and will skip it due to read error:\n" + << err.what() << std::endl; map.forget(record_name); //(*this)[record_name].erase(RecordComponent::SCALAR); diff --git a/src/ReadIterations.cpp b/src/ReadIterations.cpp index b567aa0ff1..679146e896 100644 --- a/src/ReadIterations.cpp +++ b/src/ReadIterations.cpp @@ -23,6 +23,8 @@ #include "openPMD/Series.hpp" +#include + namespace openPMD { @@ -167,25 +169,66 @@ std::optional SeriesIterator::nextIterationInStep() {FlushLevel::UserFlush}, /* flushIOHandler = */ true); - series.iterations[m_currentIteration].open(); + try + { + series.iterations[m_currentIteration].open(); + } + catch (error::ReadError const &err) 
+ { + std::cerr << "Cannot read iteration '" << m_currentIteration + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + return nextIterationInStep(); + } + return {this}; } case IterationEncoding::fileBased: - series.iterations[m_currentIteration].open(); - series.iterations[m_currentIteration].beginStep(/* reread = */ true); + try + { + /* + * Errors in here might appear due to deferred iteration parsing. + */ + series.iterations[m_currentIteration].open(); + /* + * Errors in here might appear due to reparsing after opening a + * new step. + */ + series.iterations[m_currentIteration].beginStep( + /* reread = */ true); + } + catch (error::ReadError const &err) + { + std::cerr << "[SeriesIterator] Cannot read iteration due to error " + "below, will skip it.\n" + << err.what() << std::endl; + return nextIterationInStep(); + } + return {this}; } throw std::runtime_error("Unreachable!"); } -std::optional SeriesIterator::nextStep() +std::optional SeriesIterator::nextStep(size_t recursion_depth) { // since we are in group-based iteration layout, it does not // matter which iteration we begin a step upon - AdvanceStatus status; + AdvanceStatus status{}; Iteration::BeginStepStatus::AvailableIterations_t availableIterations; - std::tie(status, availableIterations) = - Iteration::beginStep({}, *m_series, /* reread = */ true); + try + { + std::tie(status, availableIterations) = + Iteration::beginStep({}, *m_series, /* reread = */ true); + } + catch (error::ReadError const &err) + { + std::cerr << "[SeriesIterator] Cannot read iteration due to error " + "below, will skip it.\n" + << err.what() << std::endl; + m_series->advance(AdvanceMode::ENDSTEP); + return nextStep(recursion_depth + 1); + } if (availableIterations.has_value() && status != AdvanceStatus::RANDOMACCESS) @@ -224,7 +267,10 @@ std::optional SeriesIterator::nextStep() } else { - ++it; + for (size_t i = 0; i < recursion_depth && it != itEnd; ++i) + { + ++it; + } if (it == itEnd) { @@ -295,9 
+341,22 @@ std::optional SeriesIterator::loopBody() auto iteration = iterations.at(currentIterationIndex.value()); if (iteration.get().m_closed != internal::CloseStatus::ClosedInBackend) { - iteration.open(); - option.value()->setCurrentIteration(); - return option; + try + { + iteration.open(); + option.value()->setCurrentIteration(); + return option; + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read iteration '" + << currentIterationIndex.value() + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + option.value()->deactivateDeadIteration( + currentIterationIndex.value()); + return std::nullopt; + } } else { @@ -325,10 +384,34 @@ std::optional SeriesIterator::loopBody() return {this}; } - auto option = nextStep(); + auto option = nextStep(/*recursion_depth = */ 1); return guardReturn(option); } +void SeriesIterator::deactivateDeadIteration(iteration_index_t index) +{ + switch (m_series->iterationEncoding()) + { + case IterationEncoding::fileBased: { + Parameter param; + m_series->IOHandler()->enqueue( + IOTask(&m_series->iterations[index], std::move(param))); + m_series->IOHandler()->flush({FlushLevel::UserFlush}); + } + break; + case IterationEncoding::variableBased: + case IterationEncoding::groupBased: { + Parameter param; + param.mode = AdvanceMode::ENDSTEP; + m_series->IOHandler()->enqueue( + IOTask(&m_series->iterations[index], std::move(param))); + m_series->IOHandler()->flush({FlushLevel::UserFlush}); + } + break; + } + m_series->iterations.container().erase(index); +} + SeriesIterator &SeriesIterator::operator++() { if (!m_series.has_value()) @@ -340,6 +423,9 @@ SeriesIterator &SeriesIterator::operator++() /* * loopBody() might return an empty option to indicate a skipped iteration. * Loop until it returns something real for us. + * Note that this is not an infinite loop: + * Upon end of the Series, loopBody() does not return an empty option, + * but the end iterator. 
*/ do { diff --git a/src/Record.cpp b/src/Record.cpp index 717d709072..485e817e14 100644 --- a/src/Record.cpp +++ b/src/Record.cpp @@ -109,7 +109,18 @@ void Record::read() if (scalar()) { /* using operator[] will incorrectly update parent */ - this->at(RecordComponent::SCALAR).read(); + auto &scalarComponent = this->at(RecordComponent::SCALAR); + try + { + scalarComponent.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read scalar record component and will skip it " + "due to read error:\n" + << err.what() << std::endl; + this->container().erase(RecordComponent::SCALAR); + } } else { @@ -124,7 +135,17 @@ void Record::read() pOpen.path = component; IOHandler()->enqueue(IOTask(&rc, pOpen)); rc.get().m_isConstant = true; - rc.read(); + try + { + rc.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read record component '" << component + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + this->container().erase(component); + } } Parameter dList; @@ -141,7 +162,17 @@ void Record::read() rc.written() = false; rc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); rc.written() = true; - rc.read(); + try + { + rc.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read record component '" << component + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + this->container().erase(component); + } } } diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index 83d5ac688e..b7679ca598 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -297,7 +297,15 @@ namespace rc.makeConstant(attr.get()); } - static constexpr char const *errorMsg = "Unexpected constant datatype"; + template + static void call(Args &&...) 
+ { + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Undefined constant datatype."); + } }; } // namespace @@ -335,7 +343,11 @@ void RecordComponent::readBase() oss << "Unexpected datatype (" << *aRead.dtype << ") for attribute 'shape' (" << determineDatatype() << " aka uint64_t)"; - throw std::runtime_error(oss.str()); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + oss.str()); } written() = false; @@ -350,7 +362,13 @@ void RecordComponent::readBase() val.has_value()) setUnitSI(val.value()); else - throw std::runtime_error("Unexpected Attribute datatype for 'unitSI'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'unitSI' (expected double, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); readAttributes(ReadMode::FullyReread); } diff --git a/src/Series.cpp b/src/Series.cpp index c25f0f173c..5b17c24642 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -587,7 +587,8 @@ Given file pattern: ')END" if (input->iterationEncoding == IterationEncoding::fileBased) readFileBased(); else - readGorVBased(); + readGorVBased( + /* do_always_throw_errors = */ false, /* init = */ true); if (series.iterations.empty()) { @@ -989,7 +990,10 @@ void Series::readFileBased() fOpen.encoding = iterationEncoding(); if (!auxiliary::directory_exists(IOHandler()->directory)) - throw no_such_file_error( + throw error::ReadError( + error::AffectedObject::File, + error::Reason::Inaccessible, + {}, "Supplied directory is not valid: " + IOHandler()->directory); auto isPartOfSeries = matcher( @@ -1017,19 +1021,36 @@ void Series::readFileBased() * parameter modification. Backend access type stays unchanged for the * lifetime of a Series. 
*/ if (IOHandler()->m_backendAccess == Access::READ_ONLY) - throw no_such_file_error("No matching iterations found: " + name()); + throw error::ReadError( + error::AffectedObject::File, + error::Reason::Inaccessible, + {}, + "No matching iterations found: " + name()); else std::cerr << "No matching iterations found: " << name() << std::endl; } - auto readIterationEagerly = [](Iteration &iteration) { - iteration.runDeferredParseAccess(); + /* + * Return true if parsing was successful + */ + auto readIterationEagerly = + [](Iteration &iteration) -> std::optional { + try + { + iteration.runDeferredParseAccess(); + } + catch (error::ReadError const &err) + { + return err; + } Parameter fClose; iteration.IOHandler()->enqueue(IOTask(&iteration, fClose)); iteration.IOHandler()->flush(internal::defaultFlushParams); iteration.get().m_closed = internal::CloseStatus::ClosedTemporarily; + return {}; }; + std::vector unparseableIterations; if (series.m_parseLazily) { for (auto &iteration : series.iterations) @@ -1037,18 +1058,102 @@ void Series::readFileBased() iteration.second.get().m_closed = internal::CloseStatus::ParseAccessDeferred; } - // open the last iteration, just to parse Series attributes - auto getLastIteration = series.iterations.end(); - getLastIteration--; - auto &lastIteration = getLastIteration->second; - readIterationEagerly(lastIteration); + // open the first iteration, just to parse Series attributes + bool atLeastOneIterationSuccessful = false; + std::optional forwardFirstError; + for (auto &pair : series.iterations) + { + if (auto error = readIterationEagerly(pair.second); error) + { + std::cerr << "Cannot read iteration '" << pair.first + << "' and will skip it due to read error:\n" + << error->what() << std::endl; + unparseableIterations.push_back(pair.first); + if (!forwardFirstError.has_value()) + { + forwardFirstError = std::move(error); + } + } + else + { + atLeastOneIterationSuccessful = true; + break; + } + } + if (!atLeastOneIterationSuccessful) 
+ { + if (forwardFirstError.has_value()) + { + auto &firstError = forwardFirstError.value(); + firstError.description.append( + "\n[Note] Not a single iteration can be successfully " + "parsed (see above errors). Returning the first observed " + "error, for better recoverability in user code. Need to " + "access at least one iteration even in deferred parsing " + "mode in order to read global Series attributes."); + throw firstError; + } + else + { + throw error::ReadError( + error::AffectedObject::Other, + error::Reason::Other, + {}, + "Not a single iteration can be successfully parsed (see " + "above errors). Need to access at least one iteration even " + "in deferred parsing mode in order to read global Series " + "attributes."); + } + } } else { + bool atLeastOneIterationSuccessful = false; + std::optional forwardFirstError; for (auto &iteration : series.iterations) { - readIterationEagerly(iteration.second); + if (auto error = readIterationEagerly(iteration.second); error) + { + std::cerr << "Cannot read iteration '" << iteration.first + << "' and will skip it due to read error:\n" + << error->what() << std::endl; + unparseableIterations.push_back(iteration.first); + if (!forwardFirstError.has_value()) + { + forwardFirstError = std::move(error); + } + } + else + { + atLeastOneIterationSuccessful = true; + } } + if (!atLeastOneIterationSuccessful) + { + if (forwardFirstError.has_value()) + { + auto &firstError = forwardFirstError.value(); + firstError.description.append( + "\n[Note] Not a single iteration can be successfully " + "parsed (see above errors). 
Returning the first observed " + "error, for better recoverability in user code."); + throw firstError; + } + else + { + throw error::ReadError( + error::AffectedObject::Other, + error::Reason::Other, + {}, + "Not a single iteration can be successfully parsed (see " + "above warnings)."); + } + } + } + + for (auto index : unparseableIterations) + { + series.iterations.container().erase(index); } if (padding > 0) @@ -1102,20 +1207,28 @@ void Series::readOneIterationFileBased(std::string const &filePath) * Unlike if the file were group-based, this one doesn't work * at all since the group paths are different. */ - throw std::runtime_error( + throw error::ReadError( + error::AffectedObject::Other, + error::Reason::Other, + {}, "Series constructor called with iteration " "regex '%T' suggests loading a " "time series with fileBased iteration " "encoding. Loaded file is variableBased."); } else - throw std::runtime_error("Unknown iterationEncoding: " + encoding); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unknown iterationEncoding: " + encoding); setAttribute("iterationEncoding", encoding); } else throw std::runtime_error( - "Unexpected Attribute datatype " - "for 'iterationEncoding'"); + "Unexpected Attribute datatype for 'iterationEncoding' (expected " + "string, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "iterationFormat"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -1127,8 +1240,13 @@ void Series::readOneIterationFileBased(std::string const &filePath) written() = true; } else - throw std::runtime_error( - "Unexpected Attribute datatype for 'iterationFormat'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'iterationFormat' (expected " + "string, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); Parameter pOpen; std::string version = openPMD(); @@ 
-1142,7 +1260,32 @@ void Series::readOneIterationFileBased(std::string const &filePath) series.iterations.readAttributes(ReadMode::OverrideExisting); } -auto Series::readGorVBased(bool do_init) +namespace +{ + /* + * This function is efficient if subtract is empty and inefficient + * otherwise. Use only where an empty subtract vector is the + * common case. + */ + template + void + vectorDifference(std::vector &baseVector, std::vector const &subtract) + { + for (auto const &elem : subtract) + { + for (auto it = baseVector.begin(); it != baseVector.end(); ++it) + { + if (*it == elem) + { + baseVector.erase(it); + break; + } + } + } + } +} // namespace + +auto Series::readGorVBased(bool do_always_throw_errors, bool do_init) -> std::optional> { auto &series = get(); @@ -1184,13 +1327,21 @@ auto Series::readGorVBased(bool do_init) series.m_overrideFilebasedFilename = series.m_name; } else - throw std::runtime_error( + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, "Unknown iterationEncoding: " + encoding); setAttribute("iterationEncoding", encoding); } else - throw std::runtime_error( - "Unexpected Attribute datatype for 'iterationEncoding'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'iterationEncoding' " + "(expected string, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "iterationFormat"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -1202,8 +1353,13 @@ auto Series::readGorVBased(bool do_init) written() = true; } else - throw std::runtime_error( - "Unexpected Attribute datatype for 'iterationFormat'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'iterationFormat' (expected " + "string, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); } Parameter pOpen; @@ 
-1211,30 +1367,19 @@ auto Series::readGorVBased(bool do_init) if (version == "1.0.0" || version == "1.0.1" || version == "1.1.0") pOpen.path = auxiliary::replace_first(basePath(), "/%T/", ""); else - throw std::runtime_error("Unknown openPMD version - " + version); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unknown openPMD version - " + version); IOHandler()->enqueue(IOTask(&series.iterations, pOpen)); readAttributes(ReadMode::IgnoreExisting); - auto withRWAccess = [this](auto &&functor) { - auto oldStatus = IOHandler()->m_seriesStatus; - IOHandler()->m_seriesStatus = internal::SeriesStatus::Parsing; - try - { - std::forward(functor)(); - } - catch (...) - { - IOHandler()->m_seriesStatus = oldStatus; - throw; - } - IOHandler()->m_seriesStatus = oldStatus; - }; - /* * 'snapshot' changes over steps, so reread that. */ - withRWAccess([&series]() { + internal::withRWAccess(IOHandler()->m_seriesStatus, [&series]() { series.iterations.readAttributes(ReadMode::OverrideExisting); }); @@ -1243,11 +1388,15 @@ auto Series::readGorVBased(bool do_init) IOHandler()->enqueue(IOTask(&series.iterations, pList)); IOHandler()->flush(internal::defaultFlushParams); - auto readSingleIteration = [&series, &pOpen, this, withRWAccess]( - IterationIndex_t index, - std::string path, - bool guardAgainstRereading, - bool beginStep) { + /* + * Return error if one is caught. 
+ */ + auto readSingleIteration = + [&series, &pOpen, this]( + IterationIndex_t index, + std::string path, + bool guardAgainstRereading, + bool beginStep) -> std::optional { if (series.iterations.contains(index)) { // maybe re-read @@ -1256,13 +1405,16 @@ auto Series::readGorVBased(bool do_init) // reparsing is not needed if (guardAgainstRereading && i.written()) { - return; + return {}; } if (i.get().m_closed != internal::CloseStatus::ParseAccessDeferred) { pOpen.path = path; IOHandler()->enqueue(IOTask(&i, pOpen)); - withRWAccess([&i, &path]() { i.reread(path); }); + // @todo catch stuff from here too + internal::withRWAccess( + IOHandler()->m_seriesStatus, + [&i, &path]() { i.reread(path); }); } } else @@ -1272,7 +1424,18 @@ auto Series::readGorVBased(bool do_init) i.deferParseAccess({path, index, false, "", beginStep}); if (!series.m_parseLazily) { - i.runDeferredParseAccess(); + try + { + i.runDeferredParseAccess(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read iteration '" << index + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + series.iterations.container().erase(index); + return {err}; + } i.get().m_closed = internal::CloseStatus::Open; } else @@ -1280,6 +1443,7 @@ auto Series::readGorVBased(bool do_init) i.get().m_closed = internal::CloseStatus::ParseAccessDeferred; } } + return std::nullopt; }; /* @@ -1294,20 +1458,31 @@ auto Series::readGorVBased(bool do_init) * Sic! This happens when a file-based Series is opened in group-based mode. */ case IterationEncoding::fileBased: { + std::vector unreadableIterations; for (auto const &it : *pList.paths) { IterationIndex_t index = std::stoull(it); - /* - * For now: parse a Series in RandomAccess mode. - * (beginStep = false) - * A streaming read mode might come in a future API addition. 
- */ - withRWAccess( - [&]() { readSingleIteration(index, it, true, false); }); + if (auto err = internal::withRWAccess( + IOHandler()->m_seriesStatus, + [&]() { + return readSingleIteration(index, it, true, false); + }); + err) + { + std::cerr << "Cannot read iteration " << index + << " and will skip it due to read error:\n" + << err.value().what() << std::endl; + if (do_always_throw_errors) + { + throw *err; + } + unreadableIterations.push_back(index); + } } if (currentSteps.has_value()) { - auto const &vec = currentSteps.value(); + auto &vec = currentSteps.value(); + vectorDifference(vec, unreadableIterations); return std::deque{vec.begin(), vec.end()}; } else @@ -1327,7 +1502,21 @@ auto Series::readGorVBased(bool do_init) * Variable-based iteration encoding relies on steps, so parsing * must happen after opening the first step. */ - withRWAccess([&]() { readSingleIteration(it, "", false, true); }); + if (auto err = internal::withRWAccess( + IOHandler()->m_seriesStatus, + [&readSingleIteration, it]() { + return readSingleIteration(it, "", false, true); + }); + err) + { + /* + * Cannot recover from errors in this place. + * If there is an error in the first iteration, the Series + * cannot be read in variable-based encoding. The read API will + * try to skip other iterations that have errors. 
+ */ + throw *err; + } } return res; } @@ -1347,7 +1536,13 @@ void Series::readBase() val.has_value()) setOpenPMD(val.value()); else - throw std::runtime_error("Unexpected Attribute datatype for 'openPMD'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'openPMD' (expected string, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "openPMDextension"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -1356,8 +1551,13 @@ void Series::readBase() val.has_value()) setOpenPMDextension(val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'openPMDextension'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'openPMDextension' (expected " + "uint32, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); aRead.name = "basePath"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -1366,8 +1566,13 @@ void Series::readBase() val.has_value()) setAttribute("basePath", val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'basePath'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'basePath' (expected string, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); Parameter aList; IOHandler()->enqueue(IOTask(this, aList)); @@ -1392,8 +1597,13 @@ void Series::readBase() it.second.meshes.written() = true; } else - throw std::runtime_error( - "Unexpected Attribute datatype for 'meshesPath'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'meshesPath' (expected " + "string, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); } if (std::count( @@ -1417,8 +1627,13 @@ void 
Series::readBase() it.second.particles.written() = true; } else - throw std::runtime_error( - "Unexpected Attribute datatype for 'particlesPath'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'particlesPath' (expected " + "string, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); } } @@ -2100,8 +2315,14 @@ auto Series::currentSnapshot() const default: { std::stringstream s; s << "Unexpected datatype for '/data/snapshot': " << attribute.dtype + << " (expected a vector of integer, found " + + datatypeToString(attribute.dtype) + ")" << std::endl; - throw std::runtime_error(s.str()); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + s.str()); } } } diff --git a/src/auxiliary/JSON.cpp b/src/auxiliary/JSON.cpp index 0b277e0580..14ab0f3cb5 100644 --- a/src/auxiliary/JSON.cpp +++ b/src/auxiliary/JSON.cpp @@ -22,6 +22,7 @@ #include "openPMD/auxiliary/JSON.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" +#include "openPMD/Error.hpp" #include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/StringManip.hpp" diff --git a/src/backend/Attributable.cpp b/src/backend/Attributable.cpp index 0c0b715e28..9010614c75 100644 --- a/src/backend/Attributable.cpp +++ b/src/backend/Attributable.cpp @@ -299,9 +299,14 @@ void Attributable::readAttributes(ReadMode mode) // Some backends may report the wrong type when reading if (vector.size() != 7) { - throw std::runtime_error( - "[Attributable] " - "Unexpected datatype for unitDimension."); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "[Attributable] Unexpected datatype for unitDimension " + "(supplied vector has " + + std::to_string(vector.size()) + + " entries, but 7 are expected)."); } std::array arr; std::copy_n(vector.begin(), 7, arr.begin()); @@ -541,7 +546,11 @@ void 
Attributable::readAttributes(ReadMode mode) internal::SetAttributeMode::WhileReadingAttributes); break; case DT::UNDEFINED: - throw std::runtime_error("Invalid Attribute datatype during read"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Undefined Attribute datatype during read"); } } diff --git a/src/backend/MeshRecordComponent.cpp b/src/backend/MeshRecordComponent.cpp index 49f2a99d64..e7eef6e047 100644 --- a/src/backend/MeshRecordComponent.cpp +++ b/src/backend/MeshRecordComponent.cpp @@ -47,8 +47,13 @@ void MeshRecordComponent::read() else if (auto val = a.getOptional >(); val.has_value()) setPosition(val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'position'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'position' (expected a vector " + "of any floating point type, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); readBase(); } diff --git a/src/backend/PatchRecord.cpp b/src/backend/PatchRecord.cpp index 5b9b708311..8f9e64012c 100644 --- a/src/backend/PatchRecord.cpp +++ b/src/backend/PatchRecord.cpp @@ -21,6 +21,8 @@ #include "openPMD/backend/PatchRecord.hpp" #include "openPMD/auxiliary/Memory.hpp" +#include + namespace openPMD { PatchRecord & @@ -68,8 +70,13 @@ void PatchRecord::read() val.has_value()) this->setAttribute("unitDimension", val.value()); else - throw std::runtime_error( - "Unexpected Attribute datatype for 'unitDimension'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'unitDimension' (expected an " + "array of seven floating point numbers, found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); @@ -86,7 +93,18 @@ void PatchRecord::read() prc.written() = false; 
prc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); prc.written() = true; - prc.read(); + try + { + prc.read(); + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read patch record component '" + << component_name + << "' and will skip it due to read error:" << err.what() + << std::endl; + this->container().erase(component_name); + } } dirty() = false; } diff --git a/src/backend/PatchRecordComponent.cpp b/src/backend/PatchRecordComponent.cpp index 4cac01424b..3db0545d40 100644 --- a/src/backend/PatchRecordComponent.cpp +++ b/src/backend/PatchRecordComponent.cpp @@ -130,7 +130,13 @@ void PatchRecordComponent::read() val.has_value()) setUnitSI(val.value()); else - throw std::runtime_error("Unexpected Attribute datatype for 'unitSI'"); + throw error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Unexpected Attribute datatype for 'unitSI' (expected double, " + "found " + + datatypeToString(Attribute(*aRead.resource).dtype) + ")"); readAttributes(ReadMode::FullyReread); // this will set dirty() = false } diff --git a/test/JSONTest.cpp b/test/JSONTest.cpp index 485689acb4..ec193b8246 100644 --- a/test/JSONTest.cpp +++ b/test/JSONTest.cpp @@ -1,4 +1,5 @@ #include "openPMD/auxiliary/JSON.hpp" +#include "openPMD/Error.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" #include "openPMD/openPMD.hpp" diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index b21bcc2e97..1dd599cf17 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -283,10 +283,14 @@ TEST_CASE("git_hdf5_sample_content_test", "[parallel][hdf5]") REQUIRE(raw_ptr[i] == constant_value); } } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; + return; + } + throw; } } @@ -619,10 +623,14 @@ TEST_CASE("hzdr_adios_sample_content_test", "[parallel][adios1]") REQUIRE(raw_ptr[j * 3 + k] == actual[rank][j][k]); } } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } } #endif diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 0ded4eaa17..f4b1b0ebe0 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -2379,9 +2379,16 @@ inline void optional_paths_110_test(const std::string &backend) REQUIRE(s.iterations[400].particles.empty()); } } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "issue sample not accessible. (" << e.what() << ")\n"; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "issue sample not accessible. (" << e.what() << ")\n"; + } + else + { + throw; + } } { @@ -2450,10 +2457,14 @@ void git_early_chunk_query( } } } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } } @@ -2490,9 +2501,16 @@ TEST_CASE("empty_alternate_fbpic", "[serial][hdf5]") helper::listSeries(list); } } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "issue sample not accessible. (" << e.what() << ")\n"; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "issue sample not accessible. 
(" << e.what() << ")\n"; + } + else + { + throw; + } } } @@ -2679,10 +2697,14 @@ TEST_CASE("git_hdf5_sample_structure_test", "[serial][hdf5]") int32_t i32 = 32; REQUIRE_THROWS(o.setAttribute("setAttributeFail", i32)); } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } #else std::cerr << "Invasive tests not enabled. Hierarchy is not visible.\n"; @@ -2936,10 +2958,14 @@ TEST_CASE("git_hdf5_sample_attribute_test", "[serial][hdf5]") REQUIRE(weighting_scalar.getDimensionality() == 1); REQUIRE(weighting_scalar.getExtent() == e); } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } } @@ -3010,10 +3036,14 @@ TEST_CASE("git_hdf5_sample_content_test", "[serial][hdf5]") REQUIRE(raw_ptr[i] == constant_value); } } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } } @@ -3034,10 +3064,15 @@ TEST_CASE("git_hdf5_sample_fileBased_read_test", "[serial][hdf5]") REQUIRE(o.get().m_filenamePadding == 8); #endif } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible && + e.affectedObject == error::AffectedObject::File) + { + std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; + return; + } + throw; } try @@ -3056,15 +3091,19 @@ TEST_CASE("git_hdf5_sample_fileBased_read_test", "[serial][hdf5]") REQUIRE(o.get().m_filenamePadding == 8); #endif } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } - REQUIRE_THROWS_WITH( + REQUIRE_THROWS_AS( Series("../samples/git-sample/data%07T.h5", Access::READ_ONLY), - Catch::Equals("No matching iterations found: data%07T")); + error::ReadError); try { @@ -3101,10 +3140,16 @@ TEST_CASE("git_hdf5_sample_fileBased_read_test", "[serial][hdf5]") auxiliary::remove_file(file); } } + // use no_such_file_error here to check that the backward-compatibility + // alias works catch (no_such_file_error &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } } @@ -3181,10 +3226,14 @@ TEST_CASE("git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]") auto data = B_z.loadChunk(offset, extent); o.flush(); } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "git sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "git sample not accessible. (" << e.what() << ")\n"; + return; + } + throw; } } @@ -3654,10 +3703,13 @@ TEST_CASE("hzdr_hdf5_sample_content_test", "[serial][hdf5]") REQUIRE( isSame(e_offset_z.getDatatype(), determineDatatype())); } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "HZDR sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "HZDR sample not accessible. 
(" << e.what() << ")\n"; + return; + } } } @@ -3848,10 +3900,13 @@ TEST_CASE("hzdr_adios1_sample_content_test", "[serial][adios1]") for (int c = 0; c < 3; ++c) REQUIRE(raw_ptr[((a * 3) + b) * 3 + c] == actual[a][b][c]); } - catch (no_such_file_error &e) + catch (error::ReadError &e) { - std::cerr << "HZDR sample not accessible. (" << e.what() << ")\n"; - return; + if (e.reason == error::Reason::Inaccessible) + { + std::cerr << "HZDR sample not accessible. (" << e.what() << ")\n"; + return; + } } } @@ -6052,7 +6107,7 @@ void deferred_parsing(std::string const &extension) Series series(basename + "%06T." + extension, Access::CREATE); std::vector buffer(20); std::iota(buffer.begin(), buffer.end(), 0.f); - auto dataset = series.iterations[1000].meshes["E"]["x"]; + auto dataset = series.iterations[0].meshes["E"]["x"]; dataset.resetDataset({Datatype::FLOAT, {20}}); dataset.storeChunk(buffer, {0}, {20}); series.flush(); @@ -6060,7 +6115,7 @@ void deferred_parsing(std::string const &extension) // create some empty pseudo files // if the reader tries accessing them it's game over { - for (size_t i = 0; i < 1000; i += 100) + for (size_t i = 1; i < 1000; i += 100) { std::string infix = std::to_string(i); std::string padding; @@ -6080,7 +6135,7 @@ void deferred_parsing(std::string const &extension) Access::READ_ONLY, "{\"defer_iteration_parsing\": true}"); auto dataset = - series.iterations[1000].open().meshes["E"]["x"].loadChunk( + series.iterations[0].open().meshes["E"]["x"].loadChunk( {0}, {20}); series.flush(); for (size_t i = 0; i < 20; ++i) @@ -6096,7 +6151,7 @@ void deferred_parsing(std::string const &extension) Access::READ_WRITE, "{\"defer_iteration_parsing\": true}"); auto dataset = - series.iterations[1000].open().meshes["E"]["x"].loadChunk( + series.iterations[0].open().meshes["E"]["x"].loadChunk( {0}, {20}); series.flush(); for (size_t i = 0; i < 20; ++i) @@ -6253,6 +6308,116 @@ TEST_CASE("chaotic_stream", "[serial]") } } +#ifdef openPMD_USE_INVASIVE_TESTS +void 
unfinished_iteration_test( + std::string const &ext, bool filebased, std::string const &config = "{}") +{ + std::cout << "\n\nTESTING " << ext << "\n\n" << std::endl; + std::string file = std::string("../samples/unfinished_iteration") + + (filebased ? "_%T." : ".") + ext; + { + Series write(file, Access::CREATE, config); + auto it0 = write.writeIterations()[0]; + auto it5 = write.writeIterations()[5]; + /* + * With enabled invasive tests, this attribute will let the Iteration + * fail parsing. + */ + it5.setAttribute("__openPMD_internal_fail", "asking for trouble"); + auto it10 = write.writeIterations()[10]; + auto E_x = it10.meshes["E"]["x"]; + auto e_density = it10.meshes["e_density"][RecordComponent::SCALAR]; + auto electron_x = it10.particles["e"]["position"]["x"]; + auto electron_mass = + it10.particles["e"]["mass"][RecordComponent::SCALAR]; + } + auto tryReading = [&config, file, filebased]( + std::string const &additionalConfig = "{}") { + { + Series read( + file, Access::READ_ONLY, json::merge(config, additionalConfig)); + + std::vector iterations; + std::cout << "Going to list iterations in " << file << ":" + << std::endl; + for (auto iteration : read.readIterations()) + { + std::cout << "Seeing iteration " << iteration.iterationIndex + << std::endl; + iterations.push_back(iteration.iterationIndex); + + Parameter readAttribute; + readAttribute.name = "this_does_definitely_not_exist"; + read.IOHandler()->enqueue(IOTask(&iteration, readAttribute)); + // enqueue a second time to check that the queue is cleared upon + // exception + read.IOHandler()->enqueue(IOTask(&iteration, readAttribute)); + + REQUIRE_THROWS_AS( + read.IOHandler()->flush({FlushLevel::InternalFlush}), + error::ReadError); + REQUIRE(read.IOHandler()->m_work.empty()); + } + REQUIRE( + (iterations == + std::vector{0, 10})); + } + + if (filebased) + { + Series read( + file, Access::READ_ONLY, json::merge(config, additionalConfig)); + if (additionalConfig == "{}") + { + // Eager parsing, 
defective iteration has already been removed + REQUIRE(!read.iterations.contains(5)); + read.iterations[0].open(); + read.iterations[10].open(); + } + else + { + REQUIRE_THROWS_AS(read.iterations[5].open(), error::ReadError); + read.iterations[0].open(); + read.iterations[10].open(); + } + } + }; + + tryReading(); + tryReading(R"({"defer_iteration_parsing": true})"); +} + +TEST_CASE("unfinished_iteration_test", "[serial]") +{ +#if openPMD_HAVE_ADIOS2 + unfinished_iteration_test("bp", false, R"({"backend": "adios2"})"); + unfinished_iteration_test( + "bp", + false, + R"( +{ + "backend": "adios2", + "iteration_encoding": "variable_based", + "adios2": { + "schema": 20210209 + } +} +)"); + unfinished_iteration_test("bp", true, R"({"backend": "adios2"})"); +#endif +#if openPMD_HAVE_ADIOS1 + unfinished_iteration_test("adios1.bp", false, R"({"backend": "adios1"})"); + unfinished_iteration_test("adios1.bp", true, R"({"backend": "adios1"})"); +#endif +#if openPMD_HAVE_HDF5 + unfinished_iteration_test("h5", false); + unfinished_iteration_test("h5", true); +#endif + unfinished_iteration_test("json", false); + unfinished_iteration_test("json", true); +} +#endif + TEST_CASE("late_setting_of_iterationencoding", "[serial]") { { From 9a22cd29c2205755cceedf1ee0af0ed1f744ecfa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Thu, 5 Jan 2023 17:09:46 +0100 Subject: [PATCH 23/82] json::merge(): Two little fixes (#1333) * Specify and test: JSON/TOML as input/output for json::merge * Add Python binding * Sort lines for the TOML equality check * Fix NVCC TOML ios_base::openmode * Revert "Fix NVCC TOML ios_base::openmode" This reverts commit 22f0585ee61bb54761b6c95c72f05d514cd8c729. 
* deactivate json_merging test for nvc++ --- include/openPMD/auxiliary/JSON.hpp | 27 ++++++++++-------- src/auxiliary/JSON.cpp | 17 +++++++++-- src/binding/python/Series.cpp | 40 ++++++++++++++++++++++++++ test/JSONTest.cpp | 46 ++++++++++++++++++++++++++++++ 4 files changed, 116 insertions(+), 14 deletions(-) diff --git a/include/openPMD/auxiliary/JSON.hpp b/include/openPMD/auxiliary/JSON.hpp index c4cc5832aa..8c2551fe0a 100644 --- a/include/openPMD/auxiliary/JSON.hpp +++ b/include/openPMD/auxiliary/JSON.hpp @@ -28,16 +28,16 @@ namespace openPMD namespace json { /** - * @brief Merge two JSON datasets into one. + * @brief Merge two JSON/TOML datasets into one. * * Merging rules: - * 1. If both `defaultValue` and `overwrite` are JSON objects, then the - * resulting JSON object will contain the union of both objects' keys. - * If a key is specified in both objects, the values corresponding to the - * key are merged recursively. - * Keys that point to a null value after this procedure will be pruned. - * 2. In any other case, the JSON dataset `defaultValue` is replaced in its - * entirety with the JSON dataset `overwrite`. + * 1. If both `defaultValue` and `overwrite` are JSON/TOML objects, then the + * resulting JSON/TOML object will contain the union of both objects' + * keys. If a key is specified in both objects, the values corresponding + * to the key are merged recursively. Keys that point to a null value + * after this procedure will be pruned. + * 2. In any other case, the JSON/TOML dataset `defaultValue` is replaced in + * its entirety with the JSON/TOML dataset `overwrite`. * * Note that item 2 means that datasets of different type will replace each * other without error. @@ -46,15 +46,18 @@ namespace json * * Possible use case: * An application uses openPMD-api and wants to do the following: - * 1. Set some default backend options as JSON parameters. + * 1. Set some default backend options as JSON/TOML parameters. * 2. 
Let its users specify custom backend options additionally. * * By using the json::merge() function, this application can then allow * users to overwrite default options, while keeping any other ones. * - * @param defaultValue - * @param overwrite - * @return std::string + * @param defaultValue A string containing either a JSON or a TOML dataset. + * @param overwrite A string containing either a JSON or TOML dataset (does + * not need to be the same as `defaultValue`). + * @return std::string The merged dataset, according to the above rules. If + * `defaultValue` was a JSON dataset, then as a JSON string, otherwise as a + * TOML string. */ std::string merge(std::string const &defaultValue, std::string const &overwrite); diff --git a/src/auxiliary/JSON.cpp b/src/auxiliary/JSON.cpp index 14ab0f3cb5..9cfe6539fa 100644 --- a/src/auxiliary/JSON.cpp +++ b/src/auxiliary/JSON.cpp @@ -570,8 +570,21 @@ merge(nlohmann::json &defaultVal, nlohmann::json const &overwrite) std::string merge(std::string const &defaultValue, std::string const &overwrite) { - auto res = parseOptions(defaultValue, /* considerFiles = */ false).config; + auto [res, returnFormat] = + parseOptions(defaultValue, /* considerFiles = */ false); merge(res, parseOptions(overwrite, /* considerFiles = */ false).config); - return res.dump(); + switch (returnFormat) + { + case SupportedLanguages::JSON: + return res.dump(); + break; + case SupportedLanguages::TOML: { + auto asToml = json::jsonToToml(res); + std::stringstream sstream; + sstream << asToml; + return sstream.str(); + } + } + throw std::runtime_error("Unreachable!"); } } // namespace openPMD::json diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index a85462261c..3dbaaa034c 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -23,6 +23,7 @@ #include #include "openPMD/Series.hpp" +#include "openPMD/auxiliary/JSON.hpp" #include "openPMD/config.hpp" #if openPMD_HAVE_MPI @@ -231,4 +232,43 @@ this method. 
"write_iterations", &Series::writeIterations, py::keep_alive<0, 1>()); + + m.def( + "merge_json", + &json::merge, + py::arg("default_value") = "{}", + py::arg("overwrite") = "{}", + R"END( +Merge two JSON/TOML datasets into one. + +Merging rules: +1. If both `defaultValue` and `overwrite` are JSON/TOML objects, then the +resulting JSON/TOML object will contain the union of both objects' +keys. If a key is specified in both objects, the values corresponding +to the key are merged recursively. Keys that point to a null value +after this procedure will be pruned. +2. In any other case, the JSON/TOML dataset `defaultValue` is replaced in +its entirety with the JSON/TOML dataset `overwrite`. + +Note that item 2 means that datasets of different type will replace each +other without error. +It also means that array types will replace each other without any notion +of appending or merging. + +Possible use case: +An application uses openPMD-api and wants to do the following: +1. Set some default backend options as JSON/TOML parameters. +2. Let its users specify custom backend options additionally. + +By using the json::merge() function, this application can then allow +users to overwrite default options, while keeping any other ones. + +Parameters: +* default_value: A string containing either a JSON or a TOML dataset. +* overwrite: A string containing either a JSON or TOML dataset (does + not need to be the same as `defaultValue`). +* returns: The merged dataset, according to the above rules. + If `defaultValue` was a JSON dataset, then as a JSON string, + otherwise as a TOML string. 
+ )END"); } diff --git a/test/JSONTest.cpp b/test/JSONTest.cpp index ec193b8246..46b2459e52 100644 --- a/test/JSONTest.cpp +++ b/test/JSONTest.cpp @@ -5,8 +5,12 @@ #include +#include #include +#include +#include #include +#include using namespace openPMD; @@ -122,6 +126,7 @@ TEST_CASE("json_parsing", "[auxiliary]") REQUIRE(jsonUpper.dump() == jsonLower.dump()); } +#if !__NVCOMPILER // see https://github.com/ToruNiina/toml11/issues/205 TEST_CASE("json_merging", "auxiliary") { std::string defaultVal = R"END( @@ -174,7 +179,48 @@ TEST_CASE("json_merging", "auxiliary") REQUIRE( json::merge(defaultVal, overwrite) == json::parseOptions(expect, false).config.dump()); + + { + // The TOML library doesn't guarantee a specific order of output + // so we need to sort lines to compare with expected results + auto sort_lines = [](std::string const &s) -> std::vector { + std::vector v; + std::istringstream sstream(s); + for (std::string line; std::getline(sstream, line); + line = std::string()) + { + v.push_back(std::move(line)); + } + std::sort(v.begin(), v.end()); + return v; + }; + std::string leftJson = R"({"left": "val"})"; + std::string rightJson = R"({"right": "val"})"; + std::string leftToml = R"(left = "val")"; + std::string rightToml = R"(right = "val")"; + + std::string resJson = + nlohmann::json::parse(R"({"left": "val", "right": "val"})").dump(); + std::vector resToml = [&sort_lines]() { + constexpr char const *raw = R"( +left = "val" +right = "val" + )"; + std::istringstream istream( + raw, std::ios_base::binary | std::ios_base::in); + toml::value tomlVal = toml::parse(istream); + std::stringstream sstream; + sstream << tomlVal; + return sort_lines(sstream.str()); + }(); + + REQUIRE(json::merge(leftJson, rightJson) == resJson); + REQUIRE(json::merge(leftJson, rightToml) == resJson); + REQUIRE(sort_lines(json::merge(leftToml, rightJson)) == resToml); + REQUIRE(sort_lines(json::merge(leftToml, rightToml)) == resToml); + } } +#endif /* * This tests two things about the 
/data/snapshot attribute: From 351d751946e6d41997c9363bfad1fcb393f15a6b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Thu, 5 Jan 2023 17:14:23 +0100 Subject: [PATCH 24/82] Unify exception types (#1355) * Remove ParseError Leftover from lenient parsing PR, unused. * Swallow unsupported_data_error into OperationUnsupportedInBackend * Rename no_such_attribute_error -> error::NoSuchAttribute --- include/openPMD/Error.hpp | 19 +++++++--- include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp | 36 +++++++++--------- include/openPMD/IO/AbstractIOHandler.hpp | 11 ------ include/openPMD/ThrowError.hpp | 3 ++ include/openPMD/backend/Attributable.hpp | 13 +------ src/Error.cpp | 8 +++- src/IO/ADIOS/CommonADIOS1IOHandler.cpp | 40 +++++++++++--------- src/binding/python/Error.cpp | 2 + 8 files changed, 69 insertions(+), 63 deletions(-) diff --git a/include/openPMD/Error.hpp b/include/openPMD/Error.hpp index 9845cdcdf0..6bbabd1c73 100644 --- a/include/openPMD/Error.hpp +++ b/include/openPMD/Error.hpp @@ -107,13 +107,10 @@ namespace error std::string description_in); }; - /* - * Inrecoverable parse error from the frontend. - */ - class ParseError : public Error + class NoSuchAttribute : public Error { public: - ParseError(std::string what); + NoSuchAttribute(std::string attributeName); }; } // namespace error @@ -122,4 +119,16 @@ namespace error * */ using no_such_file_error = error::ReadError; + +/** + * @brief Backward-compatibility alias for unsupported_data_error. + * + */ +using unsupported_data_error = error::OperationUnsupportedInBackend; + +/** + * @brief Backward-compatibility alias for no_such_attribute_error. 
+ * + */ +using no_such_attribute_error = error::NoSuchAttribute; } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp b/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp index c4e99d773f..62e9f00493 100644 --- a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp @@ -21,6 +21,7 @@ #pragma once #include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" +#include "openPMD/ThrowError.hpp" #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" @@ -102,8 +103,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(long) == 8u) return adios_long; else - throw unsupported_data_error( - "No native equivalent for Datatype::SHORT found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No native equivalent for Datatype::SHORT found."); case DT::INT: case DT::VEC_INT: if (sizeof(int) == 2u) @@ -113,8 +114,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(int) == 8u) return adios_long; else - throw unsupported_data_error( - "No native equivalent for Datatype::INT found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No native equivalent for Datatype::INT found."); case DT::LONG: case DT::VEC_LONG: if (sizeof(long) == 2u) @@ -124,8 +125,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(long) == 8u) return adios_long; else - throw unsupported_data_error( - "No native equivalent for Datatype::LONG found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No native equivalent for Datatype::LONG found."); case DT::LONGLONG: case DT::VEC_LONGLONG: if (sizeof(long long) == 2u) @@ -135,8 +136,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(long long) == 8u) return adios_long; else - throw unsupported_data_error( - "No native equivalent for Datatype::LONGLONG found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No 
native equivalent for Datatype::LONGLONG found."); case DT::USHORT: case DT::VEC_USHORT: if (sizeof(unsigned short) == 2u) @@ -146,8 +147,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(unsigned long) == 8u) return adios_unsigned_long; else - throw unsupported_data_error( - "No native equivalent for Datatype::USHORT found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No native equivalent for Datatype::USHORT found."); case DT::UINT: case DT::VEC_UINT: if (sizeof(unsigned int) == 2u) @@ -157,8 +158,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(unsigned int) == 8u) return adios_unsigned_long; else - throw unsupported_data_error( - "No native equivalent for Datatype::UINT found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No native equivalent for Datatype::UINT found."); case DT::ULONG: case DT::VEC_ULONG: if (sizeof(unsigned long) == 2u) @@ -168,8 +169,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(unsigned long) == 8u) return adios_unsigned_long; else - throw unsupported_data_error( - "No native equivalent for Datatype::ULONG found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No native equivalent for Datatype::ULONG found."); case DT::ULONGLONG: case DT::VEC_ULONGLONG: if (sizeof(unsigned long long) == 2u) @@ -179,7 +180,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) else if (sizeof(unsigned long long) == 8u) return adios_unsigned_long; else - throw unsupported_data_error( + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No native equivalent for Datatype::ULONGLONG found."); case DT::FLOAT: case DT::VEC_FLOAT: @@ -199,8 +201,8 @@ inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) return adios_double_complex; case DT::CLONG_DOUBLE: case DT::VEC_CLONG_DOUBLE: - throw unsupported_data_error( - "No native equivalent for Datatype::CLONG_DOUBLE found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", "No 
native equivalent for Datatype::CLONG_DOUBLE found."); case DT::STRING: return adios_string; case DT::VEC_STRING: diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp index f444996c66..f6316dbe97 100644 --- a/include/openPMD/IO/AbstractIOHandler.hpp +++ b/include/openPMD/IO/AbstractIOHandler.hpp @@ -38,17 +38,6 @@ namespace openPMD { - -class unsupported_data_error : public std::runtime_error -{ -public: - unsupported_data_error(std::string const &what_arg) - : std::runtime_error(what_arg) - {} - virtual ~unsupported_data_error() - {} -}; - /** * @brief Determine what items should be flushed upon Series::flush() * diff --git a/include/openPMD/ThrowError.hpp b/include/openPMD/ThrowError.hpp index eae561aff7..4e48e9bfdc 100644 --- a/include/openPMD/ThrowError.hpp +++ b/include/openPMD/ThrowError.hpp @@ -67,4 +67,7 @@ throwOperationUnsupportedInBackend(std::string backend, std::string what); Reason reason_in, std::optional backend, std::string description_in); + +[[noreturn]] OPENPMDAPI_EXPORT void +throwNoSuchAttribute(std::string attributeName); } // namespace openPMD::error diff --git a/include/openPMD/backend/Attributable.hpp b/include/openPMD/backend/Attributable.hpp index 995923151c..007b821f5c 100644 --- a/include/openPMD/backend/Attributable.hpp +++ b/include/openPMD/backend/Attributable.hpp @@ -21,6 +21,7 @@ #pragma once #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/ThrowError.hpp" #include "openPMD/auxiliary/OutOfRangeMsg.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" @@ -50,16 +51,6 @@ class Attributable; class Iteration; class Series; -class no_such_attribute_error : public std::runtime_error -{ -public: - no_such_attribute_error(std::string const &what_arg) - : std::runtime_error(what_arg) - {} - virtual ~no_such_attribute_error() - {} -}; - namespace internal { class AttributableData @@ -475,7 +466,7 @@ inline bool Attributable::setAttributeImpl( { 
auxiliary::OutOfRangeMsg const out_of_range_msg( "Attribute", "can not be set (read-only)."); - throw no_such_attribute_error(out_of_range_msg(key)); + error::throwNoSuchAttribute(out_of_range_msg(key)); } dirty() = true; diff --git a/src/Error.cpp b/src/Error.cpp index 99096bd54e..a8e83338ed 100644 --- a/src/Error.cpp +++ b/src/Error.cpp @@ -133,7 +133,13 @@ namespace error affectedObject, reason, std::move(backend), std::move(description)); } - ParseError::ParseError(std::string what) : Error("Parse Error: " + what) + NoSuchAttribute::NoSuchAttribute(std::string attributeName) + : Error(std::move(attributeName)) {} + + void throwNoSuchAttribute(std::string attributeName) + { + throw NoSuchAttribute(std::move(attributeName)); + } } // namespace error } // namespace openPMD diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp index dded98d94c..5041355fe9 100644 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -882,9 +882,9 @@ void CommonADIOS1IOHandlerImpl::openDataset( else if (sizeof(long long) == 2u) dtype = DT::LONGLONG; else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_short " - "found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", + "No native equivalent for Datatype adios_short found."); break; case adios_integer: if (sizeof(short) == 4u) @@ -896,9 +896,9 @@ void CommonADIOS1IOHandlerImpl::openDataset( else if (sizeof(long long) == 4u) dtype = DT::LONGLONG; else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype adios_integer " - "found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", + "No native equivalent for Datatype adios_integer found."); break; case adios_long: if (sizeof(short) == 8u) @@ -910,8 +910,9 @@ void CommonADIOS1IOHandlerImpl::openDataset( else if (sizeof(long long) == 8u) dtype = DT::LONGLONG; else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for 
Datatype adios_long found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", + "No native equivalent for Datatype adios_long found."); break; case adios_unsigned_byte: dtype = DT::UCHAR; @@ -926,9 +927,10 @@ void CommonADIOS1IOHandlerImpl::openDataset( else if (sizeof(unsigned long long) == 2u) dtype = DT::ULONGLONG; else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_short found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", + "No native equivalent for Datatype adios_unsigned_short " + "found."); break; case adios_unsigned_integer: if (sizeof(unsigned short) == 4u) @@ -940,9 +942,10 @@ void CommonADIOS1IOHandlerImpl::openDataset( else if (sizeof(unsigned long long) == 4u) dtype = DT::ULONGLONG; else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_integer found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", + "No native equivalent for Datatype adios_unsigned_integer " + "found."); break; case adios_unsigned_long: if (sizeof(unsigned short) == 8u) @@ -954,9 +957,9 @@ void CommonADIOS1IOHandlerImpl::openDataset( else if (sizeof(unsigned long long) == 8u) dtype = DT::ULONGLONG; else - throw unsupported_data_error( - "[ADIOS1] No native equivalent for Datatype " - "adios_unsigned_long found."); + error::throwOperationUnsupportedInBackend( + "ADIOS1", + "No native equivalent for Datatype adios_unsigned_long found."); break; case adios_real: dtype = DT::FLOAT; @@ -977,7 +980,8 @@ void CommonADIOS1IOHandlerImpl::openDataset( case adios_string: case adios_string_array: default: - throw unsupported_data_error( + error::throwOperationUnsupportedInBackend( + "ADIOS1", "[ADIOS1] Datatype not implemented for ADIOS dataset writing"); } *parameters.dtype = dtype; diff --git a/src/binding/python/Error.cpp b/src/binding/python/Error.cpp index 3b81ebe923..df1b39a5b2 100644 --- a/src/binding/python/Error.cpp +++ b/src/binding/python/Error.cpp @@ 
-15,6 +15,8 @@ void init_Error(py::module &m) py::register_exception( m, "ErrorBackendConfigSchema", baseError); py::register_exception(m, "ErrorInternal", baseError); + py::register_exception( + m, "ErrorNoSuchAttribute", baseError); #ifndef NDEBUG m.def("test_throw", [](std::string description) { From 1774623c84304a388230f08e1374141992d1adbf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Thu, 5 Jan 2023 18:27:15 +0100 Subject: [PATCH 25/82] Deprecate ADIOS1 backend (#1314) * Add deprecation warning * Turn ADIOS1 off by default * Print warning on rank 0 only, make suppressible via OPENPMD_WARNING_SUPPRESS_ADIOS1_DEPRECATED=1 * Env Option: Rename & Document * CI: Cleanup * Add missing MPI_Init in test * Move parallel test to parallel tests * Docs: Update ADIOS1 Build Values * Docs: Two More Deprecated Hints Co-authored-by: Axel Huebl --- .github/workflows/linux.yml | 8 ++-- CMakeLists.txt | 2 +- README.md | 4 +- docs/source/backends/adios1.rst | 17 +++++---- docs/source/dev/buildoptions.rst | 2 +- docs/source/dev/dependencies.rst | 2 +- src/IO/AbstractIOHandlerHelper.cpp | 36 ++++++++++++++++++ test/CoreTest.cpp | 59 ----------------------------- test/ParallelIOTest.cpp | 60 ++++++++++++++++++++++++++++++ 9 files changed, 114 insertions(+), 76 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 1a1707df98..09424932dc 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -82,14 +82,14 @@ jobs: export OPENPMD_BP_BACKEND=ADIOS1 ctest --test-dir build --output-on-failure - clang7_nopy_ompi_h5_ad1_ad2_newLayout: + clang7_nopy_ompi_h5_ad2_newLayout: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v2 - name: Spack Cache uses: actions/cache@v2 - with: {path: /opt/spack, key: clang7_nopy_ompi_h5_ad1_ad2_v2 } + with: {path: /opt/spack, key: clang7_nopy_ompi_h5_ad2_v2 } - name: Install run: | sudo apt-get update @@ -237,7 +237,7 @@ jobs: 
cmake --build build --parallel 2 ctest --test-dir build --output-on-failure - gcc9_py38_pd_nompi_h5_ad1_ad2_libcpp: + gcc9_py38_pd_nompi_h5_ad2_libcpp: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: @@ -246,7 +246,7 @@ jobs: run: | sudo apt-get update sudo apt-get install g++ libopenmpi-dev libhdf5-openmpi-dev libadios-dev python3 python3-numpy python3-mpi4py python3-pandas -# TODO ADIOS1 (.pc file broken?) ADIOS2 +# TODO ADIOS2 - name: Build env: {CXXFLAGS: -Werror, PKG_CONFIG_PATH: /usr/lib/x86_64-linux-gnu/pkgconfig} run: | diff --git a/CMakeLists.txt b/CMakeLists.txt index b7a3425633..6ea49f586c 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -142,7 +142,7 @@ endfunction() openpmd_option(MPI "Parallel, Multi-Node I/O for clusters" AUTO) openpmd_option(HDF5 "HDF5 backend (.h5 files)" AUTO) -openpmd_option(ADIOS1 "ADIOS1 backend (.bp files)" AUTO) +openpmd_option(ADIOS1 "ADIOS1 backend (.bp files)" OFF) openpmd_option(ADIOS2 "ADIOS2 backend (.bp files)" AUTO) openpmd_option(PYTHON "Enable Python bindings" AUTO) diff --git a/README.md b/README.md index df45936b70..0973281760 100644 --- a/README.md +++ b/README.md @@ -111,7 +111,7 @@ Shipped internally in `share/openPMD/thirdParty/`: I/O backends: * [JSON](https://en.wikipedia.org/wiki/JSON) * [HDF5](https://support.hdfgroup.org/HDF5) 1.8.13+ (optional) -* [ADIOS1](https://www.olcf.ornl.gov/center-projects/adios) 1.13.1+ (optional) +* [ADIOS1](https://www.olcf.ornl.gov/center-projects/adios) 1.13.1+ (optional, deprecated) * [ADIOS2](https://github.com/ornladios/ADIOS2) 2.7.0+ (optional) while those can be built either with or without: @@ -254,7 +254,7 @@ CMake controls options with prefixed `-D`, e.g. 
`-DopenPMD_USE_MPI=OFF`: |------------------------------|------------------|------------------------------------------------------------------------------| | `openPMD_USE_MPI` | **AUTO**/ON/OFF | Parallel, Multi-Node I/O for clusters | | `openPMD_USE_HDF5` | **AUTO**/ON/OFF | HDF5 backend (`.h5` files) | -| `openPMD_USE_ADIOS1` | **AUTO**/ON/OFF | ADIOS1 backend (`.bp` files up to version BP3) | +| `openPMD_USE_ADIOS1` | AUTO/ON/**OFF** | ADIOS1 backend (`.bp` files up to version BP3) - deprecated | | `openPMD_USE_ADIOS2` | **AUTO**/ON/OFF | ADIOS2 backend (`.bp` files in BP3, BP4 or higher) | | `openPMD_USE_PYTHON` | **AUTO**/ON/OFF | Enable Python bindings | | `openPMD_USE_INVASIVE_TESTS` | ON/**OFF** | Enable unit tests that modify source code 1 | diff --git a/docs/source/backends/adios1.rst b/docs/source/backends/adios1.rst index 75e1918df8..11680d7e49 100644 --- a/docs/source/backends/adios1.rst +++ b/docs/source/backends/adios1.rst @@ -27,14 +27,15 @@ Backend-Specific Controls The following environment variables control ADIOS1 I/O behavior at runtime. Fine-tuning these is especially useful when running at large scale. -==================================== ========== ================================================================================ -environment variable default description -==================================== ========== ================================================================================ -``OPENPMD_ADIOS_NUM_AGGREGATORS`` ``1`` Number of I/O aggregator nodes for ADIOS1 ``MPI_AGGREGATE`` transport method. -``OPENPMD_ADIOS_NUM_OST`` ``0`` Number of I/O OSTs for ADIOS1 ``MPI_AGGREGATE`` transport method. -``OPENPMD_ADIOS_HAVE_METADATA_FILE`` ``1`` Online creation of the adios journal file (``1``: yes, ``0``: no). -``OPENPMD_BP_BACKEND`` ``ADIOS2`` Chose preferred ``.bp`` file backend if ``ADIOS1`` and ``ADIOS2`` are available. 
-==================================== ========== ================================================================================ +============================================== ========== ================================================================================ +environment variable default description +============================================== ========== ================================================================================ +``OPENPMD_ADIOS_NUM_AGGREGATORS`` ``1`` Number of I/O aggregator nodes for ADIOS1 ``MPI_AGGREGATE`` transport method. +``OPENPMD_ADIOS_NUM_OST`` ``0`` Number of I/O OSTs for ADIOS1 ``MPI_AGGREGATE`` transport method. +``OPENPMD_ADIOS_HAVE_METADATA_FILE`` ``1`` Online creation of the adios journal file (``1``: yes, ``0``: no). +``OPENPMD_BP_BACKEND`` ``ADIOS2`` Chose preferred ``.bp`` file backend if ``ADIOS1`` and ``ADIOS2`` are available. +``OPENPMD_ADIOS_SUPPRESS_DEPRECATED_WARNING`` ``0`` Set to ``1`` to suppress ADIOS1 deprecation warnings. +============================================== ========== ================================================================================ Please refer to the `ADIOS1 manual, section 6.1.5 `_ for details on I/O tuning. 
diff --git a/docs/source/dev/buildoptions.rst b/docs/source/dev/buildoptions.rst index ccd0614966..018857fc9e 100644 --- a/docs/source/dev/buildoptions.rst +++ b/docs/source/dev/buildoptions.rst @@ -14,7 +14,7 @@ CMake Option Values Description ============================== =============== ======================================================================== ``openPMD_USE_MPI`` **AUTO**/ON/OFF Parallel, Multi-Node I/O for clusters ``openPMD_USE_HDF5`` **AUTO**/ON/OFF HDF5 backend (``.h5`` files) -``openPMD_USE_ADIOS1`` **AUTO**/ON/OFF ADIOS1 backend (``.bp`` files up to version BP3) +``openPMD_USE_ADIOS1`` AUTO/ON/**OFF** ADIOS1 backend (``.bp`` files up to version BP3) - deprecated ``openPMD_USE_ADIOS2`` **AUTO**/ON/OFF ADIOS2 backend (``.bp`` files in BP3, BP4 or higher) ``openPMD_USE_PYTHON`` **AUTO**/ON/OFF Enable Python bindings ``openPMD_USE_INVASIVE_TESTS`` ON/**OFF** Enable unit tests that modify source code :sup:`1` diff --git a/docs/source/dev/dependencies.rst b/docs/source/dev/dependencies.rst index df2a835f98..d8c441ea07 100644 --- a/docs/source/dev/dependencies.rst +++ b/docs/source/dev/dependencies.rst @@ -27,7 +27,7 @@ Optional: I/O backends * `JSON `_ * `HDF5 `_ 1.8.13+ -* `ADIOS1 `_ 1.13.1+ +* `ADIOS1 `_ 1.13.1+ (deprecated) * `ADIOS2 `_ 2.7.0+ while those can be build either with or without: diff --git a/src/IO/AbstractIOHandlerHelper.cpp b/src/IO/AbstractIOHandlerHelper.cpp index ff9471c5dd..27efe722c1 100644 --- a/src/IO/AbstractIOHandlerHelper.cpp +++ b/src/IO/AbstractIOHandlerHelper.cpp @@ -20,6 +20,8 @@ */ #include "openPMD/IO/AbstractIOHandlerHelper.hpp" +#include "openPMD/config.hpp" + #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS1IOHandler.hpp" #include "openPMD/IO/ADIOS/ADIOS2IOHandler.hpp" @@ -28,8 +30,13 @@ #include "openPMD/IO/HDF5/HDF5IOHandler.hpp" #include "openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp" #include "openPMD/IO/JSON/JSONIOHandler.hpp" +#include "openPMD/auxiliary/Environment.hpp" #include 
"openPMD/auxiliary/JSON_internal.hpp" +#if openPMD_HAVE_MPI +#include +#endif + #include #include @@ -55,6 +62,22 @@ namespace } throw "Unreachable"; } + + constexpr char const *adios1Deprecation = R"( +[Deprecation warning] + Development on the ADIOS1 IO library has ceased. + Support for ADIOS1 in the openPMD-api has been deprecated + and will be removed in a future version. + + Please consider switching to ADIOS2. + We recommend checking your ADIOS1 datasets for compatibility with ADIOS2. + Conversion of data from one backend to another may optionally be achieved + by using the `openpmd-pipe` tool.) + + Suppress this warning via `export OPENPMD_ADIOS_SUPPRESS_DEPRECATED_WARNING=1`.)"; + + constexpr char const *suppressAdios1DeprecationWarning = + "OPENPMD_ADIOS_SUPPRESS_DEPRECATED_WARNING"; } // namespace #if openPMD_HAVE_MPI @@ -75,6 +98,15 @@ std::unique_ptr createIOHandler( return constructIOHandler( "HDF5", path, access, comm, std::move(options)); case Format::ADIOS1: + if (auxiliary::getEnvNum(suppressAdios1DeprecationWarning, 0) == 0) + { + int rank; + MPI_Comm_rank(comm, &rank); + if (rank == 0) + { + std::cerr << adios1Deprecation << std::endl; + } + } return constructIOHandler( "ADIOS1", path, access, std::move(options), comm); case Format::ADIOS2_BP: @@ -144,6 +176,10 @@ std::unique_ptr createIOHandler( return constructIOHandler( "HDF5", path, access, std::move(options)); case Format::ADIOS1: + if (auxiliary::getEnvNum(suppressAdios1DeprecationWarning, 0) == 0) + { + std::cerr << adios1Deprecation << std::endl; + } return constructIOHandler( "ADIOS1", path, access, std::move(options)); case Format::ADIOS2_BP: diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 42045789a5..96aaebde9b 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -1354,63 +1354,4 @@ TEST_CASE("unavailable_backend", "[core]") "'HDF5'."); } #endif - -#if openPMD_HAVE_MPI -#if !openPMD_HAVE_ADIOS1 - { - auto fail = []() { - Series( - "unavailable.bp", - Access::CREATE, - 
MPI_COMM_WORLD, - R"({"backend": "ADIOS1"})"); - }; - REQUIRE_THROWS_WITH( - fail(), - "Wrong API usage: openPMD-api built without support for backend " - "'ADIOS1'."); - } -#endif -#if !openPMD_HAVE_ADIOS2 - { - auto fail = []() { - Series( - "unavailable.bp", - Access::CREATE, - MPI_COMM_WORLD, - R"({"backend": "ADIOS2"})"); - }; - REQUIRE_THROWS_WITH( - fail(), - "Wrong API usage: openPMD-api built without support for backend " - "'ADIOS2'."); - } -#endif -#if !openPMD_HAVE_ADIOS1 && !openPMD_HAVE_ADIOS2 - { - auto fail = []() { - Series("unavailable.bp", Access::CREATE, MPI_COMM_WORLD); - }; - REQUIRE_THROWS_WITH( - fail(), - "Wrong API usage: openPMD-api built without support for backend " - "'ADIOS2'."); - } -#endif -#if !openPMD_HAVE_HDF5 - { - auto fail = []() { - Series( - "unavailable.h5", - Access::CREATE, - MPI_COMM_WORLD, - R"({"backend": "HDF5"})"); - }; - REQUIRE_THROWS_WITH( - fail(), - "Wrong API usage: openPMD-api built without support for backend " - "'HDF5'."); - } -#endif -#endif } diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index 1dd599cf17..bc2b2c4db7 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -1604,4 +1604,64 @@ TEST_CASE("append_mode", "[parallel]") } } } + +TEST_CASE("unavailable_backend", "[core][parallel]") +{ +#if !openPMD_HAVE_ADIOS1 + { + auto fail = []() { + Series( + "unavailable.bp", + Access::CREATE, + MPI_COMM_WORLD, + R"({"backend": "ADIOS1"})"); + }; + REQUIRE_THROWS_WITH( + fail(), + "Wrong API usage: openPMD-api built without support for backend " + "'ADIOS1'."); + } +#endif +#if !openPMD_HAVE_ADIOS2 + { + auto fail = []() { + Series( + "unavailable.bp", + Access::CREATE, + MPI_COMM_WORLD, + R"({"backend": "ADIOS2"})"); + }; + REQUIRE_THROWS_WITH( + fail(), + "Wrong API usage: openPMD-api built without support for backend " + "'ADIOS2'."); + } +#endif +#if !openPMD_HAVE_ADIOS1 && !openPMD_HAVE_ADIOS2 + { + auto fail = []() { + Series("unavailable.bp", Access::CREATE, 
MPI_COMM_WORLD); + }; + REQUIRE_THROWS_WITH( + fail(), + "Wrong API usage: openPMD-api built without support for backend " + "'ADIOS2'."); + } +#endif +#if !openPMD_HAVE_HDF5 + { + auto fail = []() { + Series( + "unavailable.h5", + Access::CREATE, + MPI_COMM_WORLD, + R"({"backend": "HDF5"})"); + }; + REQUIRE_THROWS_WITH( + fail(), + "Wrong API usage: openPMD-api built without support for backend " + "'HDF5'."); + } +#endif +} #endif From e5b1245f8b3ef25b06cdc939717163c8bab93acf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Fri, 13 Jan 2023 18:47:23 +0100 Subject: [PATCH 26/82] Fix dtype_from_numpy (#1357) Use char comparison to detect type equality --- include/openPMD/binding/python/Numpy.hpp | 37 ++++++++++++------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/include/openPMD/binding/python/Numpy.hpp b/include/openPMD/binding/python/Numpy.hpp index f28ac8dbc2..8eead95e0d 100644 --- a/include/openPMD/binding/python/Numpy.hpp +++ b/include/openPMD/binding/python/Numpy.hpp @@ -35,45 +35,46 @@ inline Datatype dtype_from_numpy(pybind11::dtype const dt) { // ref: https://docs.scipy.org/doc/numpy/user/basics.types.html // ref: https://github.com/numpy/numpy/issues/10678#issuecomment-369363551 - if (dt.is(pybind11::dtype("b"))) + if (dt.char_() == pybind11::dtype("b").char_()) return Datatype::CHAR; - else if (dt.is(pybind11::dtype("B"))) + else if (dt.char_() == pybind11::dtype("B").char_()) return Datatype::UCHAR; - else if (dt.is(pybind11::dtype("short"))) + else if (dt.char_() == pybind11::dtype("short").char_()) return Datatype::SHORT; - else if (dt.is(pybind11::dtype("intc"))) + else if (dt.char_() == pybind11::dtype("intc").char_()) return Datatype::INT; - else if (dt.is(pybind11::dtype("int_"))) + else if (dt.char_() == pybind11::dtype("int_").char_()) return Datatype::LONG; - else if (dt.is(pybind11::dtype("longlong"))) + else if (dt.char_() == pybind11::dtype("longlong").char_()) return Datatype::LONGLONG; - else if 
(dt.is(pybind11::dtype("ushort"))) + else if (dt.char_() == pybind11::dtype("ushort").char_()) return Datatype::USHORT; - else if (dt.is(pybind11::dtype("uintc"))) + else if (dt.char_() == pybind11::dtype("uintc").char_()) return Datatype::UINT; - else if (dt.is(pybind11::dtype("uint"))) + else if (dt.char_() == pybind11::dtype("uint").char_()) return Datatype::ULONG; - else if (dt.is(pybind11::dtype("ulonglong"))) + else if (dt.char_() == pybind11::dtype("ulonglong").char_()) return Datatype::ULONGLONG; - else if (dt.is(pybind11::dtype("clongdouble"))) + else if (dt.char_() == pybind11::dtype("clongdouble").char_()) return Datatype::CLONG_DOUBLE; - else if (dt.is(pybind11::dtype("cdouble"))) + else if (dt.char_() == pybind11::dtype("cdouble").char_()) return Datatype::CDOUBLE; - else if (dt.is(pybind11::dtype("csingle"))) + else if (dt.char_() == pybind11::dtype("csingle").char_()) return Datatype::CFLOAT; - else if (dt.is(pybind11::dtype("longdouble"))) + else if (dt.char_() == pybind11::dtype("longdouble").char_()) return Datatype::LONG_DOUBLE; - else if (dt.is(pybind11::dtype("double"))) + else if (dt.char_() == pybind11::dtype("double").char_()) return Datatype::DOUBLE; - else if (dt.is(pybind11::dtype("single"))) + else if (dt.char_() == pybind11::dtype("single").char_()) return Datatype::FLOAT; - else if (dt.is(pybind11::dtype("bool"))) + else if (dt.char_() == pybind11::dtype("bool").char_()) return Datatype::BOOL; else { pybind11::print(dt); throw std::runtime_error( - "Datatype '...' 
not known in 'dtype_from_numpy'!"); // _s.format(dt) + std::string("Datatype '") + dt.char_() + + std::string("' not known in 'dtype_from_numpy'!")); // _s.format(dt) } } From f9663b693b99447d53780d2865549473b0830e75 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Fri, 13 Jan 2023 18:51:36 +0100 Subject: [PATCH 27/82] Enable empty string attributes (#1338) * Enable empty string attributes * Disable empty strings exclusively for ADIOS1 --- include/openPMD/backend/Attributable.hpp | 68 +-------- src/IO/ADIOS/CommonADIOS1IOHandler.cpp | 10 ++ src/IO/HDF5/HDF5Auxiliary.cpp | 5 +- src/IO/HDF5/HDF5IOHandler.cpp | 4 +- src/backend/Attributable.cpp | 186 +++++------------------ test/ParallelIOTest.cpp | 1 - test/SerialIOTest.cpp | 27 +++- 7 files changed, 82 insertions(+), 219 deletions(-) diff --git a/include/openPMD/backend/Attributable.hpp b/include/openPMD/backend/Attributable.hpp index 007b821f5c..90da6655bf 100644 --- a/include/openPMD/backend/Attributable.hpp +++ b/include/openPMD/backend/Attributable.hpp @@ -81,39 +81,6 @@ namespace internal A_MAP m_attributes; }; - enum class SetAttributeMode : char - { - WhileReadingAttributes, - FromPublicAPICall - }; - - /** Verify values of attributes in the frontend - * - * verify string attributes are not empty (backend restriction, e.g., HDF5) - */ - template - inline void attr_value_check( - std::string const /* key */, T /* value */, SetAttributeMode) - {} - - template <> - inline void attr_value_check( - std::string const key, std::string const value, SetAttributeMode mode) - { - switch (mode) - { - case SetAttributeMode::FromPublicAPICall: - if (value.empty()) - throw std::runtime_error( - "[setAttribute] Value for string attribute '" + key + - "' must not be empty!"); - break; - case SetAttributeMode::WhileReadingAttributes: - // no checks while reading - break; - } - } - template class BaseRecordData; } // namespace internal @@ -289,12 +256,6 @@ OPENPMD_protected void 
flushAttributes(internal::FlushParams const &); - template - bool setAttributeImpl( - std::string const &key, T value, internal::SetAttributeMode); - bool setAttributeImpl( - std::string const &key, char const value[], internal::SetAttributeMode); - enum ReadMode { /** @@ -435,30 +396,11 @@ OPENPMD_protected virtual void linkHierarchy(Writable &w); }; // Attributable -template -inline bool Attributable::setAttribute(std::string const &key, T value) -{ - return setAttributeImpl( - key, std::move(value), internal::SetAttributeMode::FromPublicAPICall); -} - -inline bool -Attributable::setAttribute(std::string const &key, char const value[]) -{ - return setAttributeImpl( - key, value, internal::SetAttributeMode::FromPublicAPICall); -} - // note: we explicitly instantiate Attributable::setAttributeImpl for all T in // Datatype in Attributable.cpp template -inline bool Attributable::setAttributeImpl( - std::string const &key, - T value, - internal::SetAttributeMode setAttributeMode) +inline bool Attributable::setAttribute(std::string const &key, T value) { - internal::attr_value_check(key, value, setAttributeMode); - auto &attri = get(); if (IOHandler() && IOHandler()->m_seriesStatus == internal::SeriesStatus::Default && @@ -487,12 +429,10 @@ inline bool Attributable::setAttributeImpl( } } -inline bool Attributable::setAttributeImpl( - std::string const &key, - char const value[], - internal::SetAttributeMode setAttributeMode) +inline bool +Attributable::setAttribute(std::string const &key, char const value[]) { - return this->setAttributeImpl(key, std::string(value), setAttributeMode); + return this->setAttribute(key, std::string(value)); } template diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp index 5041355fe9..f4c0d7ec45 100644 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -235,6 +235,11 @@ void CommonADIOS1IOHandlerImpl::flush_attribute( } case DT::STRING: { auto const &v = 
att.get(); + if (v.empty()) + { + error::throwOperationUnsupportedInBackend( + "ADIOS1", "Empty string attributes not supported."); + } values = auxiliary::allocatePtr(Datatype::CHAR, v.length() + 1u); strcpy((char *)values.get(), v.c_str()); break; @@ -352,6 +357,11 @@ void CommonADIOS1IOHandlerImpl::flush_attribute( auto const &vec = att.get >(); for (size_t i = 0; i < vec.size(); ++i) { + if (vec[i].empty()) + { + error::throwOperationUnsupportedInBackend( + "ADIOS1", "Empty string attributes not supported."); + } size_t size = vec[i].size() + 1; ptr[i] = new char[size]; strncpy(ptr[i], vec[i].c_str(), size); diff --git a/src/IO/HDF5/HDF5Auxiliary.cpp b/src/IO/HDF5/HDF5Auxiliary.cpp index db2f0d315b..3a6c9e7cd4 100644 --- a/src/IO/HDF5/HDF5Auxiliary.cpp +++ b/src/IO/HDF5/HDF5Auxiliary.cpp @@ -111,8 +111,7 @@ hid_t openPMD::GetH5DataType::operator()(Attribute const &att) m_userTypes.at(typeid(std::complex).name())); case DT::STRING: { hid_t string_t_id = H5Tcopy(H5T_C_S1); - size_t const max_len = att.get().size(); - VERIFY(max_len > 0, "[HDF5] max_len must be >0 for STRING"); + size_t const max_len = att.get().size() + 1; herr_t status = H5Tset_size(string_t_id, max_len); VERIFY( status >= 0, @@ -123,7 +122,7 @@ hid_t openPMD::GetH5DataType::operator()(Attribute const &att) hid_t string_t_id = H5Tcopy(H5T_C_S1); size_t max_len = 0; for (std::string const &s : att.get >()) - max_len = std::max(max_len, s.size()); + max_len = std::max(max_len, s.size() + 1); VERIFY(max_len > 0, "[HDF5] max_len must be >0 for VEC_STRING"); herr_t status = H5Tset_size(string_t_id, max_len); VERIFY( diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index db26b2098d..1516571069 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -1621,8 +1621,8 @@ void HDF5IOHandlerImpl::writeAttribute( auto vs = att.get >(); size_t max_len = 0; for (std::string const &s : vs) - max_len = std::max(max_len, s.size()); - std::unique_ptr c_str(new 
char[max_len * vs.size()]); + max_len = std::max(max_len, s.size() + 1); + std::unique_ptr c_str(new char[max_len * vs.size()]()); for (size_t i = 0; i < vs.size(); ++i) strncpy(c_str.get() + i * max_len, vs[i].c_str(), max_len); status = H5Awrite(attribute_id, dataType, c_str.get()); diff --git a/src/backend/Attributable.cpp b/src/backend/Attributable.cpp index 9010614c75..eb93888718 100644 --- a/src/backend/Attributable.cpp +++ b/src/backend/Attributable.cpp @@ -310,189 +310,99 @@ void Attributable::readAttributes(ReadMode mode) } std::array arr; std::copy_n(vector.begin(), 7, arr.begin()); - setAttributeImpl( - key, - std::move(arr), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(key, std::move(arr)); } else { - setAttributeImpl( - key, - std::move(vector), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(key, std::move(vector)); } }; switch (*aRead.dtype) { case DT::CHAR: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::UCHAR: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::SCHAR: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::SHORT: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::INT: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::LONG: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::LONGLONG: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::USHORT: - setAttributeImpl( - att, - a.get(), - 
internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::UINT: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::ULONG: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::ULONGLONG: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::FLOAT: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::DOUBLE: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::LONG_DOUBLE: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::CFLOAT: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::CDOUBLE: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::CLONG_DOUBLE: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::STRING: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::VEC_CHAR: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_SHORT: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_INT: - setAttributeImpl( - att, - a.get >(), - 
internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_LONG: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_LONGLONG: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_UCHAR: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_USHORT: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_UINT: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_ULONG: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_ULONGLONG: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::VEC_FLOAT: guardUnitDimension(att, a.get >()); @@ -504,46 +414,26 @@ void Attributable::readAttributes(ReadMode mode) guardUnitDimension(att, a.get >()); break; case DT::VEC_CFLOAT: - setAttributeImpl( - att, - a.get > >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get > >()); break; case DT::VEC_CDOUBLE: - setAttributeImpl( - att, - a.get > >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get > >()); break; case DT::VEC_CLONG_DOUBLE: - setAttributeImpl( - att, - a.get > >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute( + att, a.get > >()); break; case DT::VEC_SCHAR: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case 
DT::VEC_STRING: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::ARR_DBL_7: - setAttributeImpl( - att, - a.get >(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get >()); break; case DT::BOOL: - setAttributeImpl( - att, - a.get(), - internal::SetAttributeMode::WhileReadingAttributes); + setAttribute(att, a.get()); break; case DT::UNDEFINED: throw error::ReadError( diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index bc2b2c4db7..44039adb66 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -305,7 +305,6 @@ TEST_CASE("hdf5_write_test", "[parallel][hdf5]") Series o = Series("../samples/parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); - REQUIRE_THROWS_AS(o.setAuthor(""), std::runtime_error); o.setAuthor("Parallel HDF5"); ParticleSpecies &e = o.iterations[1].particles["e"]; diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index f4b1b0ebe0..b2490c6803 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -1251,6 +1251,7 @@ inline void dtype_test(const std::string &backend) bool test_long_long = (backend != "json") || sizeof(long long) <= 8; { Series s = Series("../samples/dtype_test." 
+ backend, Access::CREATE); + bool adios1 = s.backend() == "ADIOS1" || s.backend() == "MPI_ADIOS1"; char c = 'c'; s.setAttribute("char", c); @@ -1281,6 +1282,10 @@ inline void dtype_test(const std::string &backend) } std::string str = "string"; s.setAttribute("string", str); + if (!adios1) + { + s.setAttribute("emptyString", ""); + } s.setAttribute("vecChar", std::vector({'c', 'h', 'a', 'r'})); s.setAttribute("vecInt16", std::vector({32766, 32767})); s.setAttribute( @@ -1310,6 +1315,13 @@ inline void dtype_test(const std::string &backend) } s.setAttribute( "vecString", std::vector({"vector", "of", "strings"})); + if (!adios1) + { + s.setAttribute( + "vecEmptyString", std::vector{"", "", ""}); + s.setAttribute( + "vecMixedString", std::vector{"hi", "", "ho"}); + } s.setAttribute("bool", true); s.setAttribute("boolF", false); @@ -1369,6 +1381,7 @@ inline void dtype_test(const std::string &backend) } Series s = Series("../samples/dtype_test." + backend, Access::READ_ONLY); + bool adios1 = s.backend() == "ADIOS1" || s.backend() == "MPI_ADIOS1"; REQUIRE(s.getAttribute("char").get() == 'c'); REQUIRE(s.getAttribute("uchar").get() == 'u'); @@ -1386,6 +1399,10 @@ inline void dtype_test(const std::string &backend) REQUIRE(s.getAttribute("longdouble").get() == 1.e80L); } REQUIRE(s.getAttribute("string").get() == "string"); + if (!adios1) + { + REQUIRE(s.getAttribute("emptyString").get().empty()); + } REQUIRE( s.getAttribute("vecChar").get >() == std::vector({'c', 'h', 'a', 'r'})); @@ -1429,6 +1446,15 @@ inline void dtype_test(const std::string &backend) REQUIRE( s.getAttribute("vecString").get >() == std::vector({"vector", "of", "strings"})); + if (!adios1) + { + REQUIRE( + s.getAttribute("vecEmptyString").get >() == + std::vector({"", "", ""})); + REQUIRE( + s.getAttribute("vecMixedString").get >() == + std::vector({"hi", "", "ho"})); + } REQUIRE(s.getAttribute("bool").get() == true); REQUIRE(s.getAttribute("boolF").get() == false); @@ -2191,7 +2217,6 @@ inline void 
bool_test(const std::string &backend) { Series o = Series("../samples/serial_bool." + backend, Access::CREATE); - REQUIRE_THROWS_AS(o.setAuthor(""), std::runtime_error); o.setAttribute("Bool attribute true", true); o.setAttribute("Bool attribute false", false); } From 896b66da29b83a99057a3301577b0e1938a0891e Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 17 Jan 2023 14:20:28 -0800 Subject: [PATCH 28/82] CI: Cache Action v3 (#1358) Update the Cache action to the latest version. --- .github/workflows/linux.yml | 12 ++++++------ .github/workflows/tooling.yml | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 09424932dc..d4463cfd5d 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -13,7 +13,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: clang6_nopy_nompi_h5_libcpp_v2 } - name: Install run: | @@ -45,7 +45,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: clang6_nopy_ompi_h5_ad1_ad2_bp3_libcpp_v2 } - name: Install run: | @@ -88,7 +88,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: clang7_nopy_ompi_h5_ad2_v2 } - name: Install run: | @@ -122,7 +122,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: clang14_py311_nompi_h5_ad1_ad2_v2 } - name: Install run: | @@ -158,7 +158,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: clang8_py38_mpich_h5_ad1_ad2_newLayout_v2 } - name: Install run: | @@ -207,7 +207,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: 
actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: gcc7_py36_ompi_h5_ad1_ad2_v2 } - name: Install run: | diff --git a/.github/workflows/tooling.yml b/.github/workflows/tooling.yml index 65cdc05780..4814afa20e 100644 --- a/.github/workflows/tooling.yml +++ b/.github/workflows/tooling.yml @@ -14,7 +14,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: clangtidy10_nopy_ompi_h5_ad1_ad2 } - name: Install run: | @@ -42,7 +42,7 @@ jobs: steps: - uses: actions/checkout@v2 - name: Spack Cache - uses: actions/cache@v2 + uses: actions/cache@v3 with: {path: /opt/spack, key: clang10_nopy_ompi_h5_ad1_ad2 } - name: Install run: | From 669d16fc75cd2191c0eeed39552071ef49adc111 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Jan 2023 16:21:17 -0600 Subject: [PATCH 29/82] [pre-commit.ci] pre-commit autoupdate (#1361) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pygrep-hooks: v1.9.0 → v1.10.0](https://github.com/pre-commit/pygrep-hooks/compare/v1.9.0...v1.10.0) - [github.com/hadialqattan/pycln: v2.1.2 → v2.1.3](https://github.com/hadialqattan/pycln/compare/v2.1.2...v2.1.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4c90802507..dcedf8ebe9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -35,7 +35,7 @@ repos: # documentation files: .rst - repo: https://github.com/pre-commit/pygrep-hooks - rev: v1.9.0 + rev: v1.10.0 hooks: - id: rst-backticks - id: rst-directive-colons @@ -71,7 +71,7 @@ repos: # Autoremoves unused Python imports - repo: https://github.com/hadialqattan/pycln - rev: v2.1.2 + rev: v2.1.3 
hooks: - id: pycln name: pycln (python) From 0ddbbd6e47929c9244baacb1a550e445a46e8fa3 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 25 Jan 2023 09:35:03 -0800 Subject: [PATCH 30/82] [pre-commit.ci] pre-commit autoupdate (#1365) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/Lucas-C/pre-commit-hooks: v1.3.1 → v1.4.1](https://github.com/Lucas-C/pre-commit-hooks/compare/v1.3.1...v1.4.1) - [github.com/pre-commit/mirrors-clang-format: v15.0.6 → v15.0.7](https://github.com/pre-commit/mirrors-clang-format/compare/v15.0.6...v15.0.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index dcedf8ebe9..8d4f2d4004 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -48,7 +48,7 @@ repos: # Changes tabs to spaces - repo: https://github.com/Lucas-C/pre-commit-hooks - rev: v1.3.1 + rev: v1.4.1 hooks: - id: remove-tabs @@ -65,7 +65,7 @@ repos: # clang-format v13 # to run manually, use .github/workflows/clang-format/clang-format.sh - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v15.0.6 + rev: v15.0.7 hooks: - id: clang-format From 6bf4c06cab9f780bc5c2816a4b7987095680e76c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 25 Jan 2023 09:35:49 -0800 Subject: [PATCH 31/82] CI: actions/checkout@v3 (#1362) Update from outdated v2 --- .github/workflows/intel.yml | 4 ++-- .github/workflows/linux.yml | 18 +++++++++--------- .github/workflows/macos.yml | 2 +- .github/workflows/nvidia.yml | 4 ++-- .github/workflows/source.yml | 8 ++++---- .github/workflows/tooling.yml | 4 ++-- .github/workflows/windows.yml | 4 ++-- 7 files changed, 22 insertions(+), 22 deletions(-) diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml 
index 39a3d51e28..fa2e247153 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install run: | sudo .github/workflows/dependencies/install_icc @@ -39,7 +39,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install run: | sudo .github/workflows/dependencies/install_icx diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index d4463cfd5d..f19b4788d5 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -11,7 +11,7 @@ jobs: runs-on: ubuntu-18.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 with: {path: /opt/spack, key: clang6_nopy_nompi_h5_libcpp_v2 } @@ -43,7 +43,7 @@ jobs: runs-on: ubuntu-18.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 with: {path: /opt/spack, key: clang6_nopy_ompi_h5_ad1_ad2_bp3_libcpp_v2 } @@ -86,7 +86,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 with: {path: /opt/spack, key: clang7_nopy_ompi_h5_ad2_v2 } @@ -120,7 +120,7 @@ jobs: runs-on: ubuntu-22.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 with: {path: /opt/spack, key: clang14_py311_nompi_h5_ad1_ad2_v2 } @@ -156,7 +156,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: 
actions/cache@v3 with: {path: /opt/spack, key: clang8_py38_mpich_h5_ad1_ad2_newLayout_v2 } @@ -205,7 +205,7 @@ jobs: runs-on: ubuntu-18.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 with: {path: /opt/spack, key: gcc7_py36_ompi_h5_ad1_ad2_v2 } @@ -241,7 +241,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install run: | sudo apt-get update @@ -265,7 +265,7 @@ jobs: container: image: quay.io/pypa/musllinux_1_1_x86_64 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install run: | apk update @@ -288,7 +288,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: conda-incubator/setup-miniconda@v2 name: Setup conda with: diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 209dd4e2ab..56603c4c8a 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -16,7 +16,7 @@ jobs: runs-on: macos-latest if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Install run: | rm -rf /usr/local/bin/2to3 diff --git a/.github/workflows/nvidia.yml b/.github/workflows/nvidia.yml index 446872426b..b902949a4c 100644 --- a/.github/workflows/nvidia.yml +++ b/.github/workflows/nvidia.yml @@ -13,7 +13,7 @@ jobs: if: github.event.pull_request.draft == false env: {CXX: nvcc, CXXFLAGS: "--forward-unknown-to-host-compiler -Xcompiler -Werror"} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Dependencies run: .github/workflows/dependencies/install_nvcc11.sh - name: Build & Install @@ -39,7 +39,7 @@ jobs: # line 4314: error: variable "::autoRegistrar73" was declared but never referenced # env: {CXXFLAGS: "-Werror 
-Wno-deprecated-declarations"} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Dependencies run: .github/workflows/dependencies/install_nvhpc21-11.sh - name: Build & Install diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index 2e7f0a9b9a..4a76960656 100644 --- a/.github/workflows/source.yml +++ b/.github/workflows/source.yml @@ -10,7 +10,7 @@ jobs: style: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Non-ASCII Characters run: .github/workflows/source/hasNonASCII - name: TABs @@ -26,7 +26,7 @@ jobs: runs-on: ubuntu-22.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: pyflakes run: | python3 -m pip install -U pyflakes @@ -35,7 +35,7 @@ jobs: documentation: runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: s-weigand/setup-conda@v1.1.1 with: update-conda: true @@ -49,7 +49,7 @@ jobs: runs-on: ubuntu-latest if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: urls-checker uses: urlstechie/urlchecker-action@master with: diff --git a/.github/workflows/tooling.yml b/.github/workflows/tooling.yml index 4814afa20e..9b46734578 100644 --- a/.github/workflows/tooling.yml +++ b/.github/workflows/tooling.yml @@ -12,7 +12,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 with: {path: /opt/spack, key: clangtidy10_nopy_ompi_h5_ad1_ad2 } @@ -40,7 +40,7 @@ jobs: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 with: {path: /opt/spack, key: clang10_nopy_ompi_h5_ad1_ad2 } diff --git a/.github/workflows/windows.yml 
b/.github/workflows/windows.yml index e3eb4c671a..e64249bf5b 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -12,7 +12,7 @@ jobs: runs-on: windows-latest if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - name: Build & Install run: | python3.exe -m pip install --upgrade pip @@ -34,7 +34,7 @@ jobs: runs-on: windows-2019 if: github.event.pull_request.draft == false steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: seanmiddleditch/gha-setup-ninja@master - name: Build & Install shell: cmd From 4acd768b82d8a712923da72b78d7e90ee7d46f08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Wed, 25 Jan 2023 18:48:30 +0100 Subject: [PATCH 32/82] Add error/warning for changing datatype (#1356) --- src/IO/ADIOS/ADIOS2IOHandler.cpp | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index d768ec5b6b..2d7713a54e 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -1745,6 +1745,29 @@ namespace detail } else if (attributeModifiable()) { + if (detail::fromADIOS2Type(t) != + basicDatatype(determineDatatype())) + { + if (impl->m_engineType == "bp5") + { + throw error::OperationUnsupportedInBackend( + "ADIOS2", + "Attempting to change datatype of attribute '" + + fullName + + "'. In the BP5 engine, this will lead to " + "corrupted " + "datasets."); + } + else + { + std::cerr << "[ADIOS2] Attempting to change datatype " + "of attribute '" + << fullName + << "'. This invokes undefined behavior. Will " + "proceed." 
+ << std::endl; + } + } IO.RemoveAttribute(fullName); } else From 5a7b78e9009bf6e51c4282a8669a8ef3d1649249 Mon Sep 17 00:00:00 2001 From: Junmin Gu Date: Wed, 25 Jan 2023 12:36:47 -0800 Subject: [PATCH 33/82] Parallel Write Benchmark: Fix 1D (#1359) * fixed a bug in 1D test cases * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Update 8a_benchmark_write_parallel.cpp Removed commented code * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- examples/8a_benchmark_write_parallel.cpp | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/examples/8a_benchmark_write_parallel.cpp b/examples/8a_benchmark_write_parallel.cpp index f28676e143..82c32ce73a 100644 --- a/examples/8a_benchmark_write_parallel.cpp +++ b/examples/8a_benchmark_write_parallel.cpp @@ -701,6 +701,7 @@ void AbstractPattern::run() store(series, step); } } + return; } { // group/var based @@ -1004,22 +1005,19 @@ bool OneDimPattern::setLayOut(int step) return true; auto numPartition = m_Input.GetSeg(); - if (unitCount < numPartition) - numPartition = unitCount; - auto avg = unitCount / numPartition; - for (unsigned int i = 0; i < numPartition; i++) + if (1 == numPartition) { Offset offset = {unitOffset * m_MinBlock[0]}; - if (i < (numPartition - 1)) - { - Extent count = {avg * m_MinBlock[0]}; - m_InRankMeshLayout.emplace_back(offset, count); - } - else + Extent count = {unitCount * m_MinBlock[0]}; + m_InRankMeshLayout.emplace_back(offset, count); + } + else + { + Extent count = {m_MinBlock[0]}; + for (unsigned long i = 0; i < unitCount; i++) { - auto res = unitCount - avg * (numPartition - 1); - Extent count = {res * m_MinBlock[0]}; + Offset offset = {(unitOffset + i) * m_MinBlock[0]}; m_InRankMeshLayout.emplace_back(offset, count); } } From 
ccc03ad11c21811c641950159c74d0922bded27d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Thu, 2 Feb 2023 10:05:31 -0800 Subject: [PATCH 34/82] [pre-commit.ci] pre-commit autoupdate (#1366) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/Lucas-C/pre-commit-hooks: v1.4.1 → v1.4.2](https://github.com/Lucas-C/pre-commit-hooks/compare/v1.4.1...v1.4.2) - [github.com/pycqa/isort: 5.11.4 → 5.12.0](https://github.com/pycqa/isort/compare/5.11.4...5.12.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8d4f2d4004..bc2cefbda7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -48,7 +48,7 @@ repos: # Changes tabs to spaces - repo: https://github.com/Lucas-C/pre-commit-hooks - rev: v1.4.1 + rev: v1.4.2 hooks: - id: remove-tabs @@ -79,7 +79,7 @@ repos: # Sorts Python imports according to PEP8 # https://www.python.org/dev/peps/pep-0008/#imports - repo: https://github.com/pycqa/isort - rev: 5.11.4 + rev: 5.12.0 hooks: - id: isort name: isort (python) From e5353045e514bfb7494aba88bb268ea7018e2c9e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Thu, 2 Feb 2023 20:12:22 +0100 Subject: [PATCH 35/82] Iteration.time and Iteration.dt: Prefer double (#1369) --- src/binding/python/Iteration.cpp | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/src/binding/python/Iteration.cpp b/src/binding/python/Iteration.cpp index 480de17e7e..98f0f7c87f 100644 --- a/src/binding/python/Iteration.cpp +++ b/src/binding/python/Iteration.cpp @@ -44,30 +44,31 @@ void init_Iteration(py::module &m) return ss.str(); }) + /* + * Purposefully only using setTime and setDt here. 
+ * Python does not let you select the overload anyway and uses the one + * that was last defined in Pybind. + * So, set a sensible default: double, since long double is not + * cross-platform compatible. + */ .def_property( - "time", &Iteration::time, &Iteration::setTime) + "time", &Iteration::time, &Iteration::setTime) .def_property( "time", &Iteration::time, &Iteration::setTime) .def_property( - "time", - &Iteration::time, - &Iteration::setTime) - .def_property("dt", &Iteration::dt, &Iteration::setDt) + "time", &Iteration::time, &Iteration::setTime) + .def_property("dt", &Iteration::dt, &Iteration::setDt) .def_property("dt", &Iteration::dt, &Iteration::setDt) .def_property( - "dt", &Iteration::dt, &Iteration::setDt) + "dt", &Iteration::dt, &Iteration::setDt) .def_property( "time_unit_SI", &Iteration::timeUnitSI, &Iteration::setTimeUnitSI) .def("open", &Iteration::open) .def("close", &Iteration::close, py::arg("flush") = true) // TODO remove in future versions (deprecated) - .def("set_time", &Iteration::setTime) .def("set_time", &Iteration::setTime) - .def("set_time", &Iteration::setTime) - .def("set_dt", &Iteration::setDt) .def("set_dt", &Iteration::setDt) - .def("set_dt", &Iteration::setDt) .def("set_time_unit_SI", &Iteration::setTimeUnitSI) .def_readwrite( From 24f9a90c3c62862e0f1e9d0b469e3738c57f9a34 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 10 Feb 2023 17:53:17 -0800 Subject: [PATCH 36/82] LoadChunk: Better Error Message (Type) (#1373) Give a better error message on type mismatches during `loadChunk(...)`. 
--- include/openPMD/RecordComponent.tpp | 9 +++++++-- test/CoreTest.cpp | 8 ++++++-- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 671932e470..8e6d2c8775 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -101,8 +101,13 @@ inline void RecordComponent::loadChunk( if( !isSameInteger< T >( getDatatype() ) && !isSameFloatingPoint< T >( getDatatype() ) && !isSameComplexFloatingPoint< T >( getDatatype() ) ) - throw std::runtime_error( - "Type conversion during chunk loading not yet implemented" ); + { + std::string const data_type_str = datatypeToString(getDatatype()); + std::string const requ_type_str = datatypeToString(determineDatatype()); + std::string err_msg = "Type conversion during chunk loading not yet implemented! "; + err_msg += "Data: " + data_type_str + "; Load as: " + requ_type_str; + throw std::runtime_error( err_msg ); + } uint8_t dim = getDimensionality(); diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 96aaebde9b..821c29b614 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -1230,12 +1230,16 @@ TEST_CASE("load_chunk_wrong_datatype", "[core]") } { Series read("../samples/some_float_value.json", Access::READ_ONLY); + + std::string const err_msg = + "Type conversion during chunk loading not yet implemented! " + "Data: FLOAT; Load as: DOUBLE"; + REQUIRE_THROWS_WITH( read.iterations[0] .meshes["rho"][RecordComponent::SCALAR] .loadChunk({0}, {10}), - Catch::Equals( - "Type conversion during chunk loading not yet implemented")); + Catch::Equals(err_msg)); } } From 289902cd5f9c220e1c81fd4fef078d5a052930dd Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 10 Feb 2023 17:53:33 -0800 Subject: [PATCH 37/82] Pre-Commit: Smaller Files & NB (#1370) - Reduce threshold of "large file" from 50KB to 80B. - Remove Jupyter notebook output if present. 
--- .pre-commit-config.yaml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bc2cefbda7..d66a4662f6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -29,6 +29,7 @@ repos: - id: check-yaml args: [--allow-multiple-documents] - id: check-added-large-files + args: ['--maxkb=80'] - id: requirements-txt-fixer # - id: fix-encoding-pragma # exclude: ^noxfile.py$ @@ -107,6 +108,16 @@ repos: # additional_dependencies: # - black==21.10b0 # keep in sync with black hook +# Jupyter Notebooks: clean up all cell outputs +- repo: https://github.com/roy-ht/pre-commit-jupyter + rev: v1.2.1 + hooks: + - id: jupyter-notebook-cleanup + args: + - --pin-patterns + - "[pin];[donotremove]" + # - --remove-kernel-metadata + # Checks the manifest for missing files (native support) - repo: https://github.com/mgedmin/check-manifest rev: "0.49" From 71b3e941ee54fca41339ffb12b195a750e129b0c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Mon, 13 Feb 2023 19:55:40 +0100 Subject: [PATCH 38/82] New Access type READ_LINEAR (#1291) * Prepare scaffolding for Access::ReadLinearly Use access::readOnly for runtime checks in backends * Adapt frontend to linear read mode * Make OPEN_FILE task parameters writable Needed so backends can give feedback on their preferred parse mode. 
* ADIOS2 implementation * Necessary fixes for tests Commit with proper tests will follow later, this is just to highlight the API changes brought forth by this PR (API changes only for things that were dev and/or experimental) * Add and adapt Python bindings * Testing * Documentation: C++ and Python * rst documentation * Make SeriesIterator into a handle * Reading attributes without needing to open the first step * Don't write attributes too early * Delete old iterations in READ_LINEAR mode * DEREGISTER task --- docs/source/dev/design.rst | 2 +- docs/source/usage/streaming.rst | 2 + docs/source/usage/workflow.rst | 29 +- examples/10_streaming_read.cpp | 2 +- examples/10_streaming_read.py | 2 +- include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp | 74 +- .../IO/ADIOS/CommonADIOS1IOHandler.hpp | 4 +- include/openPMD/IO/AbstractIOHandlerImpl.hpp | 19 +- include/openPMD/IO/Access.hpp | 97 ++- include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp | 4 +- include/openPMD/IO/IOTask.hpp | 53 +- include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp | 5 +- include/openPMD/Iteration.hpp | 8 +- include/openPMD/ReadIterations.hpp | 48 +- include/openPMD/Series.hpp | 15 +- include/openPMD/backend/ParsePreference.hpp | 31 + include/openPMD/backend/Writable.hpp | 2 +- src/IO/ADIOS/ADIOS1IOHandler.cpp | 57 +- src/IO/ADIOS/ADIOS2IOHandler.cpp | 630 ++++++++++++++---- src/IO/ADIOS/CommonADIOS1IOHandler.cpp | 31 +- src/IO/ADIOS/ParallelADIOS1IOHandler.cpp | 55 +- src/IO/HDF5/HDF5IOHandler.cpp | 38 +- src/IO/JSON/JSONIOHandlerImpl.cpp | 38 +- src/Iteration.cpp | 69 +- src/Mesh.cpp | 11 +- src/ParticleSpecies.cpp | 11 +- src/ReadIterations.cpp | 286 ++++++-- src/Record.cpp | 11 +- src/RecordComponent.cpp | 12 +- src/Series.cpp | 96 ++- src/backend/PatchRecordComponent.cpp | 11 +- src/backend/Writable.cpp | 15 + src/binding/python/Access.cpp | 66 +- .../python/openpmd_api/pipe/__main__.py | 4 +- test/ParallelIOTest.cpp | 275 +++++++- test/SerialIOTest.cpp | 350 ++++++---- 36 files changed, 1823 insertions(+), 
640 deletions(-) create mode 100644 include/openPMD/backend/ParsePreference.hpp mode change 100755 => 100644 src/binding/python/openpmd_api/pipe/__main__.py diff --git a/docs/source/dev/design.rst b/docs/source/dev/design.rst index e01816b612..ce43777407 100644 --- a/docs/source/dev/design.rst +++ b/docs/source/dev/design.rst @@ -23,7 +23,7 @@ Therefore, enabling users to handle hierarchical, self-describing file formats w .. literalinclude:: IOTask.hpp :language: cpp - :lines: 44-62 + :lines: 48-78 Every task is designed to be a fully self-contained description of one such atomic operation. By describing a required minimal step of work (without any side-effect), these operations are the foundation of the unified handling mechanism across suitable file formats. The actual low-level exchange of data is implemented in ``IOHandlers``, one per file format (possibly two if handling MPI-parallel work is possible and requires different behaviour). diff --git a/docs/source/usage/streaming.rst b/docs/source/usage/streaming.rst index c72c1121db..d70b929389 100644 --- a/docs/source/usage/streaming.rst +++ b/docs/source/usage/streaming.rst @@ -22,6 +22,7 @@ C++ ^^^ The reading end of the streaming API is activated through use of ``Series::readIterations()`` instead of accessing the field ``Series::iterations`` directly. +Use of ``Access::READ_LINEAR`` mode is recommended. The returned object of type ``ReadIterations`` can be used in a C++11 range-based for loop to iterate over objects of type ``IndexedIteration``. This class extends the ``Iteration`` class with a field ``IndexedIteration::iterationIndex``, denoting this iteration's index. @@ -40,6 +41,7 @@ Python ^^^^^^ The reading end of the streaming API is activated through use of ``Series.read_iterations()`` instead of accessing the field ``Series.iterations`` directly. +Use of ``Access.read_linear`` mode is recommended.
The returned object of type ``ReadIterations`` can be used in a Python range-based for loop to iterate over objects of type ``IndexedIteration``. This class extends the ``Iteration`` class with a field ``IndexedIteration.iteration_index``, denoting this iteration's index. diff --git a/docs/source/usage/workflow.rst b/docs/source/usage/workflow.rst index ac5398b4cf..64194629ba 100644 --- a/docs/source/usage/workflow.rst +++ b/docs/source/usage/workflow.rst @@ -7,14 +7,41 @@ The openPMD-api distinguishes between a number of different access modes: * **Create mode**: Used for creating a new Series from scratch. Any file possibly existing in the specified location will be overwritten. -* **Read-only mode**: Used for reading from an existing Series. +* Two distinct read modes: **Read-random-access mode** and **Read-linear mode**. + (Specification of **Read-only mode** is equivalent to read-random-access mode.) + Both modes are used for reading from an existing Series. No modifications will be made. + + Differences between both modes: + + * When intending to use ``Series::readIterations()`` (i.e. step-by-step reading of iterations, e.g. in streaming), then **linear read mode** is preferred and always supported. + Data is parsed inside ``Series::readIterations()``, no data is available right after opening the Series. + Global attributes are available directly after calling ``Series::readIterations()``, Iterations and all their corresponding data become available by use of the returned Iterator, e.g. in a foreach loop. + * Otherwise (i.e. for random-access workflows), **random-access read mode** is required, but works only in backends that support random access. + Data is parsed and available right after opening the Series. + + In both modes, parsing of iterations can be deferred with the JSON/TOML option ``defer_iteration_parsing``. + + Detailed rules: + + 1. In backends that have no notion of IO steps (all except ADIOS2), *random-access read mode* can always be used. 
+ 2. In backends that can be accessed either in random-access or step-by-step, the chosen access mode decides which approach is used. + Examples are the BP4 and BP5 engines of ADIOS2. + 3. In streaming backends, random-access is not possible. + When using such a backend, the access mode will be coerced automatically to *linear read mode*. + Use of Series::readIterations() is mandatory for access. + 4. Reading a variable-based Series is only fully supported with *linear access mode*. + If using *random-access read mode*, the dataset will be considered to only have one single step. + If the dataset only has one single step, this is guaranteed to work as expected. + Otherwise, it is undefined which step's data is returned. + * **Read/Write mode**: Creates a new Series if not existing, otherwise opens an existing Series for reading and writing. New datasets and iterations will be inserted as needed. Not fully supported by all backends: * ADIOS1: Automatically coerced to *Create* mode if the file does not exist yet and to *Read-only* mode if it exists. * ADIOS2: Automatically coerced to *Create* mode if the file does not exist yet and to *Read-only* mode if it exists. + Since this happens on a per-file level, this mode allows to read from existing iterations and write to new iterations at the same time in file-based iteration encoding. * **Append mode**: Restricted mode for appending new iterations to an existing Series that is supported by all backends at least in file-based iteration encoding, and by all but ADIOS1 in other encodings. The API is equivalent to that of the *Create* mode, meaning that no reading is supported whatsoever. 
diff --git a/examples/10_streaming_read.cpp b/examples/10_streaming_read.cpp index e271dd4393..a7e503a055 100644 --- a/examples/10_streaming_read.cpp +++ b/examples/10_streaming_read.cpp @@ -19,7 +19,7 @@ int main() return 0; } - Series series = Series("electrons.sst", Access::READ_ONLY); + Series series = Series("electrons.sst", Access::READ_LINEAR); for (IndexedIteration iteration : series.readIterations()) { diff --git a/examples/10_streaming_read.py b/examples/10_streaming_read.py index d6f7a6e67b..4cd29c46b2 100755 --- a/examples/10_streaming_read.py +++ b/examples/10_streaming_read.py @@ -17,7 +17,7 @@ print("SST engine not available in ADIOS2.") sys.exit(0) - series = io.Series("simData.sst", io.Access_Type.read_only, + series = io.Series("simData.sst", io.Access_Type.read_linear, json.dumps(config)) # Read all available iterations and print electron position data. diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index 7c0dd1a44e..309648b782 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -164,7 +164,7 @@ class ADIOS2IOHandlerImpl void extendDataset( Writable *, Parameter const &) override; - void openFile(Writable *, Parameter const &) override; + void openFile(Writable *, Parameter &) override; void closeFile(Writable *, Parameter const &) override; @@ -213,6 +213,10 @@ class ADIOS2IOHandlerImpl void availableChunks( Writable *, Parameter &) override; + + void + deregister(Writable *, Parameter const &) override; + /** * @brief The ADIOS2 access type to chose for Engines opened * within this instance. @@ -248,7 +252,10 @@ class ADIOS2IOHandlerImpl */ std::string m_userSpecifiedExtension; - ADIOS2Schema::schema_t m_schema = ADIOS2Schema::schema_0000_00_00; + /* + * Empty option: No schema has been explicitly selected, use default. 
+ */ + std::optional m_schema; enum class UseSpan : char { @@ -267,7 +274,11 @@ class ADIOS2IOHandlerImpl inline SupportedSchema schema() const { - switch (m_schema) + if (!m_schema.has_value()) + { + return SupportedSchema::s_0000_00_00; + } + switch (m_schema.value()) { case ADIOS2Schema::schema_0000_00_00: return SupportedSchema::s_0000_00_00; @@ -276,7 +287,7 @@ class ADIOS2IOHandlerImpl default: throw std::runtime_error( "[ADIOS2] Encountered unsupported schema version: " + - std::to_string(m_schema)); + std::to_string(m_schema.value())); } } @@ -331,11 +342,11 @@ class ADIOS2IOHandlerImpl * @return first parameter: the operators, second parameters: whether * operators have been configured */ - std::optional > + std::optional> getOperators(json::TracingJSON config); // use m_config - std::optional > getOperators(); + std::optional> getOperators(); std::string fileSuffix(bool verbose = true) const; @@ -361,7 +372,7 @@ class ADIOS2IOHandlerImpl */ std::unordered_map< InvalidatableFile, - std::unique_ptr > + std::unique_ptr> m_fileData; std::map m_operators; @@ -455,8 +466,8 @@ namespace detail template inline constexpr bool IsUnsupportedComplex_v = - std::is_same_v > || - std::is_same_v > >; + std::is_same_v> || + std::is_same_v>>; struct DatasetReader { @@ -581,7 +592,8 @@ namespace detail Parameter ¶ms, adios2::IO &IO, adios2::Engine &engine, - std::string const &varName); + std::string const &varName, + bool allSteps); template static void call(Params &&...); @@ -630,7 +642,7 @@ namespace detail }; template <> - struct AttributeTypes > + struct AttributeTypes> { static void createAttribute( adios2::IO &, @@ -663,13 +675,13 @@ namespace detail }; template <> - struct AttributeTypes > > + struct AttributeTypes>> { static void createAttribute( adios2::IO &, adios2::Engine &, detail::BufferedAttributeWrite &, - const std::vector > &) + const std::vector> &) { throw std::runtime_error( "[ADIOS2] Internal error: no support for long double complex " @@ -687,7 +699,7 
@@ namespace detail } static bool attributeUnchanged( - adios2::IO &, std::string, std::vector >) + adios2::IO &, std::string, std::vector>) { throw std::runtime_error( "[ADIOS2] Internal error: no support for long double complex " @@ -696,7 +708,7 @@ namespace detail }; template - struct AttributeTypes > + struct AttributeTypes> { static void createAttribute( adios2::IO &IO, @@ -734,7 +746,7 @@ namespace detail }; template <> - struct AttributeTypes > + struct AttributeTypes> { static void createAttribute( adios2::IO &IO, @@ -772,7 +784,7 @@ namespace detail }; template - struct AttributeTypes > + struct AttributeTypes> { static void createAttribute( adios2::IO &IO, @@ -986,14 +998,14 @@ namespace detail * Hence, next to the actual file name, also store the name for the * IO. */ - std::string const m_IOName; + std::string m_IOName; adios2::ADIOS &m_ADIOS; adios2::IO m_IO; /** * The default queue for deferred actions. * Drained upon BufferedActions::flush(). */ - std::vector > m_buffer; + std::vector> m_buffer; /** * Buffer for attributes to be written in the new (variable-based) * attribute layout. @@ -1017,7 +1029,7 @@ namespace detail * We must store them somewhere until the next PerformPuts/Gets, EndStep * or Close in ADIOS2 to avoid use after free conditions. */ - std::vector > m_alreadyEnqueued; + std::vector> m_alreadyEnqueued; adios2::Mode m_mode; /** * The base pointer of an ADIOS2 span might change after reallocations. @@ -1027,7 +1039,7 @@ namespace detail * retrieval of the updated base pointer. * This map is cleared upon flush points. 
*/ - std::map > m_updateSpans; + std::map> m_updateSpans; PreloadAdiosAttributes preloadAttributes; /* @@ -1049,6 +1061,10 @@ namespace detail */ bool optimizeAttributesStreaming = false; + using ParsePreference = + Parameter::ParsePreference; + ParsePreference parsePreference = ParsePreference::UpFront; + using AttributeMap_t = std::map; BufferedActions(ADIOS2IOHandlerImpl &impl, InvalidatableFile file); @@ -1257,13 +1273,6 @@ namespace detail */ std::string m_engineType; - /** - * See documentation for StreamStatus::Parsing. - * Will be set true under the circumstance described there in order to - * indicate that the first step should only be opened after parsing. - */ - bool delayOpeningTheFirstStep = false; - /* * ADIOS2 does not give direct access to its internal attribute and * variable maps, but will instead give access to copies of them. @@ -1279,6 +1288,11 @@ namespace detail std::optional m_availableAttributes; std::optional m_availableVariables; + /* + * Cannot write attributes right after opening the engine + * https://github.com/ornladios/ADIOS2/issues/3433 + */ + bool initializedDefaults = false; /* * finalize() will set this true to avoid running twice. 
*/ @@ -1289,7 +1303,11 @@ namespace detail return m_impl->schema(); } + void create_IO(); + void configure_IO(ADIOS2IOHandlerImpl &impl); + void configure_IO_Read(std::optional userSpecifiedUsesteps); + void configure_IO_Write(std::optional userSpecifiedUsesteps); using AttributeLayout = ADIOS2IOHandlerImpl::AttributeLayout; inline AttributeLayout attributeLayout() const diff --git a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp index 9f43b9ae18..9fbe0e0e87 100644 --- a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp @@ -59,7 +59,7 @@ class CommonADIOS1IOHandlerImpl : public AbstractIOHandlerImpl Writable *, Parameter const &) override; void extendDataset( Writable *, Parameter const &) override; - void openFile(Writable *, Parameter const &) override; + void openFile(Writable *, Parameter &) override; void closeFile(Writable *, Parameter const &) override; void availableChunks( @@ -84,6 +84,8 @@ class CommonADIOS1IOHandlerImpl : public AbstractIOHandlerImpl void listDatasets(Writable *, Parameter &) override; void listAttributes(Writable *, Parameter &) override; + void + deregister(Writable *, Parameter const &) override; void close(int64_t); void close(ADIOS_FILE *); diff --git a/include/openPMD/IO/AbstractIOHandlerImpl.hpp b/include/openPMD/IO/AbstractIOHandlerImpl.hpp index 79e9b35739..925c5f83a0 100644 --- a/include/openPMD/IO/AbstractIOHandlerImpl.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImpl.hpp @@ -202,6 +202,12 @@ class AbstractIOHandlerImpl deref_dynamic_cast >( i.parameter.get())); break; + case O::DEREGISTER: + deregister( + i.writable, + deref_dynamic_cast >( + i.parameter.get())); + break; } } catch (...) @@ -354,8 +360,7 @@ class AbstractIOHandlerImpl * root group "/" of the hierarchy in the opened file. The Writable should * be marked written when the operation completes successfully. 
*/ - virtual void - openFile(Writable *, Parameter const &) = 0; + virtual void openFile(Writable *, Parameter &) = 0; /** Open all contained groups in a path, possibly recursively. * * The operation should overwrite existing file positions, even when the @@ -567,6 +572,16 @@ class AbstractIOHandlerImpl void keepSynchronous(Writable *, Parameter param); + /** Notify the backend that the Writable has been / will be deallocated. + * + * The backend should remove all references to this Writable from internal + * data structures. Subtle bugs might be possible if not doing this, since + * new objects might be allocated to the now-freed address. + * The Writable pointer must not be dereferenced. + */ + virtual void + deregister(Writable *, Parameter const ¶m) = 0; + AbstractIOHandler *m_handler; }; // AbstractIOHandlerImpl } // namespace openPMD diff --git a/include/openPMD/IO/Access.hpp b/include/openPMD/IO/Access.hpp index 2b5d37f260..eddd9f40e9 100644 --- a/include/openPMD/IO/Access.hpp +++ b/include/openPMD/IO/Access.hpp @@ -20,18 +20,111 @@ */ #pragma once +#include + namespace openPMD { /** File access mode to use during IO. */ enum class Access { - READ_ONLY, //!< open series as read-only, fails if series is not found - READ_WRITE, //!< open existing series as writable + /** + * Open Series as read-only, fails if Series is not found. + * When to use READ_ONLY or READ_LINEAR: + * + * * When intending to use Series::readIterations() + * (i.e. step-by-step reading of iterations, e.g. in streaming), + * then Access::READ_LINEAR is preferred and always supported. + * Data is parsed inside Series::readIterations(), no data is available + * right after opening the Series. + * * Otherwise (i.e. for random-access workflows), Access::READ_ONLY + * is required, but works only in backends that support random access. + * Data is parsed and available right after opening the Series. 
+ * + * In both modes, parsing of iterations can be deferred with the JSON/TOML + * option `defer_iteration_parsing`. + * + * Detailed rules: + * + * 1. In backends that have no notion of IO steps (all except ADIOS2), + * Access::READ_ONLY can always be used. + * 2. In backends that can be accessed either in random-access or + * step-by-step, the chosen access mode decides which approach is used. + * Examples are the BP4 and BP5 engines of ADIOS2. + * 3. In streaming backends, random-access is not possible. + * When using such a backend, the access mode will be coerced + * automatically to Access::READ_LINEAR. Use of Series::readIterations() + * is mandatory for access. + * 4. Reading a variable-based Series is only fully supported with + * Access::READ_LINEAR. + * If using Access::READ_ONLY, the dataset will be considered to only + * have one single step. + * If the dataset only has one single step, this is guaranteed to work + * as expected. Otherwise, it is undefined which step's data is returned. + */ + READ_ONLY, + READ_RANDOM_ACCESS = READ_ONLY, //!< more explicit alias for READ_ONLY + /* + * Open Series as read-only, fails if Series is not found. + * This access mode requires use of Series::readIterations(). + * Global attributes are available directly after calling + * Series::readIterations(), Iterations and all their corresponding data + * become available by use of the returned Iterator, e.g. in a foreach loop. + * See Access::READ_ONLY for when to use this. + */ + READ_LINEAR, + /** + * Open existing Series as writable. + * Read mode corresponds with Access::READ_RANDOM_ACCESS. 
+ */ + READ_WRITE, CREATE, //!< create new series and truncate existing (files) APPEND //!< write new iterations to an existing series without reading }; // Access +namespace access +{ + inline bool readOnly(Access access) + { + switch (access) + { + case Access::READ_LINEAR: + case Access::READ_ONLY: + return true; + case Access::READ_WRITE: + case Access::CREATE: + case Access::APPEND: + return false; + } + throw std::runtime_error("Unreachable!"); + } + + inline bool write(Access access) + { + return !readOnly(access); + } + + inline bool writeOnly(Access access) + { + switch (access) + { + case Access::READ_LINEAR: + case Access::READ_ONLY: + case Access::READ_WRITE: + return false; + case Access::CREATE: + case Access::APPEND: + return true; + } + throw std::runtime_error("Unreachable!"); + } + + inline bool read(Access access) + { + return !writeOnly(access); + } +} // namespace access + // deprecated name (used prior to 0.12.0) // note: "using old [[deprecated(msg)]] = new;" is still badly supported, thus // using typedef diff --git a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp index 7502e36e3f..d8d564c281 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp @@ -52,7 +52,7 @@ class HDF5IOHandlerImpl : public AbstractIOHandlerImpl Writable *, Parameter const &) override; void availableChunks( Writable *, Parameter &) override; - void openFile(Writable *, Parameter const &) override; + void openFile(Writable *, Parameter &) override; void closeFile(Writable *, Parameter const &) override; void openPath(Writable *, Parameter const &) override; @@ -75,6 +75,8 @@ class HDF5IOHandlerImpl : public AbstractIOHandlerImpl void listDatasets(Writable *, Parameter &) override; void listAttributes(Writable *, Parameter &) override; + void + deregister(Writable *, Parameter const &) override; std::unordered_map m_fileNames; std::unordered_map m_fileNamesWithID; diff 
--git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp index 88c7d0380b..cf2cf520e5 100644 --- a/include/openPMD/IO/IOTask.hpp +++ b/include/openPMD/IO/IOTask.hpp @@ -27,6 +27,7 @@ #include "openPMD/auxiliary/Export.hpp" #include "openPMD/auxiliary/Variant.hpp" #include "openPMD/backend/Attribute.hpp" +#include "openPMD/backend/ParsePreference.hpp" #include #include @@ -44,21 +45,37 @@ Writable *getWritable(Attributable *); /** Type of IO operation between logical and persistent data. */ OPENPMDAPI_EXPORT_ENUM_CLASS(Operation){ - CREATE_FILE, CHECK_FILE, OPEN_FILE, CLOSE_FILE, + CREATE_FILE, + CHECK_FILE, + OPEN_FILE, + CLOSE_FILE, DELETE_FILE, - CREATE_PATH, CLOSE_PATH, OPEN_PATH, DELETE_PATH, + CREATE_PATH, + CLOSE_PATH, + OPEN_PATH, + DELETE_PATH, LIST_PATHS, - CREATE_DATASET, EXTEND_DATASET, OPEN_DATASET, DELETE_DATASET, - WRITE_DATASET, READ_DATASET, LIST_DATASETS, GET_BUFFER_VIEW, + CREATE_DATASET, + EXTEND_DATASET, + OPEN_DATASET, + DELETE_DATASET, + WRITE_DATASET, + READ_DATASET, + LIST_DATASETS, + GET_BUFFER_VIEW, - DELETE_ATT, WRITE_ATT, READ_ATT, LIST_ATTS, + DELETE_ATT, + WRITE_ATT, + READ_ATT, + LIST_ATTS, ADVANCE, AVAILABLE_CHUNKS, //!< Query chunks that can be loaded in a dataset - KEEP_SYNCHRONOUS //!< Keep two items in the object model synchronous with - //!< each other + KEEP_SYNCHRONOUS, //!< Keep two items in the object model synchronous with + //!< each other + DEREGISTER //!< Inform the backend that an object has been deleted. }; // note: if you change the enum members here, please update // docs/source/dev/design.rst @@ -151,7 +168,10 @@ struct OPENPMDAPI_EXPORT Parameter { Parameter() = default; Parameter(Parameter const &p) - : AbstractParameter(), name(p.name), encoding(p.encoding) + : AbstractParameter() + , name(p.name) + , encoding(p.encoding) + , out_parsePreference(p.out_parsePreference) {} std::unique_ptr clone() const override @@ -167,6 +187,9 @@ struct OPENPMDAPI_EXPORT Parameter * variableBased encoding. 
*/ IterationEncoding encoding = IterationEncoding::groupBased; + using ParsePreference = internal::ParsePreference; + std::shared_ptr out_parsePreference = + std::make_shared(ParsePreference::UpFront); }; template <> @@ -673,6 +696,20 @@ struct OPENPMDAPI_EXPORT Parameter Writable *otherWritable; }; +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter +{ + Parameter() = default; + Parameter(Parameter const &) : AbstractParameter() + {} + + std::unique_ptr clone() const override + { + return std::make_unique>(*this); + } +}; + /** @brief Self-contained description of a single IO operation. * * Contained are diff --git a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp index 738891f33e..4c68004bc7 100644 --- a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp @@ -174,7 +174,7 @@ class JSONIOHandlerImpl : public AbstractIOHandlerImpl void availableChunks( Writable *, Parameter &) override; - void openFile(Writable *, Parameter const &) override; + void openFile(Writable *, Parameter &) override; void closeFile(Writable *, Parameter const &) override; @@ -212,6 +212,9 @@ class JSONIOHandlerImpl : public AbstractIOHandlerImpl void listAttributes(Writable *, Parameter &) override; + void + deregister(Writable *, Parameter const &) override; + std::future flush(); private: diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index 179ade29fe..3c7fffc545 100644 --- a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -31,6 +31,7 @@ #include #include #include +#include #include namespace openPMD @@ -338,8 +339,11 @@ class Iteration : public Attributable * Useful in group-based iteration encoding where the Iteration will only * be known after opening the step. 
*/ - static BeginStepStatus - beginStep(std::optional thisObject, Series &series, bool reread); + static BeginStepStatus beginStep( + std::optional thisObject, + Series &series, + bool reread, + std::set const &ignoreIterations = {}); /** * @brief End an IO step on the IO file (or file-like object) diff --git a/include/openPMD/ReadIterations.hpp b/include/openPMD/ReadIterations.hpp index 7d6266e4f0..c6a1e4fc36 100644 --- a/include/openPMD/ReadIterations.hpp +++ b/include/openPMD/ReadIterations.hpp @@ -22,10 +22,12 @@ #include "openPMD/Iteration.hpp" #include "openPMD/Series.hpp" +#include "openPMD/backend/ParsePreference.hpp" #include #include #include +#include namespace openPMD { @@ -55,15 +57,34 @@ class SeriesIterator using maybe_series_t = std::optional; - maybe_series_t m_series; - std::deque m_iterationsInCurrentStep; - uint64_t m_currentIteration{}; + struct SharedData + { + SharedData() = default; + SharedData(SharedData const &) = delete; + SharedData(SharedData &&) = delete; + SharedData &operator=(SharedData const &) = delete; + SharedData &operator=(SharedData &&) = delete; + + maybe_series_t series; + std::deque iterationsInCurrentStep; + uint64_t currentIteration{}; + std::optional parsePreference; + /* + * Necessary because in the old ADIOS2 schema, old iterations' metadata + * will leak into new steps, making the frontend think that the groups + * are still there and the iterations can be parsed again. + */ + std::set ignoreIterations; + }; + + std::shared_ptr m_data; public: //! construct the end() iterator explicit SeriesIterator(); - SeriesIterator(Series); + SeriesIterator( + Series, std::optional parsePreference); SeriesIterator &operator++(); @@ -78,7 +99,8 @@ class SeriesIterator private: inline bool setCurrentIteration() { - if (m_iterationsInCurrentStep.empty()) + auto &data = *m_data; + if (data.iterationsInCurrentStep.empty()) { std::cerr << "[ReadIterations] Encountered a step without " "iterations. Closing the Series." 
@@ -86,19 +108,20 @@ class SeriesIterator *this = end(); return false; } - m_currentIteration = *m_iterationsInCurrentStep.begin(); + data.currentIteration = *data.iterationsInCurrentStep.begin(); return true; } inline std::optional peekCurrentIteration() { - if (m_iterationsInCurrentStep.empty()) + auto &data = *m_data; + if (data.iterationsInCurrentStep.empty()) { return std::nullopt; } else { - return {*m_iterationsInCurrentStep.begin()}; + return {*data.iterationsInCurrentStep.begin()}; } } @@ -119,6 +142,8 @@ class SeriesIterator std::optional loopBody(); void deactivateDeadIteration(iteration_index_t); + + void initSeriesInLinearReadMode(); }; /** @@ -146,8 +171,13 @@ class ReadIterations using iterator_t = SeriesIterator; Series m_series; + std::optional alreadyOpened; + std::optional m_parsePreference; - ReadIterations(Series); + ReadIterations( + Series, + Access, + std::optional parsePreference); public: iterator_t begin(); diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp index 30b618db40..7b85986992 100644 --- a/include/openPMD/Series.hpp +++ b/include/openPMD/Series.hpp @@ -30,6 +30,7 @@ #include "openPMD/auxiliary/Variant.hpp" #include "openPMD/backend/Attributable.hpp" #include "openPMD/backend/Container.hpp" +#include "openPMD/backend/ParsePreference.hpp" #include "openPMD/config.hpp" #include "openPMD/version.hpp" @@ -159,6 +160,14 @@ namespace internal */ bool m_lastFlushSuccessful = false; + /** + * Remember the preference that the backend specified for parsing. + * Not used in file-based iteration encoding, empty then. + * In linear read mode, parsing only starts after calling + * Series::readIterations(), empty before that point. + */ + std::optional m_parsePreference; + void close(); }; // SeriesData @@ -618,8 +627,10 @@ OPENPMD_private * ReadIterations since those methods should be aware when the current step * is broken). 
*/ - std::optional > - readGorVBased(bool do_always_throw_errors, bool init); + std::optional > readGorVBased( + bool do_always_throw_errors, + bool init, + std::set const &ignoreIterations = {}); void readBase(); std::string iterationFilename(IterationIndex_t i); diff --git a/include/openPMD/backend/ParsePreference.hpp b/include/openPMD/backend/ParsePreference.hpp new file mode 100644 index 0000000000..cb68d78f99 --- /dev/null +++ b/include/openPMD/backend/ParsePreference.hpp @@ -0,0 +1,31 @@ +/* Copyright 2023 Franz Poeschel + * + * This file is part of openPMD-api. + * + * openPMD-api is free software: you can redistribute it and/or modify + * it under the terms of of either the GNU General Public License or + * the GNU Lesser General Public License as published by + * the Free Software Foundation, either version 3 of the License, or + * (at your option) any later version. + * + * openPMD-api is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License and the GNU Lesser General Public License + * for more details. + * + * You should have received a copy of the GNU General Public License + * and the GNU Lesser General Public License along with openPMD-api. + * If not, see . 
+ */ + +#pragma once + +namespace openPMD::internal +{ +enum class ParsePreference : char +{ + UpFront, //m_handler->m_backendAccess != Access::READ_ONLY) + if (access::write(m_handler->m_backendAccess)) { for (auto &group : m_attributeWrites) for (auto &att : group.second) @@ -102,49 +102,55 @@ std::future ADIOS1IOHandlerImpl::flush() case O::CREATE_FILE: createFile( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CHECK_FILE: checkFile( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CREATE_PATH: createPath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::OPEN_PATH: openPath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CREATE_DATASET: createDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::WRITE_ATT: writeAttribute( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::OPEN_FILE: openFile( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::KEEP_SYNCHRONOUS: keepSynchronous( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DEREGISTER: + deregister( + i.writable, + deref_dynamic_cast>( i.parameter.get())); break; default: @@ -183,19 +189,19 @@ std::future ADIOS1IOHandlerImpl::flush() case O::EXTEND_DATASET: extendDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CLOSE_PATH: closePath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::OPEN_DATASET: openDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CLOSE_FILE: @@ -207,79 +213,79 @@ std::future ADIOS1IOHandlerImpl::flush() case O::DELETE_FILE: deleteFile( i.writable, - deref_dynamic_cast 
>( + deref_dynamic_cast>( i.parameter.get())); break; case O::DELETE_PATH: deletePath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::DELETE_DATASET: deleteDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::DELETE_ATT: deleteAttribute( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::WRITE_DATASET: writeDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::READ_DATASET: readDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::GET_BUFFER_VIEW: getBufferView( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::READ_ATT: readAttribute( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::LIST_PATHS: listPaths( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::LIST_DATASETS: listDatasets( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::LIST_ATTS: listAttributes( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::ADVANCE: advance( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::AVAILABLE_CHUNKS: availableChunks( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; default: @@ -366,6 +372,7 @@ void ADIOS1IOHandler::enqueue(IOTask const &i) case Operation::OPEN_FILE: case Operation::WRITE_ATT: case Operation::KEEP_SYNCHRONOUS: + case Operation::DEREGISTER: m_setup.push(i); return; default: diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index 2d7713a54e..76bbddd708 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -64,6 +64,8 @@ namespace openPMD #if 
openPMD_HAVE_ADIOS2 +#define HAS_ADIOS_2_8 (ADIOS2_VERSION_MAJOR * 100 + ADIOS2_VERSION_MINOR >= 208) + #if openPMD_HAVE_MPI ADIOS2IOHandlerImpl::ADIOS2IOHandlerImpl( @@ -146,7 +148,11 @@ void ADIOS2IOHandlerImpl::init(json::TracingJSON cfg) [](unsigned char c) { return std::tolower(c); }); // environment-variable based configuration - m_schema = auxiliary::getEnvNum("OPENPMD2_ADIOS2_SCHEMA", m_schema); + if (int schemaViaEnv = auxiliary::getEnvNum("OPENPMD2_ADIOS2_SCHEMA", -1); + schemaViaEnv != -1) + { + m_schema = schemaViaEnv; + } if (cfg.json().contains("adios2")) { @@ -471,7 +477,7 @@ void ADIOS2IOHandlerImpl::createFile( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[ADIOS2] Creating a file in read-only mode is not possible."); if (!writable->written) @@ -596,7 +602,7 @@ void ADIOS2IOHandlerImpl::createPath( void ADIOS2IOHandlerImpl::createDataset( Writable *writable, const Parameter ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) { throw std::runtime_error( "[ADIOS2] Creating a dataset in a file opened as read " @@ -608,7 +614,7 @@ void ADIOS2IOHandlerImpl::createDataset( std::string name = auxiliary::removeSlashes(parameters.name); auto const file = - refreshFileFromParent(writable, /* preferParentFile = */ false); + refreshFileFromParent(writable, /* preferParentFile = */ true); auto filePos = setAndGetFilePosition(writable, name); filePos->gd = ADIOS2FilePosition::GD::DATASET; auto const varName = nameOfVariable(writable); @@ -679,7 +685,7 @@ void ADIOS2IOHandlerImpl::extendDataset( Writable *writable, const Parameter ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[ADIOS2] Cannot extend datasets in read-only mode."); setAndGetFilePosition(writable); auto file = refreshFileFromParent(writable, /* 
preferParentFile = */ false); @@ -691,7 +697,7 @@ void ADIOS2IOHandlerImpl::extendDataset( } void ADIOS2IOHandlerImpl::openFile( - Writable *writable, const Parameter ¶meters) + Writable *writable, Parameter ¶meters) { if (!auxiliary::directory_exists(m_handler->directory)) { @@ -714,7 +720,8 @@ void ADIOS2IOHandlerImpl::openFile( m_iterationEncoding = parameters.encoding; // enforce opening the file // lazy opening is deathly in parallel situations - getFileData(file, IfFileNotOpen::OpenImplicitly); + auto &fileData = getFileData(file, IfFileNotOpen::OpenImplicitly); + *parameters.out_parsePreference = fileData.parsePreference; } void ADIOS2IOHandlerImpl::closeFile( @@ -741,6 +748,8 @@ void ADIOS2IOHandlerImpl::closeFile( /* flushUnconditionally = */ false); m_fileData.erase(it); } + m_dirty.erase(fileIterator->second); + m_files.erase(fileIterator); } } @@ -770,7 +779,7 @@ void ADIOS2IOHandlerImpl::openDataset( writable->abstractFilePosition.reset(); auto pos = setAndGetFilePosition(writable, name); pos->gd = ADIOS2FilePosition::GD::DATASET; - auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ true); auto varName = nameOfVariable(writable); *parameters.dtype = detail::fromADIOS2Type(getFileData(file, IfFileNotOpen::ThrowError) @@ -810,7 +819,7 @@ void ADIOS2IOHandlerImpl::writeDataset( Writable *writable, const Parameter ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[ADIOS2] Cannot write data in read-only mode."); setAndGetFilePosition(writable); auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); @@ -839,7 +848,7 @@ void ADIOS2IOHandlerImpl::writeAttribute( break; case AttributeLayout::ByAdiosVariables: { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[ADIOS2] Cannot write attribute in read-only 
mode."); auto pos = setAndGetFilePosition(writable); auto file = @@ -987,6 +996,8 @@ void ADIOS2IOHandlerImpl::getBufferView( break; } + ba.requireActiveStep(); + if (parameters.update) { detail::I_UpdateSpan &updater = @@ -1262,7 +1273,7 @@ void ADIOS2IOHandlerImpl::listAttributes( void ADIOS2IOHandlerImpl::advance( Writable *writable, Parameter ¶meters) { - auto file = m_files[writable]; + auto file = m_files.at(writable); auto &ba = getFileData(file, IfFileNotOpen::ThrowError); *parameters.status = ba.advance(parameters.mode, /* calledExplicitly = */ true); @@ -1274,7 +1285,7 @@ void ADIOS2IOHandlerImpl::closePath( VERIFY_ALWAYS( writable->written, "[ADIOS2] Cannot close a path that has not been written yet."); - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) { // nothing to do return; @@ -1308,8 +1319,21 @@ void ADIOS2IOHandlerImpl::availableChunks( std::string varName = nameOfVariable(writable); auto engine = ba.getEngine(); // make sure that data are present auto datatype = detail::fromADIOS2Type(ba.m_IO.VariableType(varName)); + bool allSteps = m_handler->m_frontendAccess != Access::READ_LINEAR && + ba.streamStatus == detail::BufferedActions::StreamStatus::NoStream; switchAdios2VariableType( - datatype, parameters, ba.m_IO, engine, varName); + datatype, + parameters, + ba.m_IO, + engine, + varName, + /* allSteps = */ allSteps); +} + +void ADIOS2IOHandlerImpl::deregister( + Writable *writable, Parameter const &) +{ + m_files.erase(writable); } adios2::Mode ADIOS2IOHandlerImpl::adios2AccessMode(std::string const &fullPath) @@ -1318,13 +1342,25 @@ adios2::Mode ADIOS2IOHandlerImpl::adios2AccessMode(std::string const &fullPath) { case Access::CREATE: return adios2::Mode::Write; +#if HAS_ADIOS_2_8 + case Access::READ_LINEAR: + return adios2::Mode::Read; + case Access::READ_ONLY: + return adios2::Mode::ReadRandomAccess; +#else + case Access::READ_LINEAR: case Access::READ_ONLY: return adios2::Mode::Read; 
+#endif case Access::READ_WRITE: if (auxiliary::directory_exists(fullPath) || auxiliary::file_exists(fullPath)) { +#if HAS_ADIOS_2_8 + return adios2::Mode::ReadRandomAccess; +#else return adios2::Mode::Read; +#endif } else { @@ -1713,7 +1749,7 @@ namespace detail const Parameter ¶meters) { VERIFY_ALWAYS( - impl->m_handler->m_backendAccess != Access::READ_ONLY, + access::write(impl->m_handler->m_backendAccess), "[ADIOS2] Cannot write attribute in read-only mode."); auto pos = impl->setAndGetFilePosition(writable); auto file = impl->refreshFileFromParent( @@ -1908,7 +1944,7 @@ namespace detail adios2::Engine &engine) { VERIFY_ALWAYS( - impl->m_handler->m_backendAccess != Access::READ_ONLY, + access::write(impl->m_handler->m_backendAccess), "[ADIOS2] Cannot write data in read-only mode."); auto ptr = std::static_pointer_cast(bp.param.data).get(); @@ -1977,26 +2013,48 @@ namespace detail Parameter ¶ms, adios2::IO &IO, adios2::Engine &engine, - std::string const &varName) + std::string const &varName, + bool allSteps) { auto var = IO.InquireVariable(varName); - auto blocksInfo = engine.BlocksInfo(var, engine.CurrentStep()); auto &table = *params.chunks; - table.reserve(blocksInfo.size()); - for (auto const &info : blocksInfo) + auto addBlocksInfo = [&table](auto const &blocksInfo_) { + for (auto const &info : blocksInfo_) + { + Offset offset; + Extent extent; + auto size = info.Start.size(); + offset.reserve(size); + extent.reserve(size); + for (unsigned i = 0; i < size; ++i) + { + offset.push_back(info.Start[i]); + extent.push_back(info.Count[i]); + } + table.emplace_back( + std::move(offset), std::move(extent), info.WriterID); + } + }; + if (allSteps) { - Offset offset; - Extent extent; - auto size = info.Start.size(); - offset.reserve(size); - extent.reserve(size); - for (unsigned i = 0; i < size; ++i) + auto allBlocks = var.AllStepsBlocksInfo(); + table.reserve(std::accumulate( + allBlocks.begin(), + allBlocks.end(), + size_t(0), + [](size_t acc, auto const &block) 
{ + return acc + block.size(); + })); + for (auto const &blocksInfo : allBlocks) { - offset.push_back(info.Start[i]); - extent.push_back(info.Count[i]); + addBlocksInfo(blocksInfo); } - table.emplace_back( - std::move(offset), std::move(extent), info.WriterID); + } + else + { + auto blocksInfo = engine.BlocksInfo(var, engine.CurrentStep()); + table.reserve(blocksInfo.size()); + addBlocksInfo(blocksInfo); } } @@ -2382,7 +2440,6 @@ namespace detail BufferedActions::BufferedActions( ADIOS2IOHandlerImpl &impl, InvalidatableFile file) : m_file(impl.fullPath(std::move(file))) - , m_IOName(std::to_string(impl.nameCounter++)) , m_ADIOS(impl.m_ADIOS) , m_impl(&impl) , m_engineType(impl.m_engineType) @@ -2390,8 +2447,8 @@ namespace detail // Declaring these members in the constructor body to avoid // initialization order hazards. Need the IO_ prefix since in some // situation there seems to be trouble with number-only IO names - m_IO = impl.m_ADIOS.DeclareIO("IO_" + m_IOName); m_mode = impl.adios2AccessMode(m_file); + create_IO(); if (!m_IO) { throw std::runtime_error( @@ -2405,6 +2462,12 @@ namespace detail } } + void BufferedActions::create_IO() + { + m_IOName = std::to_string(m_impl->nameCounter++); + m_IO = m_impl->m_ADIOS.DeclareIO("IO_" + m_IOName); + } + BufferedActions::~BufferedActions() { finalize(); @@ -2451,99 +2514,298 @@ namespace detail finalized = true; } - void BufferedActions::configure_IO(ADIOS2IOHandlerImpl &impl) + namespace { - (void)impl; - static std::set streamingEngines = { - "sst", - "insitumpi", - "inline", - "staging", - "nullcore", - "ssc", - "filestream", - "bp5"}; - // diskStreamingEngines is a subset of streamingEngines - static std::set diskStreamingEngines{"bp5", "filestream"}; - static std::set fileEngines = { - "bp4", "bp3", "hdf5", "file"}; + constexpr char const *alwaysSupportsUpfrontParsing[] = {"bp3", "hdf5"}; + constexpr char const *supportsUpfrontParsingInRandomAccessMode[] = { + "bp4", "bp5", "file", "filestream"}; + constexpr char 
const *nonPersistentEngines[] = { + "sst", "insitumpi", "inline", "staging", "nullcore", "ssc"}; - // step/variable-based iteration encoding requires the new schema - if (m_impl->m_iterationEncoding == IterationEncoding::variableBased) + bool supportedEngine(std::string const &engineType) { - m_impl->m_schema = ADIOS2Schema::schema_2021_02_09; + auto is_in_list = [&engineType](auto &list) { + for (auto const &e : list) + { + if (engineType == e) + { + return true; + } + } + return false; + }; + return is_in_list(alwaysSupportsUpfrontParsing) || + is_in_list(supportsUpfrontParsingInRandomAccessMode) || + is_in_list(nonPersistentEngines); } - // set engine type - bool isStreaming = false; + bool + supportsUpfrontParsing(Access access, std::string const &engineType) { - m_IO.SetEngine(m_engineType); - auto it = streamingEngines.find(m_engineType); - if (it != streamingEngines.end()) - { - isStreaming = true; - optimizeAttributesStreaming = - // Optimizing attributes in streaming mode is not needed in - // the variable-based ADIOS2 schema - schema() == SupportedSchema::s_0000_00_00 && - // Also, it should only be done when truly streaming, not - // when using a disk-based engine that behaves like a - // streaming engine (otherwise attributes might vanish) - diskStreamingEngines.find(m_engineType) == - diskStreamingEngines.end(); - streamStatus = StreamStatus::OutsideOfStep; + for (auto const &e : alwaysSupportsUpfrontParsing) + { + if (e == engineType) + { + return true; + } } - else + if (access != Access::READ_LINEAR) { - it = fileEngines.find(m_engineType); - if (it != fileEngines.end()) + for (auto const &e : supportsUpfrontParsingInRandomAccessMode) { - switch (m_mode) + if (e == engineType) { - case adios2::Mode::Read: - /* - * File engines, read mode: - * Use of steps is dictated by what is detected in the - * file being read. 
- */ - streamStatus = StreamStatus::Undecided; - delayOpeningTheFirstStep = true; - break; - case adios2::Mode::Write: - case adios2::Mode::Append: - /* - * File engines, write mode: - * Default for old layout is no steps. - * Default for new layout is to use steps. - */ - switch (schema()) - { - case SupportedSchema::s_0000_00_00: - streamStatus = StreamStatus::NoStream; - break; - case SupportedSchema::s_2021_02_09: - streamStatus = StreamStatus::OutsideOfStep; - break; - } - break; - default: - throw std::runtime_error("Unreachable!"); + return true; } - optimizeAttributesStreaming = false; } - else + } + return false; + } + + enum class PerstepParsing + { + Supported, + Unsupported, + Required + }; + + PerstepParsing + supportsPerstepParsing(Access access, std::string const &engineType) + { + // required in all streaming engines + for (auto const &e : nonPersistentEngines) + { + if (engineType == e) { - throw std::runtime_error( - "[ADIOS2IOHandler] Unknown engine type. Please choose " - "one out of [sst, staging, bp4, bp3, hdf5, file, " - "filestream, null]"); - // not listing unsupported engines + return PerstepParsing::Required; } } + // supported in file engines in READ_LINEAR mode + if (access != Access::READ_RANDOM_ACCESS) + { + return PerstepParsing::Supported; + } + + return PerstepParsing::Unsupported; + } + + bool nonpersistentEngine(std::string const &engineType) + { + for (auto &e : nonPersistentEngines) + { + if (e == engineType) + { + return true; + } + } + return false; + } + + bool + useStepsInWriting(SupportedSchema schema, std::string const &engineType) + { + if (engineType == "bp5") + { + /* + * BP5 does not require steps when reading, but it requires + * them when writing. 
+ */ + return true; + } + switch (supportsPerstepParsing(Access::CREATE, engineType)) + { + case PerstepParsing::Required: + return true; + case PerstepParsing::Supported: + switch (schema) + { + case SupportedSchema::s_0000_00_00: + return false; + case SupportedSchema::s_2021_02_09: + return true; + } + break; + case PerstepParsing::Unsupported: + return false; + } + return false; // unreachable + } + } // namespace + + void BufferedActions::configure_IO_Read( + std::optional userSpecifiedUsesteps) + { + if (userSpecifiedUsesteps.has_value() && + m_impl->m_handler->m_backendAccess != Access::READ_WRITE) + { + std::cerr << "Explicitly specified `adios2.usesteps` in Read mode. " + "Usage of steps will be determined by what is found " + "in the file being read." + << std::endl; + } + + bool upfrontParsing = supportsUpfrontParsing( + m_impl->m_handler->m_backendAccess, m_engineType); + PerstepParsing perstepParsing = supportsPerstepParsing( + m_impl->m_handler->m_backendAccess, m_engineType); + + switch (m_impl->m_handler->m_backendAccess) + { + case Access::READ_LINEAR: + switch (perstepParsing) + { + case PerstepParsing::Supported: + case PerstepParsing::Required: + // all is fine, we can go forward with READ_LINEAR mode + /* + * We don't know yet if per-step parsing will be fine since the + * engine is not opened yet. + * In non-persistent (streaming) engines, per-step parsing is + * always fine and always required. + */ + streamStatus = nonpersistentEngine(m_engineType) + ? StreamStatus::OutsideOfStep + : StreamStatus::Undecided; + parsePreference = ParsePreference::PerStep; + m_IO.SetParameter("StreamReader", "On"); + break; + case PerstepParsing::Unsupported: + streamStatus = StreamStatus::NoStream; + parsePreference = ParsePreference::UpFront; + /* + * Note that in BP4 with linear access mode, we set the + * StreamReader option, disabling upfrontParsing capability. 
+ * So, this branch is only taken by niche engines, such as + * BP3 or HDF5, or by BP5 with old ADIOS2 schema and normal read + * mode. Need to fall back to random access parsing. + */ +#if HAS_ADIOS_2_8 + m_mode = adios2::Mode::ReadRandomAccess; +#endif + break; + } + break; + case Access::READ_ONLY: + case Access::READ_WRITE: + /* + * Prefer up-front parsing, but try to fallback to per-step parsing + * if possible. + */ + if (upfrontParsing == nonpersistentEngine(m_engineType)) + { + throw error::Internal( + "Internal control flow error: With access types " + "READ_ONLY/READ_WRITE, support for upfront parsing is " + "equivalent to the chosen engine being file-based."); + } + if (upfrontParsing) + { + streamStatus = StreamStatus::NoStream; + parsePreference = ParsePreference::UpFront; + } + else + { + /* + * Scenario: A step-only workflow was used (i.e. a streaming + * engine), but Access::READ_ONLY was specified. + * Fall back to streaming read mode. + */ + m_mode = adios2::Mode::Read; + parsePreference = ParsePreference::PerStep; + streamStatus = StreamStatus::OutsideOfStep; + } + break; + default: + VERIFY_ALWAYS( + access::writeOnly(m_impl->m_handler->m_backendAccess), + "Internal control flow error: Must set parse preference for " + "any read mode."); + } + } + + void BufferedActions::configure_IO_Write( + std::optional userSpecifiedUsesteps) + { + optimizeAttributesStreaming = + // Optimizing attributes in streaming mode is not needed in + // the variable-based ADIOS2 schema + schema() == SupportedSchema::s_0000_00_00 && + // Also, it should only be done when truly streaming, not + // when using a disk-based engine that behaves like a + // streaming engine (otherwise attributes might vanish) + nonpersistentEngine(m_engineType); + + bool useSteps = useStepsInWriting(schema(), m_engineType); + if (userSpecifiedUsesteps.has_value()) + { + useSteps = userSpecifiedUsesteps.value(); + if (!useSteps && nonpersistentEngine(m_engineType)) + { + throw 
error::WrongAPIUsage( + "Cannot switch off IO steps for non-persistent stream " + "engines in ADIOS2."); + } + } + + streamStatus = + useSteps ? StreamStatus::OutsideOfStep : StreamStatus::NoStream; + } + + void BufferedActions::configure_IO(ADIOS2IOHandlerImpl &impl) + { + // step/variable-based iteration encoding requires the new schema + // but new schema is available only in ADIOS2 >= v2.8 + // use old schema to support at least one single iteration otherwise + if (!m_impl->m_schema.has_value()) + { + switch (m_impl->m_iterationEncoding) + { + case IterationEncoding::variableBased: + m_impl->m_schema = ADIOS2Schema::schema_2021_02_09; + break; + case IterationEncoding::groupBased: + case IterationEncoding::fileBased: + m_impl->m_schema = ADIOS2Schema::schema_0000_00_00; + break; + } + } + + // set engine type + { + m_IO.SetEngine(m_engineType); + } + + if (!supportedEngine(m_engineType)) + { + std::stringstream sstream; + sstream + << "User-selected ADIOS2 engine '" << m_engineType + << "' is not recognized by the openPMD-api. Select one of: '"; + bool first_entry = true; + auto add_entries = [&first_entry, &sstream](auto &list) { + for (auto const &e : list) + { + if (first_entry) + { + sstream << e; + first_entry = false; + } + else + { + sstream << ", " << e; + } + } + }; + add_entries(alwaysSupportsUpfrontParsing); + add_entries(supportsUpfrontParsingInRandomAccessMode); + add_entries(nonPersistentEngines); + sstream << "'." 
<< std::endl; + throw error::WrongAPIUsage(sstream.str()); } // set engine parameters std::set alreadyConfigured; + std::optional userSpecifiedUsesteps; auto engineConfig = impl.config(ADIOS2Defaults::str_engine); if (!engineConfig.json().is_null()) { @@ -2575,14 +2837,8 @@ namespace detail if (!_useAdiosSteps.json().is_null() && m_mode != adios2::Mode::Read) { - bool tmp = _useAdiosSteps.json(); - if (isStreaming && !bool(tmp)) - { - throw std::runtime_error( - "Cannot switch off steps for streaming engines."); - } - streamStatus = bool(tmp) ? StreamStatus::OutsideOfStep - : StreamStatus::NoStream; + userSpecifiedUsesteps = + std::make_optional(_useAdiosSteps.json().get()); } if (engineConfig.json().contains(ADIOS2Defaults::str_flushtarget)) @@ -2621,6 +2877,36 @@ namespace detail } } } + + switch (m_impl->m_handler->m_backendAccess) + { + case Access::READ_LINEAR: + case Access::READ_ONLY: + configure_IO_Read(userSpecifiedUsesteps); + break; + case Access::READ_WRITE: + if ( +#if HAS_ADIOS_2_8 + m_mode == adios2::Mode::Read || + m_mode == adios2::Mode::ReadRandomAccess +#else + m_mode == adios2::Mode::Read +#endif + ) + { + configure_IO_Read(userSpecifiedUsesteps); + } + else + { + configure_IO_Write(userSpecifiedUsesteps); + } + break; + case Access::APPEND: + case Access::CREATE: + configure_IO_Write(userSpecifiedUsesteps); + break; + } + auto notYetConfigured = [&alreadyConfigured](std::string const ¶m) { auto it = alreadyConfigured.find( auxiliary::lowerCase(std::string(param))); @@ -2752,48 +3038,80 @@ namespace detail // usesSteps attribute only written upon ::advance() // this makes sure that the attribute is only put in case // the streaming API was used. 
- m_IO.DefineAttribute( - ADIOS2Defaults::str_adios2Schema, m_impl->m_schema); m_engine = std::make_optional( adios2::Engine(m_IO.Open(m_file, tempMode))); break; } +#if HAS_ADIOS_2_8 + case adios2::Mode::ReadRandomAccess: +#endif case adios2::Mode::Read: { m_engine = std::make_optional( adios2::Engine(m_IO.Open(m_file, m_mode))); - // decide attribute layout - // in streaming mode, this needs to be done after opening - // a step - // in file-based mode, we do it before - auto layoutVersion = [IO{m_IO}]() mutable { - auto attr = IO.InquireAttribute( + /* + * First round: decide attribute layout. + * This MUST occur before the `switch(streamStatus)` construct + * since the streamStatus might be changed after taking a look + * at the used schema. + */ + bool openedANewStep = false; + { + if (!supportsUpfrontParsing( + m_impl->m_handler->m_backendAccess, m_engineType)) + { + /* + * In BP5 with Linear read mode, we now need to + * tentatively open the first IO step. + * Otherwise we don't see the schema attribute. + * This branch is also taken by Streaming engines. + */ + if (m_engine->BeginStep() != adios2::StepStatus::OK) + { + throw std::runtime_error( + "[ADIOS2] Unexpected step status when " + "opening file/stream."); + } + openedANewStep = true; + } + auto attr = m_IO.InquireAttribute( ADIOS2Defaults::str_adios2Schema); if (!attr) { - return ADIOS2Schema::schema_0000_00_00; + m_impl->m_schema = ADIOS2Schema::schema_0000_00_00; } else { - return attr.Data()[0]; + m_impl->m_schema = attr.Data()[0]; } }; - // decide streaming mode + + /* + * Second round: Decide the streamStatus. + */ switch (streamStatus) { case StreamStatus::Undecided: { - m_impl->m_schema = layoutVersion(); auto attr = m_IO.InquireAttribute( ADIOS2Defaults::str_usesstepsAttribute); if (attr && attr.Data()[0] == 1) { - if (delayOpeningTheFirstStep) + if (parsePreference == ParsePreference::UpFront) { + if (openedANewStep) + { + throw error::Internal( + "Logic error in ADIOS2 backend! 
No need to " + "indiscriminately open a step before doing " + "anything in an engine that supports " + "up-front parsing."); + } streamStatus = StreamStatus::Parsing; } else { - if (m_engine.value().BeginStep() != - adios2::StepStatus::OK) + if (!openedANewStep && + m_engine.value().BeginStep() != + adios2::StepStatus::OK) { throw std::runtime_error( "[ADIOS2] Unexpected step status when " @@ -2804,23 +3122,37 @@ namespace detail } else { + /* + * If openedANewStep is true, then the file consists + * of one large step, we just leave it open. + */ streamStatus = StreamStatus::NoStream; } break; } + case StreamStatus::NoStream: + // using random-access mode + case StreamStatus::DuringStep: + // IO step might have sneakily been opened + // by setLayoutVersion(), because otherwise we don't see + // the schema attribute + break; case StreamStatus::OutsideOfStep: - if (m_engine.value().BeginStep() != adios2::StepStatus::OK) + if (openedANewStep) + { + streamStatus = StreamStatus::DuringStep; + } + else { - throw std::runtime_error( - "[ADIOS2] Unexpected step status when " - "opening file/stream."); + throw error::Internal( + "Control flow error: Step should have been opened " + "before this point."); } - m_impl->m_schema = layoutVersion(); - streamStatus = StreamStatus::DuringStep; break; default: throw std::runtime_error("[ADIOS2] Control flow error!"); } + if (attributeLayout() == AttributeLayout::ByAdiosVariables) { preloadAttributes.preloadAttributes(m_IO, m_engine.value()); @@ -2967,6 +3299,13 @@ namespace detail ba->run(*this); } + if (!initializedDefaults) + { + m_IO.DefineAttribute( + ADIOS2Defaults::str_adios2Schema, m_impl->m_schema.value()); + initializedDefaults = true; + } + if (writeAttributes) { for (auto &pair : m_attributeWrites) @@ -2975,7 +3314,12 @@ namespace detail } } +#if HAS_ADIOS_2_8 + if (this->m_mode == adios2::Mode::Read || + this->m_mode == adios2::Mode::ReadRandomAccess) +#else if (this->m_mode == adios2::Mode::Read) +#endif { level = 
FlushLevel::UserFlush; } @@ -3075,6 +3419,9 @@ namespace detail decideFlushAPICall(eng); break; case adios2::Mode::Read: +#if HAS_ADIOS_2_8 + case adios2::Mode::ReadRandomAccess: +#endif eng.PerformGets(); break; default: @@ -3097,8 +3444,14 @@ namespace detail // sic! no else if (streamStatus == StreamStatus::NoStream) { - m_IO.DefineAttribute( - ADIOS2Defaults::str_usesstepsAttribute, 0); + if ((m_mode == adios2::Mode::Write || + m_mode == adios2::Mode::Append) && + !m_IO.InquireAttribute( + ADIOS2Defaults::str_usesstepsAttribute)) + { + m_IO.DefineAttribute( + ADIOS2Defaults::str_usesstepsAttribute, 0); + } flush( ADIOS2FlushParams{FlushLevel::UserFlush}, /* writeAttributes = */ false); @@ -3113,7 +3466,10 @@ namespace detail * The usessteps tag should only be set when the Series is *logically* * using steps. */ - if (calledExplicitly) + if (calledExplicitly && + (m_mode == adios2::Mode::Write || m_mode == adios2::Mode::Append) && + !m_IO.InquireAttribute( + ADIOS2Defaults::str_usesstepsAttribute)) { m_IO.DefineAttribute( ADIOS2Defaults::str_usesstepsAttribute, 1); diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp index f4c0d7ec45..6b1630e854 100644 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -412,7 +412,7 @@ template void CommonADIOS1IOHandlerImpl::createFile( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[ADIOS1] Creating a file in read-only mode is not possible."); @@ -470,7 +470,7 @@ template void CommonADIOS1IOHandlerImpl::createPath( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[ADIOS1] Creating a path in a file opened as read only is not " "possible."); @@ -534,7 +534,7 @@ template void 
CommonADIOS1IOHandlerImpl::createDataset( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[ADIOS1] Creating a dataset in a file opened as read only is not " "possible."); @@ -543,9 +543,7 @@ void CommonADIOS1IOHandlerImpl::createDataset( { /* ADIOS variable definitions require the file to be (re-)opened to take * effect/not cause errors */ - auto res = m_filePaths.find(writable); - if (res == m_filePaths.end()) - res = m_filePaths.find(writable->parent); + auto res = m_filePaths.find(writable->parent); int64_t group = m_groups[res->second]; @@ -664,7 +662,7 @@ void CommonADIOS1IOHandlerImpl::extendDataset( template void CommonADIOS1IOHandlerImpl::openFile( - Writable *writable, Parameter const ¶meters) + Writable *writable, Parameter ¶meters) { if (!auxiliary::directory_exists(m_handler->directory)) error::throwReadError( @@ -739,7 +737,7 @@ void CommonADIOS1IOHandlerImpl::closeFile( if (myGroup != m_groups.end()) { auto attributeWrites = m_attributeWrites.find(myGroup->second); - if (this->m_handler->m_backendAccess != Access::READ_ONLY && + if (access::write(this->m_handler->m_backendAccess) && attributeWrites != m_attributeWrites.end()) { for (auto &att : attributeWrites->second) @@ -849,9 +847,7 @@ void CommonADIOS1IOHandlerImpl::openDataset( Writable *writable, Parameter ¶meters) { ADIOS_FILE *f; - auto res = m_filePaths.find(writable); - if (res == m_filePaths.end()) - res = m_filePaths.find(writable->parent); + auto res = m_filePaths.find(writable->parent); f = m_openReadFileHandles.at(res->second); /* Sanitize name */ @@ -1017,7 +1013,7 @@ template void CommonADIOS1IOHandlerImpl::deleteFile( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[ADIOS1] Deleting a file opened as read only is not 
possible."); @@ -1103,7 +1099,7 @@ template void CommonADIOS1IOHandlerImpl::writeDataset( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[ADIOS1] Writing into a dataset in a file opened as read-only is " "not possible."); @@ -1149,7 +1145,7 @@ void CommonADIOS1IOHandlerImpl::writeAttribute( // cannot do this return; } - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[ADIOS1] Writing an attribute in a file opened as read only is " "not possible."); @@ -1999,6 +1995,13 @@ void CommonADIOS1IOHandlerImpl::listAttributes( } } +template +void CommonADIOS1IOHandlerImpl::deregister( + Writable *writable, Parameter const &) +{ + m_filePaths.erase(writable); +} + template void CommonADIOS1IOHandlerImpl::initJson(json::TracingJSON config) { diff --git a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp index fe628e48aa..396a9106d8 100644 --- a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp @@ -123,49 +123,55 @@ std::future ParallelADIOS1IOHandlerImpl::flush() case O::CREATE_FILE: createFile( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CHECK_FILE: checkFile( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CREATE_PATH: createPath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::OPEN_PATH: openPath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CREATE_DATASET: createDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::WRITE_ATT: writeAttribute( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::OPEN_FILE: 
openFile( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::KEEP_SYNCHRONOUS: keepSynchronous( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DEREGISTER: + deregister( + i.writable, + deref_dynamic_cast>( i.parameter.get())); break; default: @@ -202,19 +208,19 @@ std::future ParallelADIOS1IOHandlerImpl::flush() case O::EXTEND_DATASET: extendDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CLOSE_PATH: closePath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::OPEN_DATASET: openDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::CLOSE_FILE: @@ -226,79 +232,79 @@ std::future ParallelADIOS1IOHandlerImpl::flush() case O::DELETE_FILE: deleteFile( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::DELETE_PATH: deletePath( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::DELETE_DATASET: deleteDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::DELETE_ATT: deleteAttribute( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::WRITE_DATASET: writeDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::READ_DATASET: readDataset( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::GET_BUFFER_VIEW: getBufferView( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::READ_ATT: readAttribute( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::LIST_PATHS: listPaths( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::LIST_DATASETS: 
listDatasets( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::LIST_ATTS: listAttributes( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::ADVANCE: advance( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; case O::AVAILABLE_CHUNKS: availableChunks( i.writable, - deref_dynamic_cast >( + deref_dynamic_cast>( i.parameter.get())); break; default: @@ -384,6 +390,7 @@ void ParallelADIOS1IOHandler::enqueue(IOTask const &i) case Operation::OPEN_FILE: case Operation::WRITE_ATT: case Operation::KEEP_SYNCHRONOUS: + case Operation::DEREGISTER: m_setup.push(i); return; default: diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index 1516571069..499b2e6e69 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -220,7 +220,7 @@ HDF5IOHandlerImpl::~HDF5IOHandlerImpl() void HDF5IOHandlerImpl::createFile( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Creating a file in read-only mode is not possible."); @@ -258,6 +258,7 @@ void HDF5IOHandlerImpl::createFile( flags = H5F_ACC_EXCL; break; case Access::READ_ONLY: + case Access::READ_LINEAR: // condition has been checked above throw std::runtime_error( "[HDF5] Control flow error in createFile backend access mode."); @@ -322,7 +323,7 @@ void HDF5IOHandlerImpl::checkFile( void HDF5IOHandlerImpl::createPath( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Creating a path in a file opened as read only is not " "possible."); @@ -413,7 +414,7 @@ void HDF5IOHandlerImpl::createPath( void HDF5IOHandlerImpl::createDataset( Writable *writable, Parameter const ¶meters) { - if 
(m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Creating a dataset in a file opened as read only is not " "possible."); @@ -652,7 +653,7 @@ void HDF5IOHandlerImpl::createDataset( void HDF5IOHandlerImpl::extendDataset( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Extending a dataset in a file opened as read only is not " "possible."); @@ -771,7 +772,7 @@ void HDF5IOHandlerImpl::availableChunks( } void HDF5IOHandlerImpl::openFile( - Writable *writable, Parameter const ¶meters) + Writable *writable, Parameter ¶meters) { if (!auxiliary::directory_exists(m_handler->directory)) throw error::ReadError( @@ -796,19 +797,16 @@ void HDF5IOHandlerImpl::openFile( unsigned flags; Access at = m_handler->m_backendAccess; - if (at == Access::READ_ONLY) + if (access::readOnly(at)) flags = H5F_ACC_RDONLY; /* * Within the HDF5 backend, APPEND and READ_WRITE mode are * equivalent, but the openPMD frontend exposes no reading * functionality in APPEND mode. 
*/ - else if ( - at == Access::READ_WRITE || at == Access::CREATE || - at == Access::APPEND) - flags = H5F_ACC_RDWR; else - throw std::runtime_error("[HDF5] Unknown file Access"); + flags = H5F_ACC_RDWR; + hid_t file_id; file_id = H5Fopen(name.c_str(), flags, m_fileAccessProperty); if (file_id < 0) @@ -1066,7 +1064,7 @@ void HDF5IOHandlerImpl::openDataset( void HDF5IOHandlerImpl::deleteFile( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Deleting a file opened as read only is not possible."); @@ -1100,7 +1098,7 @@ void HDF5IOHandlerImpl::deleteFile( void HDF5IOHandlerImpl::deletePath( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Deleting a path in a file opened as read only is not " "possible."); @@ -1152,7 +1150,7 @@ void HDF5IOHandlerImpl::deletePath( void HDF5IOHandlerImpl::deleteDataset( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Deleting a path in a file opened as read only is not " "possible."); @@ -1204,7 +1202,7 @@ void HDF5IOHandlerImpl::deleteDataset( void HDF5IOHandlerImpl::deleteAttribute( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Deleting an attribute in a file opened as read only is not " "possible."); @@ -1239,7 +1237,7 @@ void HDF5IOHandlerImpl::deleteAttribute( void HDF5IOHandlerImpl::writeDataset( Writable *writable, Parameter const ¶meters) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] 
Writing into a dataset in a file opened as read only is " "not possible."); @@ -1366,7 +1364,7 @@ void HDF5IOHandlerImpl::writeAttribute( // cannot do this return; } - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( "[HDF5] Writing an attribute in a file opened as read only is not " "possible."); @@ -2486,6 +2484,12 @@ void HDF5IOHandlerImpl::listAttributes( "listing"); } +void HDF5IOHandlerImpl::deregister( + Writable *writable, Parameter const &) +{ + m_fileNames.erase(writable); +} + std::optional HDF5IOHandlerImpl::getFile(Writable *writable) { diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index 8f25b56584..13f20c193d 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -91,7 +91,7 @@ void JSONIOHandlerImpl::createFile( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[JSON] Creating a file in read-only mode is not possible."); if (!writable->written) @@ -199,7 +199,7 @@ void JSONIOHandlerImpl::createPath( void JSONIOHandlerImpl::createDataset( Writable *writable, Parameter const ¶meter) { - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) { throw std::runtime_error( "[JSON] Creating a dataset in a file opened as read only is not " @@ -267,7 +267,7 @@ void JSONIOHandlerImpl::extendDataset( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[JSON] Cannot extend a dataset in read-only mode.") setAndGetFilePosition(writable); refreshFileFromParent(writable); @@ -526,7 +526,7 @@ void JSONIOHandlerImpl::availableChunks( } void JSONIOHandlerImpl::openFile( - Writable *writable, Parameter const ¶meter) + Writable *writable, Parameter ¶meter) { 
if (!auxiliary::directory_exists(m_handler->directory)) { @@ -558,6 +558,7 @@ void JSONIOHandlerImpl::closeFile( if (fileIterator != m_files.end()) { putJsonContents(fileIterator->second); + m_dirty.erase(fileIterator->second); // do not invalidate the file // it still exists, it is just not open m_files.erase(fileIterator); @@ -615,7 +616,7 @@ void JSONIOHandlerImpl::deleteFile( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[JSON] Cannot delete files in read-only mode") if (!writable->written) @@ -646,7 +647,7 @@ void JSONIOHandlerImpl::deletePath( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[JSON] Cannot delete paths in read-only mode") if (!writable->written) @@ -724,7 +725,7 @@ void JSONIOHandlerImpl::deleteDataset( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[JSON] Cannot delete datasets in read-only mode") if (!writable->written) @@ -766,7 +767,7 @@ void JSONIOHandlerImpl::deleteAttribute( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[JSON] Cannot delete attributes in read-only mode") if (!writable->written) { @@ -783,7 +784,7 @@ void JSONIOHandlerImpl::writeDataset( Writable *writable, Parameter const ¶meters) { VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, + access::write(m_handler->m_backendAccess), "[JSON] Cannot write data in read-only mode."); auto pos = setAndGetFilePosition(writable); @@ -806,7 +807,7 @@ void JSONIOHandlerImpl::writeAttribute( // cannot do this return; } - if (m_handler->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(m_handler->m_backendAccess)) { throw 
std::runtime_error( "[JSON] Creating a dataset in a file opened as read only is not " @@ -940,6 +941,12 @@ void JSONIOHandlerImpl::listAttributes( } } +void JSONIOHandlerImpl::deregister( + Writable *writable, Parameter const &) +{ + m_files.erase(writable); +} + std::shared_ptr JSONIOHandlerImpl::getFilehandle(File fileName, Access access) { @@ -948,11 +955,8 @@ JSONIOHandlerImpl::getFilehandle(File fileName, Access access) "[JSON] Tried opening a file that has been overwritten or deleted.") auto path = fullPath(std::move(fileName)); auto fs = std::make_shared(); - switch (access) + if (access::write(access)) { - case Access::CREATE: - case Access::READ_WRITE: - case Access::APPEND: /* * Always truncate when writing, we alway write entire JSON * datasets, never partial ones. @@ -961,10 +965,10 @@ JSONIOHandlerImpl::getFilehandle(File fileName, Access access) * functionality in APPEND mode. */ fs->open(path, std::ios_base::out | std::ios_base::trunc); - break; - case Access::READ_ONLY: + } + else + { fs->open(path, std::ios_base::in); - break; } VERIFY(fs->good(), "[JSON] Failed opening a file '" + path + "'"); return fs; diff --git a/src/Iteration.cpp b/src/Iteration.cpp index 54d23589f7..26ab93940e 100644 --- a/src/Iteration.cpp +++ b/src/Iteration.cpp @@ -306,18 +306,15 @@ void Iteration::flushVariableBased( void Iteration::flush(internal::FlushParams const &flushParams) { - switch (IOHandler()->m_frontendAccess) + if (access::readOnly(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: { for (auto &m : meshes) m.second.flush(m.first, flushParams); for (auto &species : particles) species.second.flush(species.first, flushParams); - break; } - case Access::READ_WRITE: - case Access::CREATE: - case Access::APPEND: { + else + { /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ Series s = retrieveSeries(); @@ -355,8 +352,6 @@ void Iteration::flush(internal::FlushParams const &flushParams) } 
flushAttributes(flushParams); - break; - } } } @@ -686,8 +681,10 @@ auto Iteration::beginStep(bool reread) -> BeginStepStatus } auto Iteration::beginStep( - std::optional thisObject, Series &series, bool reread) - -> BeginStepStatus + std::optional thisObject, + Series &series, + bool reread, + std::set const &ignoreIterations) -> BeginStepStatus { BeginStepStatus res; using IE = IterationEncoding; @@ -743,36 +740,26 @@ auto Iteration::beginStep( if (reread && status != AdvanceStatus::RANDOMACCESS && (series.iterationEncoding() == IE::groupBased || series.iterationEncoding() == IE::variableBased) && - (IOHandl->m_frontendAccess == Access::READ_ONLY || - IOHandl->m_frontendAccess == Access::READ_WRITE)) + access::read(series.IOHandler()->m_frontendAccess)) { - switch (IOHandl->m_frontendAccess) + bool previous = series.iterations.written(); + series.iterations.written() = false; + auto oldStatus = IOHandl->m_seriesStatus; + IOHandl->m_seriesStatus = internal::SeriesStatus::Parsing; + try { - case Access::READ_ONLY: - case Access::READ_WRITE: { - bool previous = series.iterations.written(); - series.iterations.written() = false; - auto oldStatus = IOHandl->m_seriesStatus; - IOHandl->m_seriesStatus = internal::SeriesStatus::Parsing; - try - { - res.iterationsInOpenedStep = series.readGorVBased( - /* do_always_throw_errors = */ true, /* init = */ false); - } - catch (...) - { - IOHandl->m_seriesStatus = oldStatus; - throw; - } - IOHandl->m_seriesStatus = oldStatus; - series.iterations.written() = previous; - break; + res.iterationsInOpenedStep = series.readGorVBased( + /* do_always_throw_errors = */ true, + /* init = */ false, + ignoreIterations); } - case Access::CREATE: - case Access::APPEND: - // no re-reading necessary - break; + catch (...) 
+ { + IOHandl->m_seriesStatus = oldStatus; + throw; } + IOHandl->m_seriesStatus = oldStatus; + series.iterations.written() = previous; } res.stepStatus = status; @@ -871,10 +858,8 @@ void Iteration::linkHierarchy(Writable &w) void Iteration::runDeferredParseAccess() { - switch (IOHandler()->m_frontendAccess) + if (access::read(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: - case Access::READ_WRITE: { auto &it = get(); if (!it.m_deferredParseAccess.has_value()) { @@ -906,12 +891,6 @@ void Iteration::runDeferredParseAccess() // reset this thing it.m_deferredParseAccess = std::optional(); IOHandler()->m_seriesStatus = oldStatus; - break; - } - case Access::CREATE: - case Access::APPEND: - // no parsing in those modes - return; } } diff --git a/src/Mesh.cpp b/src/Mesh.cpp index 5b91dd26dc..7454ff005e 100644 --- a/src/Mesh.cpp +++ b/src/Mesh.cpp @@ -217,16 +217,13 @@ template Mesh &Mesh::setTimeOffset(float); void Mesh::flush_impl( std::string const &name, internal::FlushParams const &flushParams) { - switch (IOHandler()->m_frontendAccess) + if (access::readOnly(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: { for (auto &comp : *this) comp.second.flush(comp.first, flushParams); - break; } - case Access::READ_WRITE: - case Access::CREATE: - case Access::APPEND: { + else + { if (!written()) { if (scalar()) @@ -268,8 +265,6 @@ void Mesh::flush_impl( } } flushAttributes(flushParams); - break; - } } } diff --git a/src/ParticleSpecies.cpp b/src/ParticleSpecies.cpp index 998c6c6317..eb502ae371 100644 --- a/src/ParticleSpecies.cpp +++ b/src/ParticleSpecies.cpp @@ -161,18 +161,15 @@ namespace void ParticleSpecies::flush( std::string const &path, internal::FlushParams const &flushParams) { - switch (IOHandler()->m_frontendAccess) + if (access::readOnly(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: { for (auto &record : *this) record.second.flush(record.first, flushParams); for (auto &patch : particlePatches) patch.second.flush(patch.first, 
flushParams); - break; } - case Access::READ_WRITE: - case Access::CREATE: - case Access::APPEND: { + else + { auto it = find("position"); if (it != end()) it->second.setUnitDimension({{UnitDimension::L, 1}}); @@ -191,8 +188,6 @@ void ParticleSpecies::flush( for (auto &patch : particlePatches) patch.second.flush(patch.first, flushParams); } - break; - } } } diff --git a/src/ReadIterations.cpp b/src/ReadIterations.cpp index 679146e896..fdc64c7845 100644 --- a/src/ReadIterations.cpp +++ b/src/ReadIterations.cpp @@ -20,6 +20,7 @@ */ #include "openPMD/ReadIterations.hpp" +#include "openPMD/Error.hpp" #include "openPMD/Series.hpp" @@ -28,11 +29,94 @@ namespace openPMD { -SeriesIterator::SeriesIterator() : m_series() -{} +namespace +{ + bool reread(std::optional parsePreference) + { + if (parsePreference.has_value()) + { + using PP = Parameter::ParsePreference; + + switch (parsePreference.value()) + { + case PP::PerStep: + return true; + case PP::UpFront: + return false; + } + return false; + } + else + { + throw error::Internal( + "Group/Variable-based encoding: Parse preference must be set."); + } + } +} // namespace + +SeriesIterator::SeriesIterator() = default; + +void SeriesIterator::initSeriesInLinearReadMode() +{ + auto &data = *m_data; + auto &series = *data.series; + series.IOHandler()->m_seriesStatus = internal::SeriesStatus::Parsing; + try + { + switch (series.iterationEncoding()) + { + using IE = IterationEncoding; + case IE::fileBased: + series.readFileBased(); + break; + case IE::groupBased: + case IE::variableBased: { + Parameter fOpen; + fOpen.name = series.get().m_name; + fOpen.encoding = series.iterationEncoding(); + series.IOHandler()->enqueue(IOTask(&series, fOpen)); + series.IOHandler()->flush(internal::defaultFlushParams); + using PP = Parameter::ParsePreference; + switch (*fOpen.out_parsePreference) + { + case PP::PerStep: + series.advance(AdvanceMode::BEGINSTEP); + series.readGorVBased( + /* do_always_throw_errors = */ false, /* init = */ true); + 
break; + case PP::UpFront: + series.readGorVBased( + /* do_always_throw_errors = */ false, /* init = */ true); + series.advance(AdvanceMode::BEGINSTEP); + break; + } + data.parsePreference = *fOpen.out_parsePreference; + break; + } + } + } + catch (...) + { + series.IOHandler()->m_seriesStatus = internal::SeriesStatus::Default; + throw; + } + series.IOHandler()->m_seriesStatus = internal::SeriesStatus::Default; +} -SeriesIterator::SeriesIterator(Series series) : m_series(std::move(series)) +SeriesIterator::SeriesIterator( + Series series_in, std::optional parsePreference) + : m_data{std::make_shared()} { + auto &data = *m_data; + data.parsePreference = std::move(parsePreference); + data.series = std::move(series_in); + auto &series = data.series.value(); + if (series.IOHandler()->m_frontendAccess == Access::READ_LINEAR && + series.iterations.empty()) + { + initSeriesInLinearReadMode(); + } + auto it = series.get().iterations.begin(); if (it == series.get().iterations.end()) { @@ -74,9 +158,9 @@ SeriesIterator::SeriesIterator(Series series) : m_series(std::move(series)) openIteration(series.iterations.begin()->second); status = it->second.beginStep(/* reread = */ true); - for (auto const &pair : m_series.value().iterations) + for (auto const &pair : series.iterations) { - m_iterationsInCurrentStep.push_back(pair.first); + data.iterationsInCurrentStep.push_back(pair.first); } break; case IterationEncoding::groupBased: @@ -88,8 +172,8 @@ SeriesIterator::SeriesIterator(Series series) : m_series(std::move(series)) */ Iteration::BeginStepStatus::AvailableIterations_t availableIterations; - std::tie(status, availableIterations) = - it->second.beginStep(/* reread = */ true); + std::tie(status, availableIterations) = it->second.beginStep( + /* reread = */ reread(data.parsePreference)); /* * In random-access mode, do not use the information read in the * `snapshot` attribute, instead simply go through iterations @@ -99,11 +183,11 @@ SeriesIterator::SeriesIterator(Series 
series) : m_series(std::move(series)) if (availableIterations.has_value() && status != AdvanceStatus::RANDOMACCESS) { - m_iterationsInCurrentStep = availableIterations.value(); - if (!m_iterationsInCurrentStep.empty()) + data.iterationsInCurrentStep = availableIterations.value(); + if (!data.iterationsInCurrentStep.empty()) { - openIteration( - series.iterations.at(m_iterationsInCurrentStep.at(0))); + openIteration(series.iterations.at( + data.iterationsInCurrentStep.at(0))); } } else if (!series.iterations.empty()) @@ -112,13 +196,14 @@ SeriesIterator::SeriesIterator(Series series) : m_series(std::move(series)) * Fallback implementation: Assume that each step corresponds * with an iteration in ascending order. */ - m_iterationsInCurrentStep = {series.iterations.begin()->first}; + data.iterationsInCurrentStep = { + series.iterations.begin()->first}; openIteration(series.iterations.begin()->second); } else { // this is a no-op, but let's keep it explicit - m_iterationsInCurrentStep = {}; + data.iterationsInCurrentStep = {}; } break; @@ -141,20 +226,21 @@ SeriesIterator::SeriesIterator(Series series) : m_series(std::move(series)) std::optional SeriesIterator::nextIterationInStep() { + auto &data = *m_data; using ret_t = std::optional; - if (m_iterationsInCurrentStep.empty()) + if (data.iterationsInCurrentStep.empty()) { return ret_t{}; } - m_iterationsInCurrentStep.pop_front(); - if (m_iterationsInCurrentStep.empty()) + data.iterationsInCurrentStep.pop_front(); + if (data.iterationsInCurrentStep.empty()) { return ret_t{}; } - auto oldIterationIndex = m_currentIteration; - m_currentIteration = *m_iterationsInCurrentStep.begin(); - auto &series = m_series.value(); + auto oldIterationIndex = data.currentIteration; + data.currentIteration = *data.iterationsInCurrentStep.begin(); + auto &series = data.series.value(); switch (series.iterationEncoding()) { @@ -171,11 +257,11 @@ std::optional SeriesIterator::nextIterationInStep() try { - 
series.iterations[m_currentIteration].open(); + series.iterations[data.currentIteration].open(); } catch (error::ReadError const &err) { - std::cerr << "Cannot read iteration '" << m_currentIteration + std::cerr << "Cannot read iteration '" << data.currentIteration << "' and will skip it due to read error:\n" << err.what() << std::endl; return nextIterationInStep(); @@ -189,12 +275,12 @@ std::optional SeriesIterator::nextIterationInStep() /* * Errors in here might appear due to deferred iteration parsing. */ - series.iterations[m_currentIteration].open(); + series.iterations[data.currentIteration].open(); /* * Errors in here might appear due to reparsing after opening a * new step. */ - series.iterations[m_currentIteration].beginStep( + series.iterations[data.currentIteration].beginStep( /* reread = */ true); } catch (error::ReadError const &err) @@ -212,28 +298,32 @@ std::optional SeriesIterator::nextIterationInStep() std::optional SeriesIterator::nextStep(size_t recursion_depth) { + auto &data = *m_data; // since we are in group-based iteration layout, it does not // matter which iteration we begin a step upon AdvanceStatus status{}; Iteration::BeginStepStatus::AvailableIterations_t availableIterations; try { - std::tie(status, availableIterations) = - Iteration::beginStep({}, *m_series, /* reread = */ true); + std::tie(status, availableIterations) = Iteration::beginStep( + {}, + *data.series, + /* reread = */ reread(data.parsePreference), + data.ignoreIterations); } catch (error::ReadError const &err) { std::cerr << "[SeriesIterator] Cannot read iteration due to error " "below, will skip it.\n" << err.what() << std::endl; - m_series->advance(AdvanceMode::ENDSTEP); + data.series->advance(AdvanceMode::ENDSTEP); return nextStep(recursion_depth + 1); } if (availableIterations.has_value() && status != AdvanceStatus::RANDOMACCESS) { - m_iterationsInCurrentStep = availableIterations.value(); + data.iterationsInCurrentStep = availableIterations.value(); } else { @@ 
-241,8 +331,8 @@ std::optional SeriesIterator::nextStep(size_t recursion_depth) * Fallback implementation: Assume that each step corresponds * with an iteration in ascending order. */ - auto &series = m_series.value(); - auto it = series.iterations.find(m_currentIteration); + auto &series = data.series.value(); + auto it = series.iterations.find(data.currentIteration); auto itEnd = series.iterations.end(); if (it == itEnd) { @@ -261,8 +351,7 @@ std::optional SeriesIterator::nextStep(size_t recursion_depth) * will skip such iterations and hope to find something in a * later IO step. No need to finish right now. */ - m_iterationsInCurrentStep = {}; - m_series->advance(AdvanceMode::ENDSTEP); + data.iterationsInCurrentStep = {}; } } else @@ -289,13 +378,12 @@ std::optional SeriesIterator::nextStep(size_t recursion_depth) * hope to find something in a later IO step. No need to * finish right now. */ - m_iterationsInCurrentStep = {}; - m_series->advance(AdvanceMode::ENDSTEP); + data.iterationsInCurrentStep = {}; } } else { - m_iterationsInCurrentStep = {it->first}; + data.iterationsInCurrentStep = {it->first}; } } } @@ -311,15 +399,16 @@ std::optional SeriesIterator::nextStep(size_t recursion_depth) std::optional SeriesIterator::loopBody() { - Series &series = m_series.value(); + auto &data = *m_data; + Series &series = data.series.value(); auto &iterations = series.iterations; /* * Might not be present because parsing might have failed in previous step */ - if (iterations.contains(m_currentIteration)) + if (iterations.contains(data.currentIteration)) { - auto ¤tIteration = iterations[m_currentIteration]; + auto ¤tIteration = iterations[data.currentIteration]; if (!currentIteration.closed()) { currentIteration.close(); @@ -327,7 +416,7 @@ std::optional SeriesIterator::loopBody() } auto guardReturn = - [&iterations]( + [&series, &iterations]( auto const &option) -> std::optional { if (!option.has_value() || *option.value() == end()) { @@ -336,33 +425,49 @@ std::optional 
SeriesIterator::loopBody() auto currentIterationIndex = option.value()->peekCurrentIteration(); if (!currentIterationIndex.has_value()) { + series.advance(AdvanceMode::ENDSTEP); return std::nullopt; } - auto iteration = iterations.at(currentIterationIndex.value()); - if (iteration.get().m_closed != internal::CloseStatus::ClosedInBackend) + // If we had the iteration already, then it's either not there at all + // (because old iterations are deleted in linear access mode), + // or it's still there but closed in random-access mode + auto index = currentIterationIndex.value(); + + if (iterations.contains(index)) { - try + auto iteration = iterations.at(index); + if (iteration.get().m_closed != + internal::CloseStatus::ClosedInBackend) { - iteration.open(); - option.value()->setCurrentIteration(); - return option; + try + { + iterations.at(index).open(); + option.value()->setCurrentIteration(); + return option; + } + catch (error::ReadError const &err) + { + std::cerr << "Cannot read iteration '" + << currentIterationIndex.value() + << "' and will skip it due to read error:\n" + << err.what() << std::endl; + option.value()->deactivateDeadIteration( + currentIterationIndex.value()); + return std::nullopt; + } } - catch (error::ReadError const &err) + else { - std::cerr << "Cannot read iteration '" - << currentIterationIndex.value() - << "' and will skip it due to read error:\n" - << err.what() << std::endl; - option.value()->deactivateDeadIteration( - currentIterationIndex.value()); - return std::nullopt; + // we had this iteration already, skip it + iteration.endStep(); + return std::nullopt; // empty, go into next iteration } } else { // we had this iteration already, skip it - iteration.endStep(); - return std::nullopt; // empty, go into next iteration + series.advance(AdvanceMode::ENDSTEP); + return std::nullopt; } }; @@ -390,35 +495,38 @@ std::optional SeriesIterator::loopBody() void SeriesIterator::deactivateDeadIteration(iteration_index_t index) { - switch 
(m_series->iterationEncoding()) + auto &data = *m_data; + switch (data.series->iterationEncoding()) { case IterationEncoding::fileBased: { Parameter param; - m_series->IOHandler()->enqueue( - IOTask(&m_series->iterations[index], std::move(param))); - m_series->IOHandler()->flush({FlushLevel::UserFlush}); + data.series->IOHandler()->enqueue( + IOTask(&data.series->iterations[index], std::move(param))); + data.series->IOHandler()->flush({FlushLevel::UserFlush}); } break; case IterationEncoding::variableBased: case IterationEncoding::groupBased: { Parameter param; param.mode = AdvanceMode::ENDSTEP; - m_series->IOHandler()->enqueue( - IOTask(&m_series->iterations[index], std::move(param))); - m_series->IOHandler()->flush({FlushLevel::UserFlush}); + data.series->IOHandler()->enqueue( + IOTask(&data.series->iterations[index], std::move(param))); + data.series->IOHandler()->flush({FlushLevel::UserFlush}); } break; } - m_series->iterations.container().erase(index); + data.series->iterations.container().erase(index); } SeriesIterator &SeriesIterator::operator++() { - if (!m_series.has_value()) + auto &data = *m_data; + if (!data.series.has_value()) { *this = end(); return *this; } + auto oldIterationIndex = data.currentIteration; std::optional res; /* * loopBody() might return an empty option to indicate a skipped iteration. @@ -435,21 +543,45 @@ SeriesIterator &SeriesIterator::operator++() auto resvalue = res.value(); if (*resvalue != end()) { - (**resvalue).setStepStatus(StepStatus::DuringStep); + auto &series = data.series.value(); + auto index = data.currentIteration; + auto &iteration = series.iterations[index]; + iteration.setStepStatus(StepStatus::DuringStep); + + if (series.IOHandler()->m_frontendAccess == Access::READ_LINEAR) + { + /* + * Linear read mode: Any data outside the current iteration is + * inaccessible. Delete the iteration. This has two effects: + * + * 1) Avoid confusion. + * 2) Avoid memory buildup in long-running workflows with many + * iterations. 
+ * + * @todo Also delete data in the backends upon doing this. + */ + auto &container = series.iterations.container(); + container.erase(oldIterationIndex); + data.ignoreIterations.emplace(oldIterationIndex); + } } return *resvalue; } IndexedIteration SeriesIterator::operator*() { + auto &data = *m_data; return IndexedIteration( - m_series.value().iterations[m_currentIteration], m_currentIteration); + data.series.value().iterations[data.currentIteration], + data.currentIteration); } bool SeriesIterator::operator==(SeriesIterator const &other) const { - return this->m_currentIteration == other.m_currentIteration && - this->m_series.has_value() == other.m_series.has_value(); + return (this->m_data.operator bool() && other.m_data.operator bool() && + (this->m_data->currentIteration == + other.m_data->currentIteration)) || + (!this->m_data.operator bool() && !other.m_data.operator bool()); } bool SeriesIterator::operator!=(SeriesIterator const &other) const @@ -462,12 +594,26 @@ SeriesIterator SeriesIterator::end() return SeriesIterator{}; } -ReadIterations::ReadIterations(Series series) : m_series(std::move(series)) -{} +ReadIterations::ReadIterations( + Series series, + Access access, + std::optional parsePreference) + : m_series(std::move(series)), m_parsePreference(std::move(parsePreference)) +{ + if (access == Access::READ_LINEAR) + { + // Open the iterator now already, so that metadata may already be read + alreadyOpened = iterator_t{m_series, m_parsePreference}; + } +} ReadIterations::iterator_t ReadIterations::begin() { - return iterator_t{m_series}; + if (!alreadyOpened.has_value()) + { + alreadyOpened = iterator_t{m_series, m_parsePreference}; + } + return alreadyOpened.value(); } ReadIterations::iterator_t ReadIterations::end() diff --git a/src/Record.cpp b/src/Record.cpp index 485e817e14..dc6949efdb 100644 --- a/src/Record.cpp +++ b/src/Record.cpp @@ -46,16 +46,13 @@ Record &Record::setUnitDimension(std::map const &udim) void Record::flush_impl( 
std::string const &name, internal::FlushParams const &flushParams) { - switch (IOHandler()->m_frontendAccess) + if (access::readOnly(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: { for (auto &comp : *this) comp.second.flush(comp.first, flushParams); - break; } - case Access::READ_WRITE: - case Access::CREATE: - case Access::APPEND: { + else + { if (!written()) { if (scalar()) @@ -99,8 +96,6 @@ void Record::flush_impl( } flushAttributes(flushParams); - break; - } } } diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index b7679ca598..bf46587ee2 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -200,18 +200,16 @@ void RecordComponent::flush( rc.m_name = name; return; } - switch (IOHandler()->m_frontendAccess) + if (access::readOnly(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: while (!rc.m_chunks.empty()) { IOHandler()->enqueue(rc.m_chunks.front()); rc.m_chunks.pop(); } - break; - case Access::READ_WRITE: - case Access::CREATE: - case Access::APPEND: { + } + else + { /* * This catches when a user forgets to use resetDataset. 
*/ @@ -277,8 +275,6 @@ void RecordComponent::flush( } flushAttributes(flushParams); - break; - } } } diff --git a/src/Series.cpp b/src/Series.cpp index 5b17c24642..6fe3f53d02 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -334,10 +334,17 @@ Series &Series::setIterationFormat(std::string const &i) if (iterationEncoding() == IterationEncoding::groupBased || iterationEncoding() == IterationEncoding::variableBased) - if (basePath() != i && (openPMD() == "1.0.1" || openPMD() == "1.0.0")) + { + if (!containsAttribute("basePath")) + { + setBasePath(i); + } + else if ( + basePath() != i && (openPMD() == "1.0.1" || openPMD() == "1.0.0")) throw std::invalid_argument( "iterationFormat must not differ from basePath " + basePath() + " for group- or variableBased data"); + } setAttribute("iterationFormat", i); return *this; @@ -576,6 +583,12 @@ Given file pattern: ')END" switch (IOHandler()->m_frontendAccess) { + case Access::READ_LINEAR: + // don't parse anything here + // no data accessible before opening the first step + // setIterationEncoding(input->iterationEncoding); + series.m_iterationEncoding = input->iterationEncoding; + break; case Access::READ_ONLY: case Access::READ_WRITE: { /* Allow creation of values in Containers and setting of Attributes @@ -736,6 +749,7 @@ void Series::flushFileBased( switch (IOHandler()->m_frontendAccess) { case Access::READ_ONLY: + case Access::READ_LINEAR: for (auto it = begin; it != end; ++it) { // Phase 1 @@ -845,9 +859,8 @@ void Series::flushGorVBased( bool flushIOHandler) { auto &series = get(); - switch (IOHandler()->m_frontendAccess) + if (access::readOnly(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: for (auto it = begin; it != end; ++it) { // Phase 1 @@ -880,10 +893,9 @@ void Series::flushGorVBased( IOHandler()->flush(flushParams); } } - break; - case Access::READ_WRITE: - case Access::CREATE: - case Access::APPEND: { + } + else + { if (!written()) { if (IOHandler()->m_frontendAccess == Access::APPEND) @@ -957,8 
+969,6 @@ void Series::flushGorVBased( { IOHandler()->flush(flushParams); } - break; - } } } @@ -1020,7 +1030,7 @@ void Series::readFileBased() /* Frontend access type might change during Series::read() to allow * parameter modification. Backend access type stays unchanged for the * lifetime of a Series. */ - if (IOHandler()->m_backendAccess == Access::READ_ONLY) + if (access::readOnly(IOHandler()->m_backendAccess)) throw error::ReadError( error::AffectedObject::File, error::Reason::Inaccessible, @@ -1285,7 +1295,10 @@ namespace } } // namespace -auto Series::readGorVBased(bool do_always_throw_errors, bool do_init) +auto Series::readGorVBased( + bool do_always_throw_errors, + bool do_init, + std::set const &ignoreIterations) -> std::optional> { auto &series = get(); @@ -1294,6 +1307,7 @@ auto Series::readGorVBased(bool do_always_throw_errors, bool do_init) fOpen.encoding = iterationEncoding(); IOHandler()->enqueue(IOTask(this, fOpen)); IOHandler()->flush(internal::defaultFlushParams); + series.m_parsePreference = *fOpen.out_parsePreference; if (do_init) { @@ -1311,7 +1325,28 @@ auto Series::readGorVBased(bool do_always_throw_errors, bool do_init) if (encoding == "groupBased") series.m_iterationEncoding = IterationEncoding::groupBased; else if (encoding == "variableBased") + { series.m_iterationEncoding = IterationEncoding::variableBased; + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) + { + std::cerr << R"( +The opened Series uses variable-based encoding, but is being accessed by +READ_ONLY mode which operates in random-access manner. +Random-access is (currently) unsupported by variable-based encoding +and some iterations may not be found by this access mode. 
+Consider using Access::READ_LINEAR and Series::readIterations().)" + << std::endl; + } + else if (IOHandler()->m_frontendAccess == Access::READ_WRITE) + { + throw error::WrongAPIUsage(R"( +The opened Series uses variable-based encoding, but is being accessed by +READ_WRITE mode which does not (yet) support variable-based encoding. +Please choose either Access::READ_LINEAR for reading or Access::APPEND for +creating new iterations. + )"); + } + } else if (encoding == "fileBased") { series.m_iterationEncoding = IterationEncoding::fileBased; @@ -1446,9 +1481,6 @@ auto Series::readGorVBased(bool do_always_throw_errors, bool do_init) return std::nullopt; }; - /* - * @todo in BP5, a BeginStep() might be necessary before this - */ auto currentSteps = currentSnapshot(); switch (iterationEncoding()) @@ -1462,6 +1494,10 @@ auto Series::readGorVBased(bool do_always_throw_errors, bool do_init) for (auto const &it : *pList.paths) { IterationIndex_t index = std::stoull(it); + if (ignoreIterations.find(index) != ignoreIterations.end()) + { + continue; + } if (auto err = internal::withRWAccess( IOHandler()->m_seriesStatus, [&]() { @@ -1491,10 +1527,20 @@ auto Series::readGorVBased(bool do_always_throw_errors, bool do_init) } } case IterationEncoding::variableBased: { - std::deque res = {0}; + std::deque res{}; if (currentSteps.has_value() && !currentSteps.value().empty()) { - res = {currentSteps.value().begin(), currentSteps.value().end()}; + for (auto index : currentSteps.value()) + { + if (ignoreIterations.find(index) == ignoreIterations.end()) + { + res.push_back(index); + } + } + } + else + { + res = {0}; } for (auto it : res) { @@ -1564,7 +1610,22 @@ void Series::readBase() IOHandler()->flush(internal::defaultFlushParams); if (auto val = Attribute(*aRead.resource).getOptional(); val.has_value()) + { + if ( // might have been previously initialized in READ_LINEAR access + // mode + containsAttribute("basePath") && + getAttribute("basePath").get() != val.value()) + { + throw 
error::ReadError( + error::AffectedObject::Attribute, + error::Reason::UnexpectedContent, + {}, + "Value for 'basePath' ('" + val.value() + + "') does not match expected value '" + + getAttribute("basePath").get() + "'."); + } setAttribute("basePath", val.value()); + } else throw error::ReadError( error::AffectedObject::Attribute, @@ -2265,7 +2326,8 @@ ReadIterations Series::readIterations() { // Use private constructor instead of copy constructor to avoid // object slicing - return {this->m_series}; + return { + this->m_series, IOHandler()->m_frontendAccess, get().m_parsePreference}; } WriteIterations Series::writeIterations() diff --git a/src/backend/PatchRecordComponent.cpp b/src/backend/PatchRecordComponent.cpp index 3db0545d40..e1477ef7bd 100644 --- a/src/backend/PatchRecordComponent.cpp +++ b/src/backend/PatchRecordComponent.cpp @@ -84,19 +84,16 @@ void PatchRecordComponent::flush( std::string const &name, internal::FlushParams const &flushParams) { auto &rc = get(); - switch (IOHandler()->m_frontendAccess) + if (access::readOnly(IOHandler()->m_frontendAccess)) { - case Access::READ_ONLY: { while (!rc.m_chunks.empty()) { IOHandler()->enqueue(rc.m_chunks.front()); rc.m_chunks.pop(); } - break; } - case Access::READ_WRITE: - case Access::CREATE: - case Access::APPEND: { + else + { if (!written()) { Parameter dCreate; @@ -114,8 +111,6 @@ void PatchRecordComponent::flush( } flushAttributes(flushParams); - break; - } } } diff --git a/src/backend/Writable.cpp b/src/backend/Writable.cpp index 32d4cd9816..f886e94046 100644 --- a/src/backend/Writable.cpp +++ b/src/backend/Writable.cpp @@ -27,6 +27,21 @@ namespace openPMD Writable::Writable(internal::AttributableData *a) : attributable{a} {} +Writable::~Writable() +{ + if (!IOHandler || !IOHandler->has_value()) + { + return; + } + /* + * Enqueueing a pointer to this object, which is now being deleted. 
+ * The DEREGISTER task must not dereference the pointer, but only use it to + * remove references to this object from internal data structures. + */ + IOHandler->value()->enqueue( + IOTask(this, Parameter())); +} + void Writable::seriesFlush(std::string backendConfig) { seriesFlush({FlushLevel::UserFlush, std::move(backendConfig)}); diff --git a/src/binding/python/Access.cpp b/src/binding/python/Access.cpp index 338f42db25..8fcdcb73c7 100644 --- a/src/binding/python/Access.cpp +++ b/src/binding/python/Access.cpp @@ -29,7 +29,67 @@ using namespace openPMD; void init_Access(py::module &m) { py::enum_(m, "Access") - .value("read_only", Access::READ_ONLY) - .value("read_write", Access::READ_WRITE) - .value("create", Access::CREATE); + .value( + "read_only", + Access::READ_ONLY, + R"(\ +Open Series as read-only, fails if Series is not found. +When to use READ_ONLY or READ_LINEAR: + +* When intending to use Series.read_iterations() +(i.e. step-by-step reading of iterations, e.g. in streaming), +then Access.read_linear is preferred and always supported. +Data is parsed inside Series.read_iterations(), no data is available +right after opening the Series. +* Otherwise (i.e. for random-access workflows), Access.read_only +is required, but works only in backends that support random access. +Data is parsed and available right after opening the Series. + +In both modes, parsing of iterations can be deferred with the JSON/TOML +option `defer_iteration_parsing`. + +Detailed rules: + +1. In backends that have no notion of IO steps (all except ADIOS2), +Access.read_only can always be used. +2. In backends that can be accessed either in random-access or +step-by-step, the chosen access mode decides which approach is used. +Examples are the BP4 and BP5 engines of ADIOS2. +3. In streaming backends, random-access is not possible. +When using such a backend, the access mode will be coerced +automatically to Access.read_linear. Use of Series.read_iterations() +is mandatory for access. 
+4. Reading a variable-based Series is only fully supported with +Access.read_linear. +If using Access.read_only, the dataset will be considered to only +have one single step. +If the dataset only has one single step, this is guaranteed to work +as expected. Otherwise, it is undefined which step's data is returned.)") + .value( + "read_random_access", + Access::READ_RANDOM_ACCESS, + "more explicit alias for read_only") + .value( + "read_write", + Access::READ_WRITE, + "Open existing Series as writable. Read mode corresponds with " + "Access::READ_RANDOM_ACCESS.") + .value( + "create", + Access::CREATE, + "create new series and truncate existing (files)") + .value( + "append", + Access::APPEND, + "write new iterations to an existing series without reading") + .value( + "read_linear", + Access::READ_LINEAR, + R"(\ + Open Series as read-only, fails if Series is not found. +This access mode requires use of Series.read_iterations(). +Global attributes are available directly after calling +Series.read_iterations(), Iterations and all their corresponding data +become available by use of the returned Iterator, e.g. in a foreach loop. 
+See Access.read_only for when to use this.)"); } diff --git a/src/binding/python/openpmd_api/pipe/__main__.py b/src/binding/python/openpmd_api/pipe/__main__.py old mode 100755 new mode 100644 index d7f0590567..436bd233f1 --- a/src/binding/python/openpmd_api/pipe/__main__.py +++ b/src/binding/python/openpmd_api/pipe/__main__.py @@ -204,7 +204,7 @@ def run(self): if not HAVE_MPI or (args.mpi is None and self.comm.size == 1): print("Opening data source") sys.stdout.flush() - inseries = io.Series(self.infile, io.Access.read_only, + inseries = io.Series(self.infile, io.Access.read_linear, self.inconfig) print("Opening data sink") sys.stdout.flush() @@ -215,7 +215,7 @@ def run(self): else: print("Opening data source on rank {}.".format(self.comm.rank)) sys.stdout.flush() - inseries = io.Series(self.infile, io.Access.read_only, self.comm, + inseries = io.Series(self.infile, io.Access.read_linear, self.comm, self.inconfig) print("Opening data sink on rank {}.".format(self.comm.rank)) sys.stdout.flush() diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index 44039adb66..a334c4b241 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -9,6 +9,11 @@ #if openPMD_HAVE_MPI #include +#if openPMD_HAVE_ADIOS2 +#include +#define HAS_ADIOS_2_8 (ADIOS2_VERSION_MAJOR * 100 + ADIOS2_VERSION_MINOR >= 208) +#endif + #include #include #include @@ -1128,9 +1133,13 @@ void adios2_streaming(bool variableBasedLayout) using namespace std::chrono_literals; std::this_thread::sleep_for(1s); + /* + * READ_LINEAR always works in Streaming, but READ_ONLY must stay + * working at least for groupbased iteration encoding + */ Series readSeries( "../samples/adios2_stream.sst", - Access::READ_ONLY, + variableBasedLayout ? Access::READ_LINEAR : Access::READ_ONLY, // inline TOML R"(defer_iteration_parsing = true)"); @@ -1388,31 +1397,73 @@ TEST_CASE("adios2_ssc", "[parallel][adios2]") adios2_ssc(); } +enum class ParseMode +{ + /* + * Conventional workflow. 
Just parse the whole thing and yield iterations + * in rising order. + */ + NoSteps, + /* + * The Series is parsed ahead of time upon opening, but it has steps. + * Parsing ahead of time is the conventional workflow to support + * random-access. + * Reading such a Series with the streaming API is only possible if all + * steps are in ascending order, otherwise the openPMD-api has no way of + * associating IO steps with interation indices. + * Reading such a Series with the Streaming API will become possible with + * the Linear read mode to be introduced by #1291. + */ + AheadOfTimeWithoutSnapshot, + /* + * In Linear read mode, a Series is not parsed ahead of time, but + * step-by-step, giving the openPMD-api a way to associate IO steps with + * iterations. No snapshot attribute exists, so the fallback mode is chosen: + * Iterations are returned in ascending order. + * If an IO step returns an iteration whose index is lower than the + * last one, it will be skipped. + * This mode of parsing is not available for the BP4 engine with ADIOS2 + * schema 0, since BP4 does not associate attributes with the step in + * which they were created, making it impossible to separate parsing into + * single steps. + */ + LinearWithoutSnapshot, + /* + * Snapshot attribute exists and dictates the iteration index returned by + * an IO step. Duplicate iterations will be skipped. + */ + WithSnapshot +}; + void append_mode( std::string const &extension, bool variableBased, + ParseMode parseMode, std::string jsonConfig = "{}") { std::string filename = (variableBased ? "../samples/append/append_variablebased." 
: "../samples/append/append_groupbased.") + extension; + int mpi_size{}, mpi_rank{}; + MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); + MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); MPI_Barrier(MPI_COMM_WORLD); if (auxiliary::directory_exists("../samples/append")) { auxiliary::remove_directory("../samples/append"); } MPI_Barrier(MPI_COMM_WORLD); - std::vector data(10, 0); - auto writeSomeIterations = [&data]( + std::vector data(10, 999); + auto writeSomeIterations = [&data, mpi_size, mpi_rank]( WriteIterations &&writeIterations, std::vector indices) { for (auto index : indices) { auto it = writeIterations[index]; auto dataset = it.meshes["E"]["x"]; - dataset.resetDataset({Datatype::INT, {10}}); - dataset.storeChunk(data, {0}, {10}); + dataset.resetDataset({Datatype::INT, {unsigned(mpi_size), 10}}); + dataset.storeChunk(data, {unsigned(mpi_rank), 0}, {1, 10}); // test that it works without closing too it.close(); } @@ -1430,6 +1481,7 @@ void append_mode( writeSomeIterations( write.writeIterations(), std::vector{0, 1}); } + MPI_Barrier(MPI_COMM_WORLD); { Series write(filename, Access::APPEND, MPI_COMM_WORLD, jsonConfig); if (variableBased) @@ -1448,9 +1500,10 @@ void append_mode( } writeSomeIterations( - write.writeIterations(), std::vector{2, 3}); + write.writeIterations(), std::vector{3, 2}); write.flush(); } + MPI_Barrier(MPI_COMM_WORLD); { using namespace std::chrono_literals; /* @@ -1476,32 +1529,142 @@ void append_mode( } writeSomeIterations( - write.writeIterations(), std::vector{4, 3}); + write.writeIterations(), std::vector{4, 3, 10}); write.flush(); } + MPI_Barrier(MPI_COMM_WORLD); { - Series read(filename, Access::READ_ONLY, MPI_COMM_WORLD); - if (variableBased || extension == "bp5") + Series write(filename, Access::APPEND, MPI_COMM_WORLD, jsonConfig); + if (variableBased) { + write.setIterationEncoding(IterationEncoding::variableBased); + } + if (write.backend() == "MPI_ADIOS1") + { + REQUIRE_THROWS_AS( + write.flush(), error::OperationUnsupportedInBackend); + // 
destructor will be noisy now + return; + } + + writeSomeIterations( + write.writeIterations(), std::vector{7, 1, 11}); + write.flush(); + } + MPI_Barrier(MPI_COMM_WORLD); + + auto verifyIteration = [mpi_size](auto &&it) { + auto chunk = it.meshes["E"]["x"].template loadChunk( + {0, 0}, {unsigned(mpi_size), 10}); + it.seriesFlush(); + for (size_t i = 0; i < unsigned(mpi_size) * 10; ++i) + { + REQUIRE(chunk.get()[i] == 999); + } + }; + + { + switch (parseMode) + { + case ParseMode::NoSteps: { + Series read(filename, Access::READ_LINEAR, MPI_COMM_WORLD); + unsigned counter = 0; + uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 7, 10, 11}; + for (auto iteration : read.readIterations()) + { + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); + ++counter; + } + REQUIRE(counter == 8); + } + break; + case ParseMode::LinearWithoutSnapshot: { + Series read(filename, Access::READ_LINEAR, MPI_COMM_WORLD); + unsigned counter = 0; + uint64_t iterationOrder[] = {0, 1, 3, 4, 10, 11}; + for (auto iteration : read.readIterations()) + { + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); + ++counter; + } + REQUIRE(counter == 6); + } + break; + case ParseMode::WithSnapshot: { // in variable-based encodings, iterations are not parsed ahead of // time but as they go + Series read(filename, Access::READ_LINEAR, MPI_COMM_WORLD); + unsigned counter = 0; + uint64_t iterationOrder[] = {0, 1, 3, 2, 4, 10, 7, 11}; + for (auto iteration : read.readIterations()) + { + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); + ++counter; + } + REQUIRE(counter == 8); + // Cannot do listSeries here because the Series is already drained + REQUIRE_THROWS_AS(helper::listSeries(read), error::WrongAPIUsage); + } + break; + case ParseMode::AheadOfTimeWithoutSnapshot: { + Series read(filename, Access::READ_LINEAR, MPI_COMM_WORLD); unsigned counter = 0; + uint64_t iterationOrder[] = {0, 1, 2, 
3, 4, 7, 10, 11}; + /* + * This one is a bit tricky: + * The BP4 engine has no way of parsing a Series in the old + * ADIOS2 schema step-by-step, since attributes are not + * associated with the step in which they were created. + * As a result, when readIterations() is called, the whole thing + * is parsed immediately ahead-of-time. + * We can then iterate through the iterations and access metadata, + * but since the IO steps don't correspond with the order of + * iterations returned (there is no way to figure out that order), + * we cannot load data in here. + * BP4 in the old ADIOS2 schema only supports either of the + * following: 1) A Series in which the iterations are present in + * ascending order. 2) Or accessing the Series in READ_ONLY mode. + */ for (auto const &iteration : read.readIterations()) { - REQUIRE(iteration.iterationIndex == counter); + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); ++counter; } - REQUIRE(counter == 5); + REQUIRE(counter == 8); + /* + * Roadmap: for now, reading this should work by ignoring the last + * duplicate iteration. + * After merging https://github.com/openPMD/openPMD-api/pull/949, we + * should see both instances when reading. + * Final goal: Read only the last instance. 
+ */ + REQUIRE_THROWS_AS(helper::listSeries(read), error::WrongAPIUsage); } - else + break; + } + } + MPI_Barrier(MPI_COMM_WORLD); + if (!variableBased) + { + Series read(filename, Access::READ_ONLY, MPI_COMM_WORLD); + REQUIRE(read.iterations.size() == 8); + unsigned counter = 0; + uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 7, 10, 11}; + for (auto iteration : read.readIterations()) { - REQUIRE(read.iterations.size() == 5); - helper::listSeries(read); + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); + ++counter; } + REQUIRE(counter == 8); } #if 100000000 * ADIOS2_VERSION_MAJOR + 1000000 * ADIOS2_VERSION_MINOR + \ 10000 * ADIOS2_VERSION_PATCH + 100 * ADIOS2_VERSION_TWEAK >= \ 208002700 + MPI_Barrier(MPI_COMM_WORLD); // AppendAfterSteps has a bug before that version if (extension == "bp5") { @@ -1533,30 +1696,69 @@ void append_mode( write.writeIterations(), std::vector{4, 5}); write.flush(); } + MPI_Barrier(MPI_COMM_WORLD); + { + Series read(filename, Access::READ_LINEAR, MPI_COMM_WORLD); + switch (parseMode) + { + case ParseMode::LinearWithoutSnapshot: { + uint64_t iterationOrder[] = {0, 1, 3, 4, 10}; + unsigned counter = 0; + for (auto iteration : read.readIterations()) + { + REQUIRE( + iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); + ++counter; + } + REQUIRE(counter == 5); + } + break; + case ParseMode::WithSnapshot: { + // in variable-based encodings, iterations are not parsed ahead + // of time but as they go + unsigned counter = 0; + uint64_t iterationOrder[] = {0, 1, 3, 2, 4, 10, 7, 5}; + for (auto iteration : read.readIterations()) + { + REQUIRE( + iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); + ++counter; + } + REQUIRE(counter == 8); + } + break; + default: + throw std::runtime_error("Test configured wrong."); + break; + } + } + MPI_Barrier(MPI_COMM_WORLD); + if (!variableBased) { Series read(filename, Access::READ_ONLY, MPI_COMM_WORLD); - 
// in variable-based encodings, iterations are not parsed ahead of - // time but as they go + uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 5, 7, 10}; unsigned counter = 0; for (auto const &iteration : read.readIterations()) { - REQUIRE(iteration.iterationIndex == counter); + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); ++counter; } - REQUIRE(counter == 6); - helper::listSeries(read); + REQUIRE(counter == 8); + // Cannot do listSeries here because the Series is already + // drained + REQUIRE_THROWS_AS(helper::listSeries(read), error::WrongAPIUsage); } } #endif } -TEST_CASE("append_mode", "[parallel]") +TEST_CASE("append_mode", "[serial]") { for (auto const &t : testedFileExtensions()) { - if (t == "bp" || t == "bp4" || t == "bp5") - { - std::string jsonConfigOld = R"END( + std::string jsonConfigOld = R"END( { "adios2": { @@ -1567,7 +1769,7 @@ TEST_CASE("append_mode", "[parallel]") } } })END"; - std::string jsonConfigNew = R"END( + std::string jsonConfigNew = R"END( { "adios2": { @@ -1578,28 +1780,27 @@ TEST_CASE("append_mode", "[parallel]") } } })END"; + if (t == "bp" || t == "bp4" || t == "bp5") + { /* * Troublesome combination: * 1) ADIOS2 v2.7 * 2) Parallel writer * 3) Append mode - * 4) Writing to a scalar variable * - * 4) is done by schema 2021 which will be phased out, so the tests - * are just deactivated. 
*/ - if (auxiliary::getEnvNum("OPENPMD2_ADIOS2_SCHEMA", 0) != 0) - { - continue; - } - append_mode(t, false, jsonConfigOld); - // append_mode(t, true, jsonConfigOld); - // append_mode(t, false, jsonConfigNew); - // append_mode(t, true, jsonConfigNew); +#if HAS_ADIOS_2_8 + append_mode( + t, false, ParseMode::LinearWithoutSnapshot, jsonConfigOld); + append_mode(t, false, ParseMode::WithSnapshot, jsonConfigNew); + // This test config does not make sense + // append_mode(t, true, ParseMode::WithSnapshot, jsonConfigOld); + append_mode(t, true, ParseMode::WithSnapshot, jsonConfigNew); +#endif } else { - append_mode(t, false); + append_mode(t, false, ParseMode::NoSteps); } } } @@ -1663,4 +1864,4 @@ TEST_CASE("unavailable_backend", "[core][parallel]") } #endif } -#endif +#endif // openPMD_HAVE_ADIOS2 && openPMD_HAVE_MPI diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index b2490c6803..2e20c53676 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -4837,7 +4837,8 @@ this = "should not warn" void bp4_steps( std::string const &file, std::string const &options_write, - std::string const &options_read) + std::string const &options_read, + Access access = Access::READ_ONLY) { { Series writeSeries(file, Access::CREATE, options_write); @@ -4862,7 +4863,7 @@ void bp4_steps( return; } - Series readSeries(file, Access::READ_ONLY, options_read); + Series readSeries(file, access, options_read); size_t last_iteration_index = 0; for (auto iteration : readSeries.readIterations()) @@ -4915,6 +4916,7 @@ TEST_CASE("bp4_steps", "[serial][adios2]") type = "bp4" UseSteps = false )"; + // sing the yes no song bp4_steps("../samples/bp4steps_yes_yes.bp", useSteps, useSteps); bp4_steps("../samples/bp4steps_no_yes.bp", dontUseSteps, useSteps); @@ -4923,6 +4925,17 @@ TEST_CASE("bp4_steps", "[serial][adios2]") bp4_steps("../samples/nullcore.bp", nullcore, ""); bp4_steps("../samples/bp4steps_default.bp", "{}", "{}"); + // bp4_steps( + // 
"../samples/newlayout_bp4steps_yes_yes.bp", + // useSteps, + // useSteps, + // Access::READ_LINEAR); + // bp4_steps( + // "../samples/newlayout_bp4steps_yes_no.bp", + // useSteps, + // dontUseSteps, + // Access::READ_LINEAR); + /* * Do this whole thing once more, but this time use the new attribute * layout. @@ -4957,6 +4970,17 @@ TEST_CASE("bp4_steps", "[serial][adios2]") "../samples/newlayout_bp4steps_no_yes.bp", dontUseSteps, useSteps); bp4_steps( "../samples/newlayout_bp4steps_no_no.bp", dontUseSteps, dontUseSteps); + + bp4_steps( + "../samples/newlayout_bp4steps_yes_yes.bp", + useSteps, + useSteps, + Access::READ_LINEAR); + bp4_steps( + "../samples/newlayout_bp4steps_yes_no.bp", + useSteps, + dontUseSteps, + Access::READ_LINEAR); } #endif @@ -5358,6 +5382,7 @@ void variableBasedSeries(std::string const &file) constexpr Extent::value_type extent = 1000; { Series writeSeries(file, Access::CREATE, selectADIOS2); + writeSeries.setAttribute("some_global", "attribute"); writeSeries.setIterationEncoding(IterationEncoding::variableBased); REQUIRE( writeSeries.iterationEncoding() == @@ -5406,10 +5431,19 @@ void variableBasedSeries(std::string const &file) auto testRead = [&file, &extent, &selectADIOS2]( std::string const &jsonConfig) { + /* + * Need linear read mode to access more than a single iteration in + * variable-based iteration encoding. 
+ */ Series readSeries( - file, Access::READ_ONLY, json::merge(selectADIOS2, jsonConfig)); + file, Access::READ_LINEAR, json::merge(selectADIOS2, jsonConfig)); size_t last_iteration_index = 0; + REQUIRE(!readSeries.containsAttribute("some_global")); + readSeries.readIterations(); + REQUIRE( + readSeries.getAttribute("some_global").get() == + "attribute"); for (auto iteration : readSeries.readIterations()) { if (iteration.iterationIndex > 2) @@ -5810,42 +5844,46 @@ void iterate_nonstreaming_series( } } - Series readSeries( - file, - Access::READ_ONLY, - json::merge(jsonConfig, R"({"defer_iteration_parsing": true})")); - - size_t last_iteration_index = 0; - // conventionally written Series must be readable with streaming-aware API! - for (auto iteration : readSeries.readIterations()) + for (auto access : {Access::READ_LINEAR, Access::READ_ONLY}) { - // ReadIterations takes care of Iteration::open()ing iterations - auto E_x = iteration.meshes["E"]["x"]; - REQUIRE(E_x.getDimensionality() == 2); - REQUIRE(E_x.getExtent()[0] == 2); - REQUIRE(E_x.getExtent()[1] == extent); - auto chunk = E_x.loadChunk({0, 0}, {1, extent}); - auto chunk2 = E_x.loadChunk({1, 0}, {1, extent}); - // we encourage manually closing iterations, but it should not matter - // so let's do the switcharoo for this test - if (last_iteration_index % 2 == 0) - { - readSeries.flush(); - } - else - { - iteration.close(); - } + Series readSeries( + file, + access, + json::merge(jsonConfig, R"({"defer_iteration_parsing": true})")); - int value = variableBasedLayout ? 0 : iteration.iterationIndex; - for (size_t i = 0; i < extent; ++i) + size_t last_iteration_index = 0; + // conventionally written Series must be readable with streaming-aware + // API! 
+ for (auto iteration : readSeries.readIterations()) { - REQUIRE(chunk.get()[i] == value); - REQUIRE(chunk2.get()[i] == int(i)); + // ReadIterations takes care of Iteration::open()ing iterations + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 2); + REQUIRE(E_x.getExtent()[0] == 2); + REQUIRE(E_x.getExtent()[1] == extent); + auto chunk = E_x.loadChunk({0, 0}, {1, extent}); + auto chunk2 = E_x.loadChunk({1, 0}, {1, extent}); + // we encourage manually closing iterations, but it should not + // matter so let's do the switcharoo for this test + if (last_iteration_index % 2 == 0) + { + readSeries.flush(); + } + else + { + iteration.close(); + } + + int value = variableBasedLayout ? 0 : iteration.iterationIndex; + for (size_t i = 0; i < extent; ++i) + { + REQUIRE(chunk.get()[i] == value); + REQUIRE(chunk2.get()[i] == int(i)); + } + last_iteration_index = iteration.iterationIndex; } - last_iteration_index = iteration.iterationIndex; + REQUIRE(last_iteration_index == 9); } - REQUIRE(last_iteration_index == 9); } TEST_CASE("iterate_nonstreaming_series", "[serial][adios2]") @@ -5870,13 +5908,13 @@ TEST_CASE("iterate_nonstreaming_series", "[serial][adios2]") backend.extension, false, json::merge( - backend.jsonBaseConfig(), "adios2.engine = \"bp5\"")); + backend.jsonBaseConfig(), "adios2.engine.type = \"bp5\"")); iterate_nonstreaming_series( "../samples/iterate_nonstreaming_series_groupbased_bp5." + backend.extension, false, json::merge( - backend.jsonBaseConfig(), "adios2.engine = \"bp5\"")); + backend.jsonBaseConfig(), "adios2.engine.type = \"bp5\"")); } #endif } @@ -6306,7 +6344,12 @@ void chaotic_stream(std::string filename, bool variableBased) series.close(); REQUIRE(!series.operator bool()); - Series read(filename, Access::READ_ONLY); + /* + * Random-access read mode would go by the openPMD group names instead + * of the ADIOS2 steps. + * Hence, the order would be ascending. 
+ */ + Series read(filename, Access::READ_LINEAR); size_t index = 0; for (const auto &iteration : read.readIterations()) { @@ -6335,11 +6378,13 @@ TEST_CASE("chaotic_stream", "[serial]") #ifdef openPMD_USE_INVASIVE_TESTS void unfinished_iteration_test( - std::string const &ext, bool filebased, std::string const &config = "{}") + std::string const &ext, + IterationEncoding encoding, + std::string const &config = "{}") { std::cout << "\n\nTESTING " << ext << "\n\n" << std::endl; std::string file = std::string("../samples/unfinished_iteration") + - (filebased ? "_%T." : ".") + ext; + (encoding == IterationEncoding::fileBased ? "_%T." : ".") + ext; { Series write(file, Access::CREATE, config); auto it0 = write.writeIterations()[0]; @@ -6356,11 +6401,11 @@ void unfinished_iteration_test( auto electron_mass = it10.particles["e"]["mass"][RecordComponent::SCALAR]; } - auto tryReading = [&config, file, filebased]( + auto tryReading = [&config, file, encoding]( + Access access, std::string const &additionalConfig = "{}") { { - Series read( - file, Access::READ_ONLY, json::merge(config, additionalConfig)); + Series read(file, access, json::merge(config, additionalConfig)); std::vector iterations; std::cout << "Going to list iterations in " << file << ":" @@ -6388,10 +6433,10 @@ void unfinished_iteration_test( std::vector{0, 10})); } - if (filebased) + if (encoding == IterationEncoding::fileBased && + access == Access::READ_ONLY) { - Series read( - file, Access::READ_ONLY, json::merge(config, additionalConfig)); + Series read(file, access, json::merge(config, additionalConfig)); if (additionalConfig == "{}") { // Eager parsing, defective iteration has already been removed @@ -6408,38 +6453,54 @@ void unfinished_iteration_test( } }; - tryReading(); - tryReading(R"({"defer_iteration_parsing": true})"); + tryReading(Access::READ_LINEAR); + tryReading(Access::READ_LINEAR, R"({"defer_iteration_parsing": true})"); + if (encoding != IterationEncoding::variableBased) + { + /* + * In 
variable-based iteration encoding, READ_ONLY mode will make + * iteration metadata leak into other iterations, causing iteration 0 + * to fail being parsed. + * (See also the warning that occurs when trying to access a variable- + * based Series in READ_ONLY mode) + */ + tryReading(Access::READ_ONLY); + tryReading(Access::READ_ONLY, R"({"defer_iteration_parsing": true})"); + } } TEST_CASE("unfinished_iteration_test", "[serial]") { #if openPMD_HAVE_ADIOS2 - unfinished_iteration_test("bp", false, R"({"backend": "adios2"})"); + unfinished_iteration_test( + "bp", IterationEncoding::groupBased, R"({"backend": "adios2"})"); unfinished_iteration_test( "bp", - false, + IterationEncoding::variableBased, R"( -{ - "backend": "adios2", - "iteration_encoding": "variable_based", - "adios2": { - "schema": 20210209 - } -} -)"); - unfinished_iteration_test("bp", true, R"({"backend": "adios2"})"); + { + "backend": "adios2", + "iteration_encoding": "variable_based", + "adios2": { + "schema": 20210209 + } + } + )"); + unfinished_iteration_test( + "bp", IterationEncoding::fileBased, R"({"backend": "adios2"})"); #endif #if openPMD_HAVE_ADIOS1 - unfinished_iteration_test("adios1.bp", false, R"({"backend": "adios1"})"); - unfinished_iteration_test("adios1.bp", true, R"({"backend": "adios1"})"); + unfinished_iteration_test( + "adios1.bp", IterationEncoding::groupBased, R"({"backend": "adios1"})"); + unfinished_iteration_test( + "adios1.bp", IterationEncoding::fileBased, R"({"backend": "adios1"})"); #endif #if openPMD_HAVE_HDF5 - unfinished_iteration_test("h5", false); - unfinished_iteration_test("h5", true); + unfinished_iteration_test("h5", IterationEncoding::groupBased); + unfinished_iteration_test("h5", IterationEncoding::fileBased); #endif - unfinished_iteration_test("json", false); - unfinished_iteration_test("json", true); + unfinished_iteration_test("json", IterationEncoding::groupBased); + unfinished_iteration_test("json", IterationEncoding::fileBased); } #endif @@ -6554,14 
+6615,16 @@ enum class ParseMode */ AheadOfTimeWithoutSnapshot, /* - * A Series of the BP5 engine is not parsed ahead of time, but step-by-step, - * giving the openPMD-api a way to associate IO steps with iterations. - * No snapshot attribute exists, so the fallback mode is chosen: + * In Linear read mode, a Series is not parsed ahead of time, but + * step-by-step, giving the openPMD-api a way to associate IO steps with + * iterations. No snapshot attribute exists, so the fallback mode is chosen: * Iterations are returned in ascending order. * If an IO step returns an iteration whose index is lower than the * last one, it will be skipped. - * This mode of parsing will be generalized into the Linear read mode with - * PR #1291. + * This mode of parsing is not available for the BP4 engine with ADIOS2 + * schema 0, since BP4 does not associate attributes with the step in + * which they were created, making it impossible to separate parsing into + * single steps. */ LinearWithoutSnapshot, /* @@ -6581,7 +6644,7 @@ void append_mode( { auxiliary::remove_directory("../samples/append"); } - std::vector data(10, 0); + std::vector data(10, 999); auto writeSomeIterations = [&data]( WriteIterations &&writeIterations, std::vector indices) { @@ -6626,7 +6689,7 @@ void append_mode( } writeSomeIterations( - write.writeIterations(), std::vector{2, 3}); + write.writeIterations(), std::vector{3, 2}); write.flush(); } { @@ -6675,40 +6738,55 @@ void append_mode( write.writeIterations(), std::vector{7, 1, 11}); write.flush(); } + + auto verifyIteration = [](auto &&it) { + auto chunk = it.meshes["E"]["x"].template loadChunk({0}, {10}); + it.seriesFlush(); + for (size_t i = 0; i < 10; ++i) + { + REQUIRE(chunk.get()[i] == 999); + } + }; + { - Series read(filename, Access::READ_ONLY); switch (parseMode) { case ParseMode::NoSteps: { + Series read(filename, Access::READ_LINEAR); unsigned counter = 0; uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 7, 10, 11}; - for (auto const &iteration : 
read.readIterations()) + for (auto iteration : read.readIterations()) { REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); ++counter; } REQUIRE(counter == 8); } break; case ParseMode::LinearWithoutSnapshot: { + Series read(filename, Access::READ_LINEAR); unsigned counter = 0; - uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 10, 11}; - for (auto const &iteration : read.readIterations()) + uint64_t iterationOrder[] = {0, 1, 3, 4, 10, 11}; + for (auto iteration : read.readIterations()) { REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); ++counter; } - REQUIRE(counter == 7); + REQUIRE(counter == 6); } break; case ParseMode::WithSnapshot: { // in variable-based encodings, iterations are not parsed ahead of // time but as they go + Series read(filename, Access::READ_LINEAR); unsigned counter = 0; - uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 10, 7, 11}; - for (auto const &iteration : read.readIterations()) + uint64_t iterationOrder[] = {0, 1, 3, 2, 4, 10, 7, 11}; + for (auto iteration : read.readIterations()) { REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); ++counter; } REQUIRE(counter == 8); @@ -6717,17 +6795,27 @@ void append_mode( } break; case ParseMode::AheadOfTimeWithoutSnapshot: { - REQUIRE(read.iterations.size() == 8); + Series read(filename, Access::READ_LINEAR); unsigned counter = 0; uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 7, 10, 11}; /* - * Use conventional read API since streaming API is not possible - * without Linear read mode. - * (See also comments inside ParseMode enum). + * This one is a bit tricky: + * The BP4 engine has no way of parsing a Series in the old + * ADIOS2 schema step-by-step, since attributes are not + * associated with the step in which they were created. + * As a result, when readIterations() is called, the whole thing + * is parsed immediately ahead-of-time. 
+ * We can then iterate through the iterations and access metadata, + * but since the IO steps don't correspond with the order of + * iterations returned (there is no way to figure out that order), + * we cannot load data in here. + * BP4 in the old ADIOS2 schema only supports either of the + * following: 1) A Series in which the iterations are present in + * ascending order. 2) Or accessing the Series in READ_ONLY mode. */ - for (auto const &iteration : read.iterations) + for (auto const &iteration : read.readIterations()) { - REQUIRE(iteration.first == iterationOrder[counter]); + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); ++counter; } REQUIRE(counter == 8); @@ -6738,11 +6826,25 @@ void append_mode( * should see both instances when reading. * Final goal: Read only the last instance. */ - helper::listSeries(read); + REQUIRE_THROWS_AS(helper::listSeries(read), error::WrongAPIUsage); } break; } } + if (!variableBased) + { + Series read(filename, Access::READ_ONLY); + REQUIRE(read.iterations.size() == 8); + unsigned counter = 0; + uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 7, 10, 11}; + for (auto iteration : read.readIterations()) + { + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); + ++counter; + } + REQUIRE(counter == 8); + } // AppendAfterSteps has a bug before that version #if 100000000 * ADIOS2_VERSION_MAJOR + 1000000 * ADIOS2_VERSION_MINOR + \ 10000 * ADIOS2_VERSION_PATCH + 100 * ADIOS2_VERSION_TWEAK >= \ @@ -6777,49 +6879,57 @@ void append_mode( write.flush(); } { - Series read(filename, Access::READ_ONLY); + Series read(filename, Access::READ_LINEAR); switch (parseMode) { case ParseMode::LinearWithoutSnapshot: { - uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 10}; + uint64_t iterationOrder[] = {0, 1, 3, 4, 10}; unsigned counter = 0; - for (auto const &iteration : read.readIterations()) + for (auto iteration : read.readIterations()) { REQUIRE( iteration.iterationIndex == iterationOrder[counter]); + 
verifyIteration(iteration); ++counter; } - REQUIRE(counter == 6); - // Cannot do listSeries here because the Series is already - // drained - REQUIRE_THROWS_AS( - helper::listSeries(read), error::WrongAPIUsage); + REQUIRE(counter == 5); } break; case ParseMode::WithSnapshot: { // in variable-based encodings, iterations are not parsed ahead // of time but as they go unsigned counter = 0; - uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 10, 7, 5}; - for (auto const &iteration : read.readIterations()) + uint64_t iterationOrder[] = {0, 1, 3, 2, 4, 10, 7, 5}; + for (auto iteration : read.readIterations()) { REQUIRE( iteration.iterationIndex == iterationOrder[counter]); + verifyIteration(iteration); ++counter; } REQUIRE(counter == 8); - // Cannot do listSeries here because the Series is already - // drained - REQUIRE_THROWS_AS( - helper::listSeries(read), error::WrongAPIUsage); } break; - case ParseMode::NoSteps: - case ParseMode::AheadOfTimeWithoutSnapshot: + default: throw std::runtime_error("Test configured wrong."); break; } } + if (!variableBased) + { + Series read(filename, Access::READ_ONLY); + uint64_t iterationOrder[] = {0, 1, 2, 3, 4, 5, 7, 10}; + unsigned counter = 0; + for (auto const &iteration : read.readIterations()) + { + REQUIRE(iteration.iterationIndex == iterationOrder[counter]); + ++counter; + } + REQUIRE(counter == 8); + // Cannot do listSeries here because the Series is already + // drained + REQUIRE_THROWS_AS(helper::listSeries(read), error::WrongAPIUsage); + } } #endif } @@ -6850,46 +6960,24 @@ TEST_CASE("append_mode", "[serial]") } } })END"; - if (t == "bp5") - { - append_mode( - "../samples/append/groupbased." + t, - false, - ParseMode::LinearWithoutSnapshot, - jsonConfigOld); - append_mode( - "../samples/append/groupbased_newschema." + t, - false, - ParseMode::WithSnapshot, - jsonConfigNew); - append_mode( - "../samples/append/variablebased." 
+ t, - true, - ParseMode::WithSnapshot, - jsonConfigOld); - append_mode( - "../samples/append/variablebased_newschema." + t, - true, - ParseMode::WithSnapshot, - jsonConfigNew); - } - else if (t == "bp" || t == "bp4") + if (t == "bp" || t == "bp4" || t == "bp5") { append_mode( "../samples/append/append_groupbased." + t, false, - ParseMode::AheadOfTimeWithoutSnapshot, + ParseMode::LinearWithoutSnapshot, jsonConfigOld); append_mode( "../samples/append/append_groupbased." + t, false, ParseMode::WithSnapshot, jsonConfigNew); - append_mode( - "../samples/append/append_variablebased." + t, - true, - ParseMode::WithSnapshot, - jsonConfigOld); + // This test config does not make sense + // append_mode( + // "../samples/append/append_variablebased." + t, + // true, + // ParseMode::WithSnapshot, + // jsonConfigOld); append_mode( "../samples/append/append_variablebased." + t, true, From 00286470849c62646e1c3e808673c84b0ccb65e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 21 Feb 2023 19:26:48 +0100 Subject: [PATCH 39/82] Deactivate warning (#1368) "Series constructor called with explicit iteration suggests loading a single file with groupBased iteration encoding. Loaded file is fileBased." --> Opening a single file of a file-based Series is a valid workflow, warnings are not necessary here. --- src/Series.cpp | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/src/Series.cpp b/src/Series.cpp index 6fe3f53d02..21e8bef234 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -1206,10 +1206,17 @@ void Series::readOneIterationFileBased(std::string const &filePath) else if (encoding == "groupBased") { series.m_iterationEncoding = IterationEncoding::groupBased; - std::cerr << "Series constructor called with iteration " - "regex '%T' suggests loading a " - << "time series with fileBased iteration " - "encoding. 
Loaded file is groupBased.\n"; + /* + * Opening a single file of a file-based Series is a valid workflow, + * warnings are not necessary here. + * Leaving the old warning as a comment, because we might want to + * add this back in again if we add some kind of verbosity level + * specification or logging. + */ + // std::cerr << "Series constructor called with iteration " + // "regex '%T' suggests loading a " + // << "time series with fileBased iteration " + // "encoding. Loaded file is groupBased.\n"; } else if (encoding == "variableBased") { @@ -1350,10 +1357,18 @@ creating new iterations. else if (encoding == "fileBased") { series.m_iterationEncoding = IterationEncoding::fileBased; - std::cerr << "Series constructor called with explicit " - "iteration suggests loading a " - << "single file with groupBased iteration encoding. " - "Loaded file is fileBased.\n"; + /* + * Opening a single file of a file-based Series is a valid + * workflow, warnings are not necessary here. + * Leaving the old warning as a comment, because we might want + * to add this back in again if we add some kind of verbosity + * level specification or logging. + */ + // std::cerr << "Series constructor called with explicit " + // "iteration suggests loading a " + // << "single file with groupBased iteration encoding. 
+ // " + // "Loaded file is fileBased.\n"; /* * We'll want the openPMD API to continue series.m_name to open * the file instead of piecing the name together via From f883f8e2e7ddc89a1435abe4db672c3c87e221e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Wed, 22 Feb 2023 20:25:04 +0100 Subject: [PATCH 40/82] Update examples (#1371) * Update examples Mainly Series::close, Iteration::close, Series::writeIterations, Series::readIterations, READ_LINEAR * Fix: Python bindings use copy return value policies * Fix: Use explicit ::close() call in WriteIterations --- examples/10_streaming_read.cpp | 11 ++++ examples/10_streaming_read.py | 7 +++ examples/10_streaming_write.cpp | 8 +++ examples/10_streaming_write.py | 7 +++ examples/11_particle_dataframe.py | 2 + examples/12_span_write.cpp | 8 +++ examples/12_span_write.py | 7 +++ examples/13_write_dynamic_configuration.cpp | 8 +++ examples/13_write_dynamic_configuration.py | 7 +++ examples/1_structure.cpp | 11 +++- examples/2_read_serial.cpp | 7 ++- examples/2_read_serial.py | 6 +- examples/2a_read_thetaMode_serial.cpp | 6 ++ examples/2a_read_thetaMode_serial.py | 7 +++ examples/3_write_serial.cpp | 9 ++- examples/3_write_serial.py | 7 ++- examples/3a_write_thetaMode_serial.cpp | 8 ++- examples/3a_write_thetaMode_serial.py | 7 ++- examples/3b_write_resizable_particles.cpp | 14 ++++- examples/3b_write_resizable_particles.py | 5 +- examples/4_read_parallel.cpp | 12 +++- examples/4_read_parallel.py | 6 +- examples/5_write_parallel.cpp | 16 ++++- examples/5_write_parallel.py | 10 ++- examples/6_dump_filebased_series.cpp | 70 ++++++++++++++------- examples/7_extended_write_serial.cpp | 6 ++ examples/7_extended_write_serial.py | 5 ++ examples/9_particle_write_serial.py | 5 ++ include/openPMD/WriteIterations.hpp | 15 ++++- src/Series.cpp | 9 ++- src/WriteIterations.cpp | 25 ++++++-- src/binding/python/Attributable.cpp | 2 +- src/binding/python/Container.cpp | 6 +- src/binding/python/Iteration.cpp | 4 +- 
src/binding/python/Series.cpp | 9 ++- 35 files changed, 289 insertions(+), 63 deletions(-) diff --git a/examples/10_streaming_read.cpp b/examples/10_streaming_read.cpp index a7e503a055..eae79dd28a 100644 --- a/examples/10_streaming_read.cpp +++ b/examples/10_streaming_read.cpp @@ -39,6 +39,9 @@ int main() extents[i] = rc.getExtent(); } + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. iteration.close(); for (size_t i = 0; i < 3; ++i) @@ -55,6 +58,14 @@ int main() } } + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ + series.close(); + return 0; #else std::cout << "The streaming example requires that openPMD has been built " diff --git a/examples/10_streaming_read.py b/examples/10_streaming_read.py index 4cd29c46b2..5d0f688b94 100755 --- a/examples/10_streaming_read.py +++ b/examples/10_streaming_read.py @@ -53,3 +53,10 @@ print("dim: {}".format(dim)) chunk = loadedChunks[i] print(chunk) + + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. 
+ series.close() diff --git a/examples/10_streaming_write.cpp b/examples/10_streaming_write.cpp index 1c12e034f1..57bbcb6287 100644 --- a/examples/10_streaming_write.cpp +++ b/examples/10_streaming_write.cpp @@ -45,6 +45,14 @@ int main() iteration.close(); } + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ + series.close(); + return 0; #else std::cout << "The streaming example requires that openPMD has been built " diff --git a/examples/10_streaming_write.py b/examples/10_streaming_write.py index 514b815202..956b683b05 100755 --- a/examples/10_streaming_write.py +++ b/examples/10_streaming_write.py @@ -80,3 +80,10 @@ # If not closing an iteration explicitly, it will be implicitly closed # upon creating the next iteration. iteration.close() + + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. 
+ series.close() diff --git a/examples/11_particle_dataframe.py b/examples/11_particle_dataframe.py index 9b5e626705..7e0cad065c 100755 --- a/examples/11_particle_dataframe.py +++ b/examples/11_particle_dataframe.py @@ -96,3 +96,5 @@ idx_max * E.grid_spacing + E.grid_global_offset) print("maximum intensity I={} at index={} z={}mu".format( Intensity_max, idx_max, pos_max[2])) + + s.close() diff --git a/examples/12_span_write.cpp b/examples/12_span_write.cpp index 6afcb18fe4..f60746bff9 100644 --- a/examples/12_span_write.cpp +++ b/examples/12_span_write.cpp @@ -84,6 +84,14 @@ void span_write(std::string const &filename) } iteration.close(); } + + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. + */ + series.close(); } int main() diff --git a/examples/12_span_write.py b/examples/12_span_write.py index c776bd04a7..bfe0f69784 100644 --- a/examples/12_span_write.py +++ b/examples/12_span_write.py @@ -27,6 +27,13 @@ def span_write(filename): j += 1 iteration.close() + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. 
+ series.close() + if __name__ == "__main__": for ext in io.file_extensions: diff --git a/examples/13_write_dynamic_configuration.cpp b/examples/13_write_dynamic_configuration.cpp index 06ef1e8e77..a398eccf27 100644 --- a/examples/13_write_dynamic_configuration.cpp +++ b/examples/13_write_dynamic_configuration.cpp @@ -128,5 +128,13 @@ chunks = "auto" iteration.close(); } + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ + series.close(); + return 0; } diff --git a/examples/13_write_dynamic_configuration.py b/examples/13_write_dynamic_configuration.py index ce96456f03..8670961592 100644 --- a/examples/13_write_dynamic_configuration.py +++ b/examples/13_write_dynamic_configuration.py @@ -146,6 +146,13 @@ def main(): # upon creating the next iteration. iteration.close() + # The files in 'series' are still open until the object is destroyed, on + # which it cleanly flushes and closes all open file handles. + # When running out of scope on return, the 'Series' destructor is called. + # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. + series.close() + if __name__ == "__main__": main() diff --git a/examples/1_structure.cpp b/examples/1_structure.cpp index dc5056a6c4..fe4381884f 100644 --- a/examples/1_structure.cpp +++ b/examples/1_structure.cpp @@ -39,7 +39,8 @@ int main() * to the openPMD standard. Creation of new elements happens on access * inside the tree-like structure. Required attributes are initialized to * reasonable defaults for every object. 
*/ - ParticleSpecies electrons = series.iterations[1].particles["electrons"]; + ParticleSpecies electrons = + series.writeIterations()[1].particles["electrons"]; /* Data to be moved from memory to persistent storage is structured into * Records, each holding an unbounded number of RecordComponents. If a @@ -59,9 +60,17 @@ int main() electrons["positionOffset"]["x"].resetDataset(dataset); electrons["positionOffset"]["x"].makeConstant(22.0); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[1].close(); + /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/2_read_serial.cpp b/examples/2_read_serial.cpp index e944ef12bf..8fb3ccb190 100644 --- a/examples/2_read_serial.cpp +++ b/examples/2_read_serial.cpp @@ -91,7 +91,11 @@ int main() } auto all_data = E_x.loadChunk(); - series.flush(); + + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + i.close(); cout << "Full E/x starts with:\n\t{"; for (size_t col = 0; col < extent[1] && col < 5; ++col) cout << all_data.get()[col] << ", "; @@ -103,5 +107,6 @@ int main() * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. 
*/ + series.close(); return 0; } diff --git a/examples/2_read_serial.py b/examples/2_read_serial.py index d24841775a..87b5568306 100644 --- a/examples/2_read_serial.py +++ b/examples/2_read_serial.py @@ -61,7 +61,11 @@ # print("") all_data = E_x.load_chunk() - series.flush() + + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + i.close() print("Full E/x is of shape {0} and starts with:".format(all_data.shape)) print(all_data[0, 0, :5]) diff --git a/examples/2a_read_thetaMode_serial.cpp b/examples/2a_read_thetaMode_serial.cpp index 8085e242b2..a796e66447 100644 --- a/examples/2a_read_thetaMode_serial.cpp +++ b/examples/2a_read_thetaMode_serial.cpp @@ -69,11 +69,17 @@ int main() // toCartesianSliceYZ(E_z_modes).loadChunk(); # (y, z) // series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + i.close(); + /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is called. * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/2a_read_thetaMode_serial.py b/examples/2a_read_thetaMode_serial.py index 907f6634aa..07021c1f36 100644 --- a/examples/2a_read_thetaMode_serial.py +++ b/examples/2a_read_thetaMode_serial.py @@ -51,6 +51,13 @@ # E_z_yz = toCartesianSliceYZ(E_z_modes)[:, :] # (y, z) # series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. 
+ # Alternatively, one can call `series.close()` to the same effect as + # calling the destructor, including the release of file handles. + i.close() + # The files in 'series' are still open until the series is closed, at which # time it cleanly flushes and closes all open file handles. # One can close the object explicitly to trigger this. diff --git a/examples/3_write_serial.cpp b/examples/3_write_serial.cpp index 155425eaaa..a66db6c080 100644 --- a/examples/3_write_serial.cpp +++ b/examples/3_write_serial.cpp @@ -45,7 +45,7 @@ int main(int argc, char *argv[]) cout << "Created an empty " << series.iterationEncoding() << " Series\n"; MeshRecordComponent rho = - series.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR]; + series.writeIterations()[1].meshes["rho"][MeshRecordComponent::SCALAR]; cout << "Created a scalar mesh Record with all required openPMD " "attributes\n"; @@ -67,7 +67,11 @@ int main(int argc, char *argv[]) cout << "Stored the whole Dataset contents as a single chunk, " "ready to write content\n"; - series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[1].close(); + cout << "Dataset content has been fully written\n"; /* The files in 'series' are still open until the object is destroyed, on @@ -76,5 +80,6 @@ int main(int argc, char *argv[]) * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/3_write_serial.py b/examples/3_write_serial.py index 320acd027e..8e136f9512 100644 --- a/examples/3_write_serial.py +++ b/examples/3_write_serial.py @@ -28,7 +28,7 @@ print("Created an empty {0} Series".format(series.iteration_encoding)) print(len(series.iterations)) - rho = series.iterations[1]. \ + rho = series.write_iterations()[1]. 
\ meshes["rho"][io.Mesh_Record_Component.SCALAR] dataset = io.Dataset(data.dtype, data.shape) @@ -47,7 +47,10 @@ print("Stored the whole Dataset contents as a single chunk, " + "ready to write content") - series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + series.write_iterations()[1].close() print("Dataset content has been fully written") # The files in 'series' are still open until the series is closed, at which diff --git a/examples/3a_write_thetaMode_serial.cpp b/examples/3a_write_thetaMode_serial.cpp index 56fd703799..9367e43f70 100644 --- a/examples/3a_write_thetaMode_serial.cpp +++ b/examples/3a_write_thetaMode_serial.cpp @@ -51,7 +51,7 @@ int main() geos << "m=" << num_modes << ";imag=+"; std::string const geometryParameters = geos.str(); - Mesh E = series.iterations[0].meshes["E"]; + Mesh E = series.writeIterations()[0].meshes["E"]; E.setGeometry(Mesh::Geometry::thetaMode); E.setGeometryParameters(geometryParameters); E.setDataOrder(Mesh::DataOrder::C); @@ -84,7 +84,10 @@ int main() E_t.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})); E_t.storeChunk(E_t_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); - series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[0].close(); /* The files in 'series' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. @@ -92,5 +95,6 @@ int main() * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. 
*/ + series.close(); return 0; } diff --git a/examples/3a_write_thetaMode_serial.py b/examples/3a_write_thetaMode_serial.py index e5c4419505..ec81435558 100644 --- a/examples/3a_write_thetaMode_serial.py +++ b/examples/3a_write_thetaMode_serial.py @@ -30,7 +30,7 @@ geometry_parameters = "m={0};imag=+".format(num_modes) - E = series.iterations[0].meshes["E"] + E = series.write_iterations()[0].meshes["E"] E.geometry = io.Geometry.thetaMode E.geometry_parameters = geometry_parameters E.grid_spacing = [1.0, 1.0] @@ -62,7 +62,10 @@ E_t.reset_dataset(io.Dataset(E_t_data.dtype, E_t_data.shape)) E_t.store_chunk(E_t_data) - series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + series.write_iterations()[0].close() # The files in 'series' are still open until the series is closed, at which # time it cleanly flushes and closes all open file handles. diff --git a/examples/3b_write_resizable_particles.cpp b/examples/3b_write_resizable_particles.cpp index 7cd424ee2a..d4be87a0fc 100644 --- a/examples/3b_write_resizable_particles.cpp +++ b/examples/3b_write_resizable_particles.cpp @@ -32,7 +32,8 @@ int main() Series series = Series("../samples/3b_write_resizable_particles.h5", Access::CREATE); - ParticleSpecies electrons = series.iterations[0].particles["electrons"]; + ParticleSpecies electrons = + series.writeIterations()[0].particles["electrons"]; // our initial data to write std::vector x{0., 1., 2., 3., 4.}; @@ -78,8 +79,14 @@ int main() rc_xo.resetDataset(dataset); rc_yo.resetDataset(dataset); - // after this call, the provided data buffers can be used again or deleted - series.flush(); + // Attributable::seriesFlush() can be used alternatively if the Series + // handle is not currently in scope + rc_yo.seriesFlush(); + + // The iteration can be closed in order to help free up resources. 
+ // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.writeIterations()[0].close(); // rinse and repeat as needed :) @@ -89,5 +96,6 @@ int main() * Alternatively, one can call `series.close()` to the same effect as * calling the destructor, including the release of file handles. */ + series.close(); return 0; } diff --git a/examples/3b_write_resizable_particles.py b/examples/3b_write_resizable_particles.py index 227ce06977..440fac7de6 100644 --- a/examples/3b_write_resizable_particles.py +++ b/examples/3b_write_resizable_particles.py @@ -60,8 +60,11 @@ rc_xo.reset_dataset(dataset) rc_yo.reset_dataset(dataset) + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. # after this call, the provided data buffers can be used again or deleted - series.flush() + series.write_iterations()[0].close() # rinse and repeat as needed :) diff --git a/examples/4_read_parallel.cpp b/examples/4_read_parallel.cpp index 75f19f4be1..477177cec6 100644 --- a/examples/4_read_parallel.cpp +++ b/examples/4_read_parallel.cpp @@ -55,7 +55,11 @@ int main(int argc, char *argv[]) cout << "Queued the loading of a single chunk per MPI rank from " "disk, " "ready to execute\n"; - series.flush(); + + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + series.iterations[100].close(); if (0 == mpi_rank) cout << "Chunks have been read from disk\n"; @@ -78,6 +82,12 @@ int main(int argc, char *argv[]) // this barrier is not necessary but structures the example output MPI_Barrier(MPI_COMM_WORLD); } + // The files in 'series' are still open until the series is closed, at which + // time it cleanly flushes and closes all open file handles. 
+ // One can close the object explicitly to trigger this. + // Alternatively, this will automatically happen once the garbage collector + // claims (every copy of) the series object. + // In any case, this must happen before MPI_Finalize() is called series.close(); // openPMD::Series MUST be destructed or closed at this point diff --git a/examples/4_read_parallel.py b/examples/4_read_parallel.py index f30d6ffa2d..b36625798f 100644 --- a/examples/4_read_parallel.py +++ b/examples/4_read_parallel.py @@ -36,7 +36,11 @@ if 0 == comm.rank: print("Queued the loading of a single chunk per MPI rank from disk, " "ready to execute") - series.flush() + + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + series.iterations[100].close() if 0 == comm.rank: print("Chunks have been read from disk") diff --git a/examples/5_write_parallel.cpp b/examples/5_write_parallel.cpp index 666de4a3cd..bfe737d9be 100644 --- a/examples/5_write_parallel.cpp +++ b/examples/5_write_parallel.cpp @@ -54,6 +54,10 @@ int main(int argc, char *argv[]) cout << "Created an empty series in parallel with " << mpi_size << " MPI ranks\n"; + // In parallel contexts, it's important to explicitly open iterations. + // This is done automatically when using `Series::writeIterations()`, + // or in read mode `Series::readIterations()`. + series.iterations[1].open(); MeshRecordComponent mymesh = series.iterations[1].meshes["mymesh"][MeshRecordComponent::SCALAR]; @@ -80,10 +84,20 @@ int main(int argc, char *argv[]) "contribution, " "ready to write content to disk\n"; - series.flush(); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. 
+ series.iterations[100].close(); + if (0 == mpi_rank) cout << "Dataset content has been fully written to disk\n"; + /* The files in 'series' are still open until the object is destroyed, on + * which it cleanly flushes and closes all open file handles. + * When running out of scope on return, the 'Series' destructor is called. + * Alternatively, one can call `series.close()` to the same effect as + * calling the destructor, including the release of file handles. + */ series.close(); // openPMD::Series MUST be destructed or closed at this point diff --git a/examples/5_write_parallel.py b/examples/5_write_parallel.py index d925251834..c956b6eed1 100644 --- a/examples/5_write_parallel.py +++ b/examples/5_write_parallel.py @@ -37,6 +37,10 @@ print("Created an empty series in parallel with {} MPI ranks".format( comm.size)) + # In parallel contexts, it's important to explicitly open iterations. + # This is done automatically when using `Series.write_iterations()`, + # or in read mode `Series.read_iterations()`. + series.iterations[1].open() mymesh = series.iterations[1]. \ meshes["mymesh"][io.Mesh_Record_Component.SCALAR] @@ -59,7 +63,11 @@ print("Registered a single chunk per MPI rank containing its " "contribution, ready to write content to disk") - series.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. 
+ series.iterations[1].close() + if 0 == comm.rank: print("Dataset content has been fully written to disk") diff --git a/examples/6_dump_filebased_series.cpp b/examples/6_dump_filebased_series.cpp index 1b2964a5d4..7e67233f15 100644 --- a/examples/6_dump_filebased_series.cpp +++ b/examples/6_dump_filebased_series.cpp @@ -32,43 +32,56 @@ int main() std::cout << '\n'; std::cout << "Read iterations in basePath:\n"; + /* + * A classical loop over the C++-style container + * Direct access to o.iterations allows random-access into all data. + */ for (auto const &i : o.iterations) std::cout << '\t' << i.first << '\n'; std::cout << '\n'; - for (auto const &i : o.iterations) + /* + * A loop that uses o.readIterations(). + * This loop is MPI collective and will open and close iterations + * automatically (closing manually is still recommended before long compute + * operations in order to release data as soon as possible). + * An iteration once closed can not (yet) be re-opened. + */ + for (auto i : o.readIterations()) { - std::cout << "Read attributes in iteration " << i.first << ":\n"; - for (auto const &val : i.second.attributes()) + std::cout << "Read attributes in iteration " << i.iterationIndex + << ":\n"; + for (auto const &val : i.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::cout << i.first << ".time - " << i.second.time() << '\n' - << i.first << ".dt - " << i.second.dt() << '\n' - << i.first << ".timeUnitSI - " << i.second.timeUnitSI() + std::cout << i.iterationIndex << ".time - " << i.time() << '\n' + << i.iterationIndex << ".dt - " << i.dt() << '\n' + << i.iterationIndex << ".timeUnitSI - " << i.timeUnitSI() << '\n' << '\n'; - std::cout << "Read attributes in meshesPath in iteration " << i.first - << ":\n"; - for (auto const &a : i.second.meshes.attributes()) + std::cout << "Read attributes in meshesPath in iteration " + << i.iterationIndex << ":\n"; + for (auto const &a : i.meshes.attributes()) std::cout << '\t' << a << '\n'; 
std::cout << '\n'; - std::cout << "Read meshes in iteration " << i.first << ":\n"; - for (auto const &m : i.second.meshes) + std::cout << "Read meshes in iteration " << i.iterationIndex << ":\n"; + for (auto const &m : i.meshes) std::cout << '\t' << m.first << '\n'; std::cout << '\n'; - for (auto const &m : i.second.meshes) + for (auto const &m : i.meshes) { std::cout << "Read attributes for mesh " << m.first - << " in iteration " << i.first << ":\n"; + << " in iteration " << i.iterationIndex << ":\n"; for (auto const &val : m.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::string meshPrefix = std::to_string(i.first) + '.' + m.first; + std::string meshPrefix = + std::to_string(i.iterationIndex) + '.' + m.first; std::string axisLabels = ""; for (auto const &val : m.second.axisLabels()) axisLabels += val + ", "; @@ -110,8 +123,8 @@ int main() std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::string componentPrefix = - std::to_string(i.first) + '.' + m.first + '.' + rc.first; + std::string componentPrefix = std::to_string(i.iterationIndex) + + '.' + m.first + '.' 
+ rc.first; std::string position = ""; for (auto const &val : rc.second.position()) position += std::to_string(val) + ", "; @@ -123,27 +136,29 @@ int main() } } - std::cout << "Read attributes in particlesPath in iteration " << i.first - << ":\n"; - for (auto const &a : i.second.particles.attributes()) + std::cout << "Read attributes in particlesPath in iteration " + << i.iterationIndex << ":\n"; + for (auto const &a : i.particles.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; - std::cout << "Read particleSpecies in iteration " << i.first << ":\n"; - for (auto const &val : i.second.particles) + std::cout << "Read particleSpecies in iteration " << i.iterationIndex + << ":\n"; + for (auto const &val : i.particles) std::cout << '\t' << val.first << '\n'; std::cout << '\n'; - for (auto const &p : i.second.particles) + for (auto const &p : i.particles) { std::cout << "Read attributes for particle species " << p.first - << " in iteration " << i.first << ":\n"; + << " in iteration " << i.iterationIndex << ":\n"; for (auto const &val : p.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; std::cout << "Read particle records for particle species " - << p.first << " in iteration " << i.first << ":\n"; + << p.first << " in iteration " << i.iterationIndex + << ":\n"; for (auto const &r : p.second) std::cout << '\t' << r.first << '\n'; std::cout << '\n'; @@ -167,6 +182,13 @@ int main() } } } + + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + // Since we're using `Series::readIterations()`, this would also happen + // automatically upon the next iteration. 
+ i.close(); } /* The files in 'o' are still open until the object is destroyed, on diff --git a/examples/7_extended_write_serial.cpp b/examples/7_extended_write_serial.cpp index da866eac65..bfb64e1fff 100644 --- a/examples/7_extended_write_serial.cpp +++ b/examples/7_extended_write_serial.cpp @@ -226,6 +226,11 @@ int main() // constant records mesh["y"].makeConstant(constant_value); + // The iteration can be closed in order to help free up resources. + // The iteration's content will be flushed automatically. + // An iteration once closed cannot (yet) be reopened. + cur_it.close(); + /* The files in 'f' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. * When running out of scope on return, the 'Series' destructor is @@ -233,6 +238,7 @@ int main() * effect as calling the destructor, including the release of file * handles. */ + f.close(); } // namespace ; return 0; diff --git a/examples/7_extended_write_serial.py b/examples/7_extended_write_serial.py index 884311f92d..84ca5002db 100755 --- a/examples/7_extended_write_serial.py +++ b/examples/7_extended_write_serial.py @@ -206,6 +206,11 @@ # constant records mesh["y"].make_constant(constant_value) + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. + # An iteration once closed cannot (yet) be reopened. + cur_it.close() + # The files in 'f' are still open until the series is closed, at which # time it cleanly flushes and closes all open file handles. # One can close the object explicitly to trigger this. diff --git a/examples/9_particle_write_serial.py b/examples/9_particle_write_serial.py index 5dc842918e..aebd266528 100644 --- a/examples/9_particle_write_serial.py +++ b/examples/9_particle_write_serial.py @@ -68,5 +68,10 @@ # files) f.flush() + # The iteration can be closed in order to help free up resources. + # The iteration's content will be flushed automatically. 
+ # An iteration once closed cannot (yet) be reopened. + cur_it.close() + # now the file is closed f.close() diff --git a/include/openPMD/WriteIterations.hpp b/include/openPMD/WriteIterations.hpp index 134abe0519..3099af7025 100644 --- a/include/openPMD/WriteIterations.hpp +++ b/include/openPMD/WriteIterations.hpp @@ -44,9 +44,16 @@ class Series; * not possible once it has been closed. * */ + +namespace internal +{ + class SeriesData; +} + class WriteIterations { friend class Series; + friend class internal::SeriesData; private: using IterationsContainer_t = @@ -62,6 +69,7 @@ class WriteIterations struct SharedResources { IterationsContainer_t iterations; + //! Index of the last opened iteration std::optional currentlyOpen; SharedResources(IterationsContainer_t); @@ -70,8 +78,11 @@ class WriteIterations WriteIterations(IterationsContainer_t); explicit WriteIterations() = default; - //! Index of the last opened iteration - std::shared_ptr shared; + // std::optional so that a single instance is able to close this without + // needing to wait for all instances to deallocate + std::shared_ptr> shared; + + void close(); public: mapped_type &operator[](key_type const &key); diff --git a/src/Series.cpp b/src/Series.cpp index 21e8bef234..779ba97906 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -2250,7 +2250,10 @@ namespace internal void SeriesData::close() { // WriteIterations gets the first shot at flushing - this->m_writeIterations = std::optional(); + if (this->m_writeIterations.has_value()) + { + this->m_writeIterations.value().close(); + } /* * Scenario: A user calls `Series::flush()` but does not check for * thrown exceptions. 
The exception will propagate further up, @@ -2266,10 +2269,6 @@ namespace internal impl.flush(); impl.flushStep(/* doFlush = */ true); } - if (m_writeIterations.has_value()) - { - m_writeIterations = std::optional(); - } // Not strictly necessary, but clear the map of iterations // This releases the openPMD hierarchy iterations.container().clear(); diff --git a/src/WriteIterations.cpp b/src/WriteIterations.cpp index 872342dfbe..2bc34f0416 100644 --- a/src/WriteIterations.cpp +++ b/src/WriteIterations.cpp @@ -20,6 +20,7 @@ */ #include "openPMD/WriteIterations.hpp" +#include "openPMD/Error.hpp" #include "openPMD/Series.hpp" @@ -45,9 +46,15 @@ WriteIterations::SharedResources::~SharedResources() } WriteIterations::WriteIterations(IterationsContainer_t iterations) - : shared{std::make_shared(std::move(iterations))} + : shared{std::make_shared>( + std::move(iterations))} {} +void WriteIterations::close() +{ + *shared = std::nullopt; +} + WriteIterations::mapped_type &WriteIterations::operator[](key_type const &key) { // make a copy @@ -56,17 +63,23 @@ WriteIterations::mapped_type &WriteIterations::operator[](key_type const &key) } WriteIterations::mapped_type &WriteIterations::operator[](key_type &&key) { - if (shared->currentlyOpen.has_value()) + if (!shared || !shared->has_value()) + { + throw error::WrongAPIUsage( + "[WriteIterations] Trying to access after closing Series."); + } + auto &s = shared->value(); + if (s.currentlyOpen.has_value()) { - auto lastIterationIndex = shared->currentlyOpen.value(); - auto &lastIteration = shared->iterations.at(lastIterationIndex); + auto lastIterationIndex = s.currentlyOpen.value(); + auto &lastIteration = s.iterations.at(lastIterationIndex); if (lastIterationIndex != key && !lastIteration.closed()) { lastIteration.close(); } } - shared->currentlyOpen = key; - auto &res = shared->iterations[std::move(key)]; + s.currentlyOpen = key; + auto &res = s.iterations[std::move(key)]; if (res.getStepStatus() == StepStatus::NoStep) { 
res.beginStep(/* reread = */ false); diff --git a/src/binding/python/Attributable.cpp b/src/binding/python/Attributable.cpp index b60bd8cb48..61f1376b94 100644 --- a/src/binding/python/Attributable.cpp +++ b/src/binding/python/Attributable.cpp @@ -380,7 +380,7 @@ void init_Attributable(py::module &m) "attributes", [](Attributable &attr) { return attr.attributes(); }, // ref + keepalive - py::return_value_policy::reference_internal) + py::return_value_policy::move) // C++ pass-through API: Setter // note that the order of overloads is important! diff --git a/src/binding/python/Container.cpp b/src/binding/python/Container.cpp index 137260d9f1..28bda651ff 100644 --- a/src/binding/python/Container.cpp +++ b/src/binding/python/Container.cpp @@ -110,8 +110,10 @@ bind_container(py::handle scope, std::string const &name, Args &&...args) cl.def( "__getitem__", [](Map &m, KeyType const &k) -> MappedType & { return m[k]; }, - // ref + keepalive - py::return_value_policy::reference_internal); + // copy + keepalive + // All objects in the openPMD object model are handles, so using a copy + // is safer and still performant. 
+ py::return_value_policy::copy); // Assignment provided only if the type is copyable py::detail::map_assignment(cl); diff --git a/src/binding/python/Iteration.cpp b/src/binding/python/Iteration.cpp index 98f0f7c87f..0ac290f7ff 100644 --- a/src/binding/python/Iteration.cpp +++ b/src/binding/python/Iteration.cpp @@ -74,13 +74,13 @@ void init_Iteration(py::module &m) .def_readwrite( "meshes", &Iteration::meshes, - py::return_value_policy::reference, + py::return_value_policy::copy, // garbage collection: return value must be freed before Iteration py::keep_alive<1, 0>()) .def_readwrite( "particles", &Iteration::particles, - py::return_value_policy::reference, + py::return_value_policy::copy, // garbage collection: return value must be freed before Iteration py::keep_alive<1, 0>()); } diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index 3dbaaa034c..cdff83fd43 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -61,8 +61,8 @@ void init_Series(py::module &m) [](WriteIterations writeIterations, Series::IterationIndex_t key) { return writeIterations[key]; }, - // keep container alive while iterator exists - py::keep_alive<0, 1>()); + // copy + keepalive + py::return_value_policy::copy); py::class_(m, "IndexedIteration") .def_readonly("iteration_index", &IndexedIteration::iterationIndex); py::class_(m, "ReadIterations") @@ -224,6 +224,11 @@ this method. .def_readwrite( "iterations", &Series::iterations, + /* + * Need to keep reference return policy here for now to further + * support legacy `del series` workflows that works despite children + * still being alive. 
+ */ py::return_value_policy::reference, // garbage collection: return value must be freed before Series py::keep_alive<1, 0>()) From 9a215b20b48abc9275ef53306cfd5e357bf86ed5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Wed, 22 Feb 2023 23:59:49 +0100 Subject: [PATCH 41/82] Add unique_ptr overloads for storeChunk calls (#1294) * Add OpenpmdUniquePtr class * Prepare IOTask.hpp for a non-copyable parameter Refactor ::clone() to ::to_heap(), also make copy/move constructors/operators in AbstractParameter protected instead of deleting them. * Backend fully prepared for accepting unique_ptr buffers No optimizations based on that yet * Add storeChunk(OpenpmdUniquePtr, ...) overload * Fix invasive tests * Add test for ADIOS2 backend optimization Not yet implemented, so test fails * Implement ADIOS2 backend optimization * Support also regular std::unique_ptr * CI fixes * Use OpenpmdUniquePtr with storeChunk in an example * Rename OpenpmdUniquePtr -> UniquePtrWithLambda --- examples/12_span_write.cpp | 30 ++ include/openPMD/Datatype.hpp | 47 +-- include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp | 42 +- .../IO/ADIOS/CommonADIOS1IOHandler.hpp | 4 +- include/openPMD/IO/AbstractIOHandlerImpl.hpp | 2 +- include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp | 4 +- include/openPMD/IO/IOTask.hpp | 368 ++++++++---------- include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp | 4 +- include/openPMD/RecordComponent.hpp | 35 ++ include/openPMD/RecordComponent.tpp | 76 ++-- include/openPMD/auxiliary/Memory.hpp | 56 ++- include/openPMD/auxiliary/TypeTraits.hpp | 47 ++- include/openPMD/auxiliary/UniquePtr.hpp | 176 +++++++++ .../openPMD/backend/PatchRecordComponent.hpp | 2 +- src/IO/ADIOS/ADIOS2IOHandler.cpp | 162 +++++--- src/IO/ADIOS/CommonADIOS1IOHandler.cpp | 2 +- src/IO/HDF5/HDF5IOHandler.cpp | 6 +- src/IO/JSON/JSONIOHandlerImpl.cpp | 2 +- src/RecordComponent.cpp | 45 +++ test/CoreTest.cpp | 55 ++- test/ParallelIOTest.cpp | 3 +- test/SerialIOTest.cpp | 112 +++++- 22 files changed, 
905 insertions(+), 375 deletions(-) create mode 100644 include/openPMD/auxiliary/UniquePtr.hpp diff --git a/examples/12_span_write.cpp b/examples/12_span_write.cpp index f60746bff9..d53181cea0 100644 --- a/examples/12_span_write.cpp +++ b/examples/12_span_write.cpp @@ -82,6 +82,36 @@ void span_write(std::string const &filename) } ++j; } + + using mesh_type = position_t; + + RecordComponent chargeDensity = + iteration.meshes["e_chargeDensity"][RecordComponent::SCALAR]; + + /* + * A similar memory optimization is possible by using a unique_ptr type + * in the call to storeChunk(). + * Unlike the Span API, the buffer here is user-created, but in both + * approaches, the backend will manage the memory after the call to + * storeChunk(). + * Some backends (especially: ADIOS2 BP5) will benefit from being able + * to avoid memcopies since they know that they can just keep the memory + * and noone else is reading it. + */ + chargeDensity.resetDataset(dataset); + /* + * The class template UniquePtrWithLambda (subclass of std::unique_ptr) + * can be used to specify custom destructors, e.g. for deallocating + * GPU pointers. + * Normal std::unique_ptr types can also be used, even with custom + * destructors. + */ + UniquePtrWithLambda data( + new mesh_type[length](), [](auto const *ptr) { delete[] ptr; }); + /* + * Move the unique_ptr into openPMD. It must now no longer be accessed. 
+ */ + chargeDensity.storeChunk(std::move(data), {0}, extent); iteration.close(); } diff --git a/include/openPMD/Datatype.hpp b/include/openPMD/Datatype.hpp index 9ca2cb0675..f9661b60e8 100644 --- a/include/openPMD/Datatype.hpp +++ b/include/openPMD/Datatype.hpp @@ -21,6 +21,7 @@ #pragma once #include "openPMD/auxiliary/TypeTraits.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" #include #include @@ -276,31 +277,26 @@ inline constexpr Datatype determineDatatype() return Datatype::UNDEFINED; } -template -inline constexpr Datatype determineDatatype(std::shared_ptr) -{ - return determineDatatype(); -} - -template -inline constexpr Datatype determineDatatype(T *) -{ - return determineDatatype(); -} - -/* - * Catch-all overload for unsupported types, with static_assert errors - * triggered at compile-time. +/** + * @brief Determine datatype of passed value + * + * @param val Value whose type to evaluate + * @tparam T Type of the passed value + * @return If T is of a pointer type, then the type of the contained value. + * Otherwise, a compile-time error detailing the use of this function. */ -template -inline constexpr Datatype determineDatatype(T_ContiguousContainer &&) +template +inline constexpr Datatype determineDatatype(T &&val) { - using T_ContiguousContainer_stripped = - std::remove_reference_t; - if constexpr (auxiliary::IsContiguousContainer_v< - T_ContiguousContainer_stripped>) + (void)val; // don't need this, it only has a name for Doxygen + using T_stripped = std::remove_cv_t>; + if constexpr (auxiliary::IsPointer_v) + { + return determineDatatype>(); + } + else if constexpr (auxiliary::IsContiguousContainer_v) { - static_assert(auxiliary::dependent_false_v, R"( + static_assert(auxiliary::dependent_false_v, R"( Error: Passed a contiguous container type to determineDatatype<>(). These types are not directly supported due to colliding semantics. 
Assuming a vector object `std::vector vec;`, @@ -322,15 +318,14 @@ use one of the following alternatives: } else { - static_assert(auxiliary::dependent_false_v, R"( + static_assert(auxiliary::dependent_false_v, R"( Error: Unknown datatype passed to determineDatatype<>(). For a direct translation from C++ type to the openPMD::Datatype enum, use: `auto determineDatatype() -> Datatype`. For detecting the contained datatpye of a pointer type (shared or raw pointer), -use either of the following overloads: -`auto determineDatatype(std::shared_ptr) -> Datatype` or -`auto determineDatatype(T *) -> Datatype`. +use this following template (i.e. `auto determineDatatype(T &&) -> Datatype`) +which accepts pointer-type parameters (raw, shared or unique). )"); } // Unreachable, but C++ does not know it diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index 309648b782..6804d60ed7 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -82,6 +82,7 @@ namespace detail struct BufferedGet; struct BufferedAttributeRead; struct BufferedAttributeWrite; + struct RunUniquePtrPut; } // namespace detail namespace ADIOS2Schema @@ -124,6 +125,7 @@ class ADIOS2IOHandlerImpl friend struct detail::WriteDataset; friend struct detail::BufferedActions; friend struct detail::BufferedAttributeRead; + friend struct detail::RunUniquePtrPut; public: #if openPMD_HAVE_MPI @@ -188,8 +190,8 @@ class ADIOS2IOHandlerImpl void deleteAttribute( Writable *, Parameter const &) override; - void writeDataset( - Writable *, Parameter const &) override; + void + writeDataset(Writable *, Parameter &) override; void writeAttribute( Writable *, Parameter const &) override; @@ -544,11 +546,7 @@ namespace detail struct WriteDataset { template - static void call( - ADIOS2IOHandlerImpl *impl, - BufferedPut &bp, - adios2::IO &IO, - adios2::Engine &engine); + static void call(BufferedActions &ba, BufferedPut &bp); 
template static void call(Params &&...); @@ -916,6 +914,17 @@ namespace detail void run(BufferedActions &) override; }; + struct BufferedUniquePtrPut + { + std::string name; + Offset offset; + Extent extent; + UniquePtrWithLambda data; + Datatype dtype; + + void run(BufferedActions &); + }; + struct OldBufferedAttributeRead : BufferedAction { Parameter param; @@ -967,6 +976,8 @@ namespace detail { friend struct BufferedGet; friend struct BufferedPut; + friend struct RunUniquePtrPut; + friend struct WriteDataset; using FlushTarget = ADIOS2IOHandlerImpl::FlushTarget; @@ -1023,6 +1034,13 @@ namespace detail * penalty, once preloadAttributes has been filled. */ std::vector m_attributeReads; + /** + * When receiving a unique_ptr, we know that the buffer is ours and + * ours alone. So, for performance reasons, show the buffer to ADIOS2 as + * late as possible and avoid unnecessary data copies in BP5 triggered + * by PerformDataWrites(). + */ + std::vector m_uniquePtrPuts; /** * This contains deferred actions that have already been enqueued into * ADIOS2, but not yet performed in ADIOS2. @@ -1116,8 +1134,10 @@ namespace detail * * adios2::Engine::EndStep * * adios2::Engine::Perform(Puts|Gets) * * adios2::Engine::Close - * @param writeAttributes If using the new attribute layout, perform - * deferred attribute writes now. + * @param writeLatePuts Some things are deferred until right before + * Engine::EndStep() or Engine::Close(): + * 1) Writing attributes in new ADIOS2 schema. + * 2) Running unique_ptr Put()s. * @param flushUnconditionally Whether to run the functor even if no * deferred IO tasks had been queued. */ @@ -1125,7 +1145,7 @@ namespace detail void flush_impl( ADIOS2FlushParams flushParams, F &&performPutsGets, - bool writeAttributes, + bool writeLatePuts, bool flushUnconditionally); /** @@ -1133,7 +1153,7 @@ namespace detail * and does not flush unconditionally. 
* */ - void flush_impl(ADIOS2FlushParams, bool writeAttributes = false); + void flush_impl(ADIOS2FlushParams, bool writeLatePuts = false); /** * @brief Begin or end an ADIOS step. diff --git a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp index 9fbe0e0e87..d1a079b17f 100644 --- a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp @@ -74,8 +74,8 @@ class CommonADIOS1IOHandlerImpl : public AbstractIOHandlerImpl Writable *, Parameter const &) override; void deleteAttribute( Writable *, Parameter const &) override; - void writeDataset( - Writable *, Parameter const &) override; + void + writeDataset(Writable *, Parameter &) override; void writeAttribute( Writable *, Parameter const &) override; void readDataset(Writable *, Parameter &) override; diff --git a/include/openPMD/IO/AbstractIOHandlerImpl.hpp b/include/openPMD/IO/AbstractIOHandlerImpl.hpp index 925c5f83a0..f382a73258 100644 --- a/include/openPMD/IO/AbstractIOHandlerImpl.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImpl.hpp @@ -456,7 +456,7 @@ class AbstractIOHandlerImpl * storage after the operation completes successfully. */ virtual void - writeDataset(Writable *, Parameter const &) = 0; + writeDataset(Writable *, Parameter &) = 0; /** Get a view into a dataset buffer that can be filled by a user. 
* diff --git a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp index d8d564c281..77f9cadb60 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp @@ -65,8 +65,8 @@ class HDF5IOHandlerImpl : public AbstractIOHandlerImpl Writable *, Parameter const &) override; void deleteAttribute( Writable *, Parameter const &) override; - void writeDataset( - Writable *, Parameter const &) override; + void + writeDataset(Writable *, Parameter &) override; void writeAttribute( Writable *, Parameter const &) override; void readDataset(Writable *, Parameter &) override; diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp index cf2cf520e5..0ee6cd66d7 100644 --- a/include/openPMD/IO/IOTask.hpp +++ b/include/openPMD/IO/IOTask.hpp @@ -25,6 +25,7 @@ #include "openPMD/IterationEncoding.hpp" #include "openPMD/Streaming.hpp" #include "openPMD/auxiliary/Export.hpp" +#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/auxiliary/Variant.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/ParsePreference.hpp" @@ -33,6 +34,7 @@ #include #include #include +#include #include namespace openPMD @@ -92,12 +94,17 @@ struct OPENPMDAPI_EXPORT AbstractParameter { virtual ~AbstractParameter() = default; AbstractParameter() = default; - // AbstractParameter(AbstractParameter&&) = default; + virtual std::unique_ptr to_heap() && = 0; + +protected: // avoid object slicing - AbstractParameter(const AbstractParameter &) = delete; - AbstractParameter &operator=(const AbstractParameter &) = delete; - virtual std::unique_ptr clone() const = 0; + // by allow only child classes to use these things for defining their own + // copy/move constructors/assignment operators + AbstractParameter(const AbstractParameter &) = default; + AbstractParameter &operator=(const AbstractParameter &) = default; + AbstractParameter(AbstractParameter &&) = default; + AbstractParameter 
&operator=(AbstractParameter &&) = default; }; /** @brief Typesafe description of all required arguments for a specified @@ -122,14 +129,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter(), name(p.name), encoding(p.encoding) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -141,14 +149,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter(), name(p.name), fileExists(p.fileExists) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -167,17 +176,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter() - , name(p.name) - , encoding(p.encoding) - , out_parsePreference(p.out_parsePreference) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -197,13 +204,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter 
{ Parameter() = default; - Parameter(Parameter const &) : AbstractParameter() - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } }; @@ -212,13 +221,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), name(p.name) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -229,13 +240,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), path(p.path) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string path = ""; @@ -246,18 +259,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &) : AbstractParameter() - {} - - Parameter &operator=(Parameter const &) - { - return *this; - } + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const 
override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } }; @@ -266,13 +276,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), path(p.path) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string path = ""; @@ -283,13 +295,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), path(p.path) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string path = ""; @@ -300,13 +314,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), paths(p.paths) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::shared_ptr> paths = @@ -318,18 +334,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter() - , name(p.name) - , 
extent(p.extent) - , dtype(p.dtype) - , options(p.options) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -355,13 +368,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), extent(p.extent) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } Extent extent = {}; @@ -372,14 +387,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter(), name(p.name), dtype(p.dtype), extent(p.extent) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -392,13 +408,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), name(p.name) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + 
std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -409,33 +427,22 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter() - , extent(p.extent) - , offset(p.offset) - , dtype(p.dtype) - , data(p.data) - {} - Parameter &operator=(const Parameter &p) - { - this->extent = p.extent; - this->offset = p.offset; - this->dtype = p.dtype; - this->data = p.data; - return *this; - } + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = delete; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = delete; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } Extent extent = {}; Offset offset = {}; Datatype dtype = Datatype::UNDEFINED; - std::shared_ptr data = nullptr; + auxiliary::WriteBuffer data; }; template <> @@ -443,27 +450,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter() - , extent(p.extent) - , offset(p.offset) - , dtype(p.dtype) - , data(p.data) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - Parameter &operator=(const Parameter &p) - { - this->extent = p.extent; - this->offset = p.offset; - this->dtype = p.dtype; - this->data = p.data; - return *this; - } - - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } Extent extent = {}; @@ -477,13 +472,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const 
&p) : AbstractParameter(), datasets(p.datasets) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::shared_ptr> datasets = @@ -495,28 +492,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter() - , offset(p.offset) - , extent(p.extent) - , dtype(p.dtype) - , update(p.update) - , out(p.out) - {} - Parameter &operator=(Parameter const &p) - { - offset = p.offset; - extent = p.extent; - dtype = p.dtype; - update = p.update; - out = p.out; - return *this; - } + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } // in parameters @@ -539,13 +523,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), name(p.name) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -556,18 +542,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter() - , name(p.name) - , dtype(p.dtype) - , 
changesOverSteps(p.changesOverSteps) - , resource(p.resource) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -587,25 +570,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter() - , name(p.name) - , dtype(p.dtype) - , resource(p.resource) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - Parameter &operator=(const Parameter &p) - { - this->name = p.name; - this->dtype = p.dtype; - this->resource = p.resource; - return *this; - } - - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::string name = ""; @@ -619,14 +592,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter(), attributes(p.attributes) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } std::shared_ptr> attributes = @@ -638,14 +612,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter(), mode(p.mode), status(p.status) - {} + Parameter(Parameter &&) = 
default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } //! input parameter @@ -660,19 +635,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) : AbstractParameter(), chunks(p.chunks) - {} - - Parameter &operator=(Parameter const &p) - { - chunks = p.chunks; - return *this; - } + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { return std::unique_ptr( - new Parameter(*this)); + new Parameter(std::move(*this))); } // output parameter @@ -684,13 +655,15 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter(Parameter const &p) - : AbstractParameter(), otherWritable(p.otherWritable) - {} + Parameter(Parameter &&) = default; + Parameter(Parameter const &) = default; + Parameter &operator=(Parameter &&) = default; + Parameter &operator=(Parameter const &) = default; - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { - return std::make_unique>(*this); + return std::make_unique>( + std::move(*this)); } Writable *otherWritable; @@ -704,9 +677,10 @@ struct OPENPMDAPI_EXPORT Parameter Parameter(Parameter const &) : AbstractParameter() {} - std::unique_ptr clone() const override + std::unique_ptr to_heap() && override { - return std::make_unique>(*this); + return std::make_unique>( + std::move(*this)); } }; @@ -730,13 +704,15 @@ class OPENPMDAPI_EXPORT IOTask * parameters to the operation. 
*/ template - explicit IOTask(Writable *w, Parameter const &p) - : writable{w}, operation{op}, parameter{p.clone()} + explicit IOTask(Writable *w, Parameter p) + : writable{w}, operation{op}, parameter{std::move(p).to_heap()} {} template - explicit IOTask(Attributable *a, Parameter const &p) - : writable{getWritable(a)}, operation{op}, parameter{p.clone()} + explicit IOTask(Attributable *a, Parameter p) + : writable{getWritable(a)} + , operation{op} + , parameter{std::move(p).to_heap()} {} explicit IOTask(IOTask const &other) diff --git a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp index 4c68004bc7..7f10f62cd9 100644 --- a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp @@ -195,8 +195,8 @@ class JSONIOHandlerImpl : public AbstractIOHandlerImpl void deleteAttribute( Writable *, Parameter const &) override; - void writeDataset( - Writable *, Parameter const &) override; + void + writeDataset(Writable *, Parameter &) override; void writeAttribute( Writable *, Parameter const &) override; diff --git a/include/openPMD/RecordComponent.hpp b/include/openPMD/RecordComponent.hpp index 5ef9585520..a4bc694e16 100644 --- a/include/openPMD/RecordComponent.hpp +++ b/include/openPMD/RecordComponent.hpp @@ -21,7 +21,9 @@ #pragma once #include "openPMD/Dataset.hpp" +#include "openPMD/auxiliary/ShareRaw.hpp" #include "openPMD/auxiliary/TypeTraits.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" #include "openPMD/backend/BaseRecordComponent.hpp" #include @@ -291,6 +293,36 @@ class RecordComponent : public BaseRecordComponent template void storeChunk(std::shared_ptr data, Offset offset, Extent extent); + /** Store a chunk of data from a chunk of memory, unique pointer version. + * + * @param data Preallocated, contiguous buffer, large enough to read the + * the specified data from it. + * The unique pointer must own and manage the buffer. 
+ * Optimizations might be implemented based on this + * assumption (e.g. further deferring the operation if the + * backend is the unique owner). + * For raw pointers, use storeChunkRaw(). + * @param offset Offset within the dataset. + * @param extent Extent within the dataset, counted from the offset. + */ + template + void storeChunk(UniquePtrWithLambda data, Offset offset, Extent extent); + + /** Store a chunk of data from a chunk of memory, unique pointer version. + * + * @param data Preallocated, contiguous buffer, large enough to read the + * the specified data from it. + * The unique pointer must own and manage the buffer. + * Optimizations might be implemented based on this + * assumption (e.g. further deferring the operation if the + * backend is the unique owner). + * For raw pointers, use storeChunkRaw(). + * @param offset Offset within the dataset. + * @param extent Extent within the dataset, counted from the offset. + */ + template + void storeChunk(std::unique_ptr data, Offset offset, Extent extent); + /** Store a chunk of data from a chunk of memory, raw pointer version. * * @param data Preallocated, contiguous buffer, large enough to read the @@ -387,6 +419,9 @@ class RecordComponent : public BaseRecordComponent */ RecordComponent &makeEmpty(Dataset d); + void storeChunk( + auxiliary::WriteBuffer buffer, Datatype datatype, Offset o, Extent e); + /** * @brief Check recursively whether this RecordComponent is dirty. 
* It is dirty if any attribute or dataset is read from or written to diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 8e6d2c8775..1df93875b5 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -23,8 +23,10 @@ #include "openPMD/RecordComponent.hpp" #include "openPMD/Span.hpp" +#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/auxiliary/ShareRawInternal.hpp" #include "openPMD/auxiliary/TypeTraits.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" namespace openPMD { @@ -194,51 +196,41 @@ template< typename T > inline void RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) { - if( constant() ) - throw std::runtime_error("Chunks cannot be written for a constant RecordComponent."); - if( empty() ) - throw std::runtime_error("Chunks cannot be written for an empty RecordComponent."); - if( !data ) - throw std::runtime_error("Unallocated pointer passed during chunk store."); + if (!data) + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); Datatype dtype = determineDatatype(data); - if( dtype != getDatatype() ) - { - std::ostringstream oss; - oss << "Datatypes of chunk data (" - << dtype - << ") and record component (" - << getDatatype() - << ") do not match."; - throw std::runtime_error(oss.str()); - } - uint8_t dim = getDimensionality(); - if( e.size() != dim || o.size() != dim ) - { - std::ostringstream oss; - oss << "Dimensionality of chunk (" - << "offset=" << o.size() << "D, " - << "extent=" << e.size() << "D) " - << "and record component (" - << int(dim) << "D) " - << "do not match."; - throw std::runtime_error(oss.str()); - } - Extent dse = getExtent(); - for( uint8_t i = 0; i < dim; ++i ) - if( dse[i] < o[i] + e[i] ) - throw std::runtime_error("Chunk does not reside inside dataset (Dimension on index " + std::to_string(i) - + ". 
DS: " + std::to_string(dse[i]) - + " - Chunk: " + std::to_string(o[i] + e[i]) - + ")"); - Parameter< Operation::WRITE_DATASET > dWrite; - dWrite.offset = o; - dWrite.extent = e; - dWrite.dtype = dtype; /* std::static_pointer_cast correctly reference-counts the pointer */ - dWrite.data = std::static_pointer_cast< void const >(data); - auto & rc = get(); - rc.m_chunks.push(IOTask(this, dWrite)); + storeChunk( + auxiliary::WriteBuffer(std::static_pointer_cast(data)), + dtype, + std::move(o), + std::move(e)); +} + +template +inline void +RecordComponent::storeChunk(UniquePtrWithLambda data, Offset o, Extent e) +{ + if (!data) + throw std::runtime_error( + "Unallocated pointer passed during chunk store."); + Datatype dtype = determineDatatype<>(data); + + storeChunk( + auxiliary::WriteBuffer{std::move(data).template static_cast_()}, + dtype, + std::move(o), + std::move(e)); +} + +template +inline void +RecordComponent::storeChunk(std::unique_ptr data, Offset o, Extent e) +{ + storeChunk( + UniquePtrWithLambda(std::move(data)), std::move(o), std::move(e)); } template diff --git a/include/openPMD/auxiliary/Memory.hpp b/include/openPMD/auxiliary/Memory.hpp index 11a47d215f..66ebb5fd34 100644 --- a/include/openPMD/auxiliary/Memory.hpp +++ b/include/openPMD/auxiliary/Memory.hpp @@ -22,17 +22,21 @@ #include "openPMD/Dataset.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" #include #include +#include #include +#include #include +#include namespace openPMD { namespace auxiliary { - inline std::unique_ptr > + inline std::unique_ptr> allocatePtr(Datatype dtype, uint64_t numPoints) { void *data = nullptr; @@ -151,10 +155,10 @@ namespace auxiliary "Unknown Attribute datatype (Pointer allocation)"); } - return std::unique_ptr >(data, del); + return std::unique_ptr>(data, del); } - inline std::unique_ptr > + inline std::unique_ptr> allocatePtr(Datatype dtype, Extent const &e) { uint64_t numPoints = 1u; @@ -163,5 +167,51 @@ namespace auxiliary return 
allocatePtr(dtype, numPoints); } + /* + * A buffer for the WRITE_DATASET task that can either be a std::shared_ptr + * or a std::unique_ptr. + */ + struct WriteBuffer + { + using EligibleTypes = std:: + variant, UniquePtrWithLambda>; + EligibleTypes m_buffer; + + WriteBuffer() : m_buffer(UniquePtrWithLambda()) + {} + + template + explicit WriteBuffer(Args &&...args) + : m_buffer(std::forward(args)...) + {} + + WriteBuffer(WriteBuffer &&) = default; + WriteBuffer(WriteBuffer const &) = delete; + WriteBuffer &operator=(WriteBuffer &&) = default; + WriteBuffer &operator=(WriteBuffer const &) = delete; + + WriteBuffer const &operator=(std::shared_ptr ptr) + { + m_buffer = std::move(ptr); + return *this; + } + + WriteBuffer const &operator=(UniquePtrWithLambda ptr) + { + m_buffer = std::move(ptr); + return *this; + } + + inline void const *get() const + { + return std::visit( + [](auto const &arg) { + // unique_ptr and shared_ptr both have the get() member, so + // we're being sneaky and don't distinguish the types here + return static_cast(arg.get()); + }, + m_buffer); + } + }; } // namespace auxiliary } // namespace openPMD diff --git a/include/openPMD/auxiliary/TypeTraits.hpp b/include/openPMD/auxiliary/TypeTraits.hpp index b7333cfdfe..923cfb5be2 100644 --- a/include/openPMD/auxiliary/TypeTraits.hpp +++ b/include/openPMD/auxiliary/TypeTraits.hpp @@ -21,8 +21,11 @@ #pragma once +#include "openPMD/auxiliary/UniquePtr.hpp" + #include #include // size_t +#include #include namespace openPMD::auxiliary @@ -36,7 +39,7 @@ namespace detail }; template - struct IsVector > + struct IsVector> { static constexpr bool value = true; }; @@ -48,10 +51,44 @@ namespace detail }; template - struct IsArray > + struct IsArray> { static constexpr bool value = true; }; + + template + struct IsPointer + { + constexpr static bool value = false; + }; + + template + struct IsPointer + { + constexpr static bool value = true; + using type = T; + }; + + template + struct IsPointer> + { + constexpr 
static bool value = true; + using type = T; + }; + + template + struct IsPointer> + { + constexpr static bool value = true; + using type = T; + }; + + template + struct IsPointer> + { + constexpr static bool value = true; + using type = T; + }; } // namespace detail template @@ -60,6 +97,12 @@ inline constexpr bool IsVector_v = detail::IsVector::value; template inline constexpr bool IsArray_v = detail::IsArray::value; +template +inline constexpr bool IsPointer_v = detail::IsPointer::value; + +template +using IsPointer_t = typename detail::IsPointer::type; + /** Emulate in the C++ concept ContiguousContainer * * Users can implement this trait for a type to signal it can be used as diff --git a/include/openPMD/auxiliary/UniquePtr.hpp b/include/openPMD/auxiliary/UniquePtr.hpp new file mode 100644 index 0000000000..4b99f36790 --- /dev/null +++ b/include/openPMD/auxiliary/UniquePtr.hpp @@ -0,0 +1,176 @@ +#pragma once + +#include +#include +#include +#include + +namespace openPMD +{ + +namespace auxiliary +{ + /** + * @brief Custom deleter type based on std::function. + * + * No need to interact with this class directly, used implicitly + * by UniquePtrWithLambda. + * + * Has some special treatment for array types and falls back + * to std::default_delete by default. + * + * @tparam T The to-be-deleted type, possibly an array. + */ + template + class CustomDelete : public std::function *)> + { + private: + using T_decayed = std::remove_extent_t; + + public: + using deleter_type = std::function; + + deleter_type const &get_deleter() const + { + return *this; + } + deleter_type &get_deleter() + { + return *this; + } + + /* + * Default constructor: Use std::default_delete. + * This ensures correct destruction of arrays by using delete[]. + */ + CustomDelete() + : deleter_type{[](T_decayed *ptr) { + if constexpr (std::is_void_v) + { + (void)ptr; + std::cerr << "[Warning] Cannot standard-delete a void-type " + "pointer. Please specify a custom destructor. 
" + "Will let the memory leak." + << std::endl; + } + else + { + std::default_delete{}(ptr); + } + }} + {} + + CustomDelete(deleter_type func) : deleter_type(std::move(func)) + {} + }; +} // namespace auxiliary + +/** + * @brief Unique Pointer class that uses a dynamic destructor type. + * + * Unlike std::shared_ptr, std::unique_ptr has a second type parameter for the + * destructor, in order to have as little runtime overhead as possible over + * raw pointers. + * This unique pointer class behaves like a std::unique_ptr with a std::function + * based deleter type, making it possible to have one single unique_ptr-like + * class that still enables user to specify custom destruction behavior, e.g. + * for GPU buffers. + * + * If not specifying a custom deleter explicitly, this class emulates the + * behavior of a std::unique_ptr with std::default_delete. + * This also means that array types are supported as expected. + * + * @tparam T The pointer type, as in std::unique_ptr. + */ +template +class UniquePtrWithLambda + : public std::unique_ptr< + T, + /* Deleter = */ auxiliary::CustomDelete> +{ +private: + using BasePtr = std::unique_ptr>; + +public: + using T_decayed = std::remove_extent_t; + + UniquePtrWithLambda() = default; + + UniquePtrWithLambda(UniquePtrWithLambda &&) = default; + UniquePtrWithLambda &operator=(UniquePtrWithLambda &&) = default; + + UniquePtrWithLambda(UniquePtrWithLambda const &) = delete; + UniquePtrWithLambda &operator=(UniquePtrWithLambda const &) = delete; + + /** + * Conversion constructor from std::unique_ptr with default deleter. + */ + UniquePtrWithLambda(std::unique_ptr); + + /** + * Conversion constructor from std::unique_ptr with custom deleter. + * + * @tparam Del Custom deleter type. + */ + template + UniquePtrWithLambda(std::unique_ptr); + + /** + * Construct from raw pointer with default deleter. + */ + UniquePtrWithLambda(T_decayed *); + + /** + * Construct from raw pointer with custom deleter. 
+ */ + UniquePtrWithLambda(T_decayed *, std::function); + + /** + * Like std::static_pointer_cast. + * The dynamic destructor type makes this possible to implement in this + * case. + * + * @tparam U Convert to unique pointer of this type. + */ + template + UniquePtrWithLambda static_cast_() &&; +}; + +template +UniquePtrWithLambda::UniquePtrWithLambda(std::unique_ptr stdPtr) + : BasePtr{stdPtr.release()} +{} + +template +template +UniquePtrWithLambda::UniquePtrWithLambda(std::unique_ptr ptr) + : BasePtr{ + ptr.release(), + auxiliary::CustomDelete{ + [deleter = std::move(ptr.get_deleter())](T_decayed *del_ptr) { + deleter.get_deleter()(del_ptr); + }}} +{} + +template +UniquePtrWithLambda::UniquePtrWithLambda(T_decayed *ptr) : BasePtr{ptr} +{} + +template +UniquePtrWithLambda::UniquePtrWithLambda( + T_decayed *ptr, std::function deleter) + : BasePtr{ptr, std::move(deleter)} +{} + +template +template +UniquePtrWithLambda UniquePtrWithLambda::static_cast_() && +{ + using other_type = std::remove_extent_t; + return UniquePtrWithLambda{ + static_cast(this->release()), + [deleter = std::move(this->get_deleter())](other_type *ptr) { + deleter.get_deleter()(static_cast(ptr)); + }}; +} +} // namespace openPMD diff --git a/include/openPMD/backend/PatchRecordComponent.hpp b/include/openPMD/backend/PatchRecordComponent.hpp index 718adee7d5..51537b0071 100644 --- a/include/openPMD/backend/PatchRecordComponent.hpp +++ b/include/openPMD/backend/PatchRecordComponent.hpp @@ -218,6 +218,6 @@ inline void PatchRecordComponent::store(uint64_t idx, T data) dWrite.dtype = dtype; dWrite.data = std::make_shared(data); auto &rc = get(); - rc.m_chunks.push(IOTask(this, dWrite)); + rc.m_chunks.push(IOTask(this, std::move(dWrite))); } } // namespace openPMD diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index 76bbddd708..266063b722 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -463,7 +463,7 @@ 
ADIOS2IOHandlerImpl::flush(internal::ParsedFlushParams &flushParams) { if (m_dirty.find(p.first) != m_dirty.end()) { - p.second->flush(adios2FlushParams, /* writeAttributes = */ false); + p.second->flush(adios2FlushParams, /* writeLatePuts = */ false); } else { @@ -744,7 +744,7 @@ void ADIOS2IOHandlerImpl::closeFile( [](detail::BufferedActions &ba, adios2::Engine &) { ba.finalize(); }, - /* writeAttributes = */ true, + /* writeLatePuts = */ true, /* flushUnconditionally = */ false); m_fileData.erase(it); } @@ -816,7 +816,7 @@ void ADIOS2IOHandlerImpl::deleteAttribute( } void ADIOS2IOHandlerImpl::writeDataset( - Writable *writable, const Parameter ¶meters) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( access::write(m_handler->m_backendAccess), @@ -826,7 +826,7 @@ void ADIOS2IOHandlerImpl::writeDataset( detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); detail::BufferedPut bp; bp.name = nameOfVariable(writable); - bp.param = parameters; + bp.param = std::move(parameters); ba.enqueue(std::move(bp)); m_dirty.emplace(std::move(file)); writable->written = true; // TODO erst nach dem Schreiben? 
@@ -1936,23 +1936,59 @@ namespace detail shape.begin(), shape.end(), std::back_inserter(*parameters.extent)); } + template + inline constexpr bool always_false_v = false; + template - void WriteDataset::call( - ADIOS2IOHandlerImpl *impl, - detail::BufferedPut &bp, - adios2::IO &IO, - adios2::Engine &engine) + void WriteDataset::call(BufferedActions &ba, detail::BufferedPut &bp) { VERIFY_ALWAYS( - access::write(impl->m_handler->m_backendAccess), + access::write(ba.m_impl->m_handler->m_backendAccess), "[ADIOS2] Cannot write data in read-only mode."); - auto ptr = std::static_pointer_cast(bp.param.data).get(); + std::visit( + [&](auto &&arg) { + using ptr_type = std::decay_t; + if constexpr (std::is_same_v< + ptr_type, + std::shared_ptr>) + { + auto ptr = static_cast(arg.get()); - adios2::Variable var = impl->verifyDataset( - bp.param.offset, bp.param.extent, IO, bp.name); + adios2::Variable var = ba.m_impl->verifyDataset( + bp.param.offset, bp.param.extent, ba.m_IO, bp.name); - engine.Put(var, ptr); + ba.getEngine().Put(var, ptr); + } + else if constexpr (std::is_same_v< + ptr_type, + UniquePtrWithLambda>) + { + BufferedUniquePtrPut bput; + bput.name = std::move(bp.name); + bput.offset = std::move(bp.param.offset); + bput.extent = std::move(bp.param.extent); + /* + * Note: Moving is required here since it's a unique_ptr. + * std::forward<>() would theoretically work, but it + * requires the type parameter and we don't have that + * inside the lambda. + * (ptr_type does not work for this case). 
+ */ + // clang-format off + bput.data = std::move(arg); // NOLINT(bugprone-move-forwarding-reference) + // clang-format on + bput.dtype = bp.param.dtype; + ba.m_uniquePtrPuts.push_back(std::move(bput)); + } + else + { + static_assert( + always_false_v, + "Unhandled std::variant branch"); + } + }, + bp.param.data.m_buffer); } template @@ -2388,8 +2424,29 @@ namespace detail void BufferedPut::run(BufferedActions &ba) { - switchAdios2VariableType( - param.dtype, ba.m_impl, *this, ba.m_IO, ba.getEngine()); + switchAdios2VariableType(param.dtype, ba, *this); + } + + struct RunUniquePtrPut + { + template + static void call(BufferedUniquePtrPut &bufferedPut, BufferedActions &ba) + { + auto ptr = static_cast(bufferedPut.data.get()); + adios2::Variable var = ba.m_impl->verifyDataset( + bufferedPut.offset, + bufferedPut.extent, + ba.m_IO, + bufferedPut.name); + ba.getEngine().Put(var, ptr); + } + + static constexpr char const *errorMsg = "RunUniquePtrPut"; + }; + + void BufferedUniquePtrPut::run(BufferedActions &ba) + { + switchAdios2VariableType(dtype, *this, ba); } void OldBufferedAttributeRead::run(BufferedActions &ba) @@ -2481,20 +2538,20 @@ namespace detail } // if write accessing, ensure that the engine is opened // and that all attributes are written - // (attributes are written upon closing a step or a file - // which users might never do) - bool needToWriteAttributes = !m_attributeWrites.empty(); - if ((needToWriteAttributes || !m_engine) && - m_mode != adios2::Mode::Read) + // (attributes and unique_ptr datasets are written upon closing a step + // or a file which users might never do) + bool needToWrite = + !m_attributeWrites.empty() || !m_uniquePtrPuts.empty(); + if ((needToWrite || !m_engine) && m_mode != adios2::Mode::Read) { - auto &engine = getEngine(); - if (needToWriteAttributes) + getEngine(); + for (auto &pair : m_attributeWrites) { - for (auto &pair : m_attributeWrites) - { - pair.second.run(*this); - } - engine.PerformPuts(); + pair.second.run(*this); 
+ } + for (auto &entry : m_uniquePtrPuts) + { + entry.run(*this); } } if (m_engine) @@ -3260,7 +3317,7 @@ namespace detail void BufferedActions::flush_impl( ADIOS2FlushParams flushParams, F &&performPutGets, - bool writeAttributes, + bool writeLatePuts, bool flushUnconditionally) { auto level = flushParams.level; @@ -3280,7 +3337,8 @@ namespace detail if (streamStatus == StreamStatus::OutsideOfStep) { if (m_buffer.empty() && - (!writeAttributes || m_attributeWrites.empty()) && + (!writeLatePuts || + (m_attributeWrites.empty() && m_uniquePtrPuts.empty())) && m_attributeReads.empty()) { if (flushUnconditionally) @@ -3306,12 +3364,16 @@ namespace detail initializedDefaults = true; } - if (writeAttributes) + if (writeLatePuts) { for (auto &pair : m_attributeWrites) { pair.second.run(*this); } + for (auto &entry : m_uniquePtrPuts) + { + entry.run(*this); + } } #if HAS_ADIOS_2_8 @@ -3331,9 +3393,10 @@ namespace detail m_updateSpans.clear(); m_buffer.clear(); m_alreadyEnqueued.clear(); - if (writeAttributes) + if (writeLatePuts) { m_attributeWrites.clear(); + m_uniquePtrPuts.clear(); } for (BufferedAttributeRead &task : m_attributeReads) @@ -3355,16 +3418,11 @@ namespace detail { m_alreadyEnqueued.emplace_back(std::move(task)); } - if (writeAttributes) + if (writeLatePuts) { - for (auto &task : m_attributeWrites) - { - m_alreadyEnqueued.emplace_back( - std::unique_ptr{ - new BufferedAttributeWrite{ - std::move(task.second)}}); - } - m_attributeWrites.clear(); + throw error::Internal( + "ADIOS2 backend: Flush of late writes was requested at the " + "wrong time."); } m_buffer.clear(); break; @@ -3372,7 +3430,7 @@ namespace detail } void BufferedActions::flush_impl( - ADIOS2FlushParams flushParams, bool writeAttributes) + ADIOS2FlushParams flushParams, bool writeLatePuts) { auto decideFlushAPICall = [this, flushTarget = flushParams.flushTarget]( adios2::Engine &engine) { @@ -3395,7 +3453,21 @@ namespace detail if (performDataWrite) { + /* + * Deliberately don't write 
buffered attributes now since + * readers won't be able to see them before EndStep anyway, + * so there's no use. In fact, writing them now is harmful + * because they can't be overwritten after this anymore in the + * current step. + * Draining the uniquePtrPuts now is good however, since we + * should use this chance to free the memory. + */ + for (auto &entry : m_uniquePtrPuts) + { + entry.run(*this); + } engine.PerformDataWrite(); + m_uniquePtrPuts.clear(); } else { @@ -3429,7 +3501,7 @@ namespace detail break; } }, - writeAttributes, + writeLatePuts, /* flushUnconditionally = */ false); } @@ -3454,7 +3526,7 @@ namespace detail } flush( ADIOS2FlushParams{FlushLevel::UserFlush}, - /* writeAttributes = */ false); + /* writeLatePuts = */ false); return AdvanceStatus::RANDOMACCESS; } @@ -3499,7 +3571,7 @@ namespace detail flush( ADIOS2FlushParams{FlushLevel::UserFlush}, [](BufferedActions &, adios2::Engine &eng) { eng.EndStep(); }, - /* writeAttributes = */ true, + /* writeLatePuts = */ true, /* flushUnconditionally = */ true); uncommittedAttributes.clear(); m_updateSpans.clear(); diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp index 6b1630e854..f15ab0fdd4 100644 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -1097,7 +1097,7 @@ int64_t CommonADIOS1IOHandlerImpl::GetFileHandle(Writable *writable) template void CommonADIOS1IOHandlerImpl::writeDataset( - Writable *writable, Parameter const ¶meters) + Writable *writable, Parameter ¶meters) { if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index 499b2e6e69..5421187f03 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -1235,7 +1235,7 @@ void HDF5IOHandlerImpl::deleteAttribute( } void HDF5IOHandlerImpl::writeDataset( - Writable *writable, Parameter const ¶meters) + Writable *writable, Parameter 
¶meters) { if (access::readOnly(m_handler->m_backendAccess)) throw std::runtime_error( @@ -1277,7 +1277,7 @@ void HDF5IOHandlerImpl::writeDataset( "[HDF5] Internal error: Failed to select hyperslab during dataset " "write"); - std::shared_ptr data = parameters.data; + void const *data = parameters.data.get(); GetH5DataType getH5DataType({ {typeid(bool).name(), m_H5T_BOOL_ENUM}, @@ -1321,7 +1321,7 @@ void HDF5IOHandlerImpl::writeDataset( memspace, filespace, m_datasetTransferProperty, - data.get()); + data); VERIFY( status == 0, "[HDF5] Internal error: Failed to write dataset " + diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index 13f20c193d..a9e86f0cde 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -781,7 +781,7 @@ void JSONIOHandlerImpl::deleteAttribute( } void JSONIOHandlerImpl::writeDataset( - Writable *writable, Parameter const ¶meters) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( access::write(m_handler->m_backendAccess), diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index bf46587ee2..164b38d127 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -377,4 +377,49 @@ bool RecordComponent::dirtyRecursive() const } return !get().m_chunks.empty(); } + +void RecordComponent::storeChunk( + auxiliary::WriteBuffer buffer, Datatype dtype, Offset o, Extent e) +{ + if (constant()) + throw std::runtime_error( + "Chunks cannot be written for a constant RecordComponent."); + if (empty()) + throw std::runtime_error( + "Chunks cannot be written for an empty RecordComponent."); + if (dtype != getDatatype()) + { + std::ostringstream oss; + oss << "Datatypes of chunk data (" << dtype + << ") and record component (" << getDatatype() << ") do not match."; + throw std::runtime_error(oss.str()); + } + uint8_t dim = getDimensionality(); + if (e.size() != dim || o.size() != dim) + { + std::ostringstream oss; + oss << "Dimensionality of chunk (" + << "offset=" 
<< o.size() << "D, " + << "extent=" << e.size() << "D) " + << "and record component (" << int(dim) << "D) " + << "do not match."; + throw std::runtime_error(oss.str()); + } + Extent dse = getExtent(); + for (uint8_t i = 0; i < dim; ++i) + if (dse[i] < o[i] + e[i]) + throw std::runtime_error( + "Chunk does not reside inside dataset (Dimension on index " + + std::to_string(i) + ". DS: " + std::to_string(dse[i]) + + " - Chunk: " + std::to_string(o[i] + e[i]) + ")"); + + Parameter dWrite; + dWrite.offset = o; + dWrite.extent = e; + dWrite.dtype = dtype; + /* std::static_pointer_cast correctly reference-counts the pointer */ + dWrite.data = std::move(buffer); + auto &rc = get(); + rc.m_chunks.push(IOTask(this, std::move(dWrite))); +} } // namespace openPMD diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 821c29b614..496cb0a36f 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -7,6 +7,7 @@ #include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/JSON.hpp" +#include "openPMD/auxiliary/UniquePtr.hpp" #include @@ -68,11 +69,11 @@ TEST_CASE("attribute_dtype_test", "[core]") REQUIRE(Datatype::DOUBLE == a.dtype); a = Attribute(static_cast(0.)); REQUIRE(Datatype::LONG_DOUBLE == a.dtype); - a = Attribute(static_cast >(0.)); + a = Attribute(static_cast>(0.)); REQUIRE(Datatype::CFLOAT == a.dtype); - a = Attribute(static_cast >(0.)); + a = Attribute(static_cast>(0.)); REQUIRE(Datatype::CDOUBLE == a.dtype); - a = Attribute(static_cast >(0.)); + a = Attribute(static_cast>(0.)); REQUIRE(Datatype::CLONG_DOUBLE == a.dtype); a = Attribute(std::string("")); REQUIRE(Datatype::STRING == a.dtype); @@ -989,9 +990,11 @@ TEST_CASE("use_count_test", "[core]") pprc.resetDataset(Dataset(determineDatatype(), {4})); pprc.store(0, static_cast(1)); REQUIRE( - static_cast *>( - pprc.get().m_chunks.front().parameter.get()) - ->data.use_count() == 1); + std::get>( + static_cast *>( + pprc.get().m_chunks.front().parameter.get()) + ->data.m_buffer) + .use_count() == 1); 
#endif } @@ -1267,12 +1270,12 @@ TEST_CASE("DoConvert_single_value_to_vector", "[core]") REQUIRE(attr.get() == 'x'); REQUIRE(attr.get() == 'x'); // all the previous ones, but make them single-element vectors now - REQUIRE(attr.get >() == std::vector{'x'}); + REQUIRE(attr.get>() == std::vector{'x'}); REQUIRE( - attr.get >() == + attr.get>() == std::vector{'x'}); REQUIRE( - attr.get >() == + attr.get>() == std::vector{'x'}); } { @@ -1280,14 +1283,14 @@ TEST_CASE("DoConvert_single_value_to_vector", "[core]") Attribute attr{array}; // the following conversions should be possible - REQUIRE(attr.get >() == array); + REQUIRE(attr.get>() == array); // we don't need array-to-array conversions, // so array< int, 7 > cannot be loaded here REQUIRE( - attr.get >() == + attr.get>() == std::vector{0, 1, 2, 3, 4, 5, 6}); REQUIRE( - attr.get >() == + attr.get>() == std::vector{0, 1, 2, 3, 4, 5, 6}); } { @@ -1297,17 +1300,17 @@ TEST_CASE("DoConvert_single_value_to_vector", "[core]") Attribute attr{vector}; // the following conversions should be possible - REQUIRE(attr.get >() == arraydouble); - REQUIRE(attr.get >() == arrayint); + REQUIRE(attr.get>() == arraydouble); + REQUIRE(attr.get>() == arrayint); REQUIRE_THROWS_WITH( - (attr.get >()), + (attr.get>()), Catch::Equals("getCast: no vector to array conversion possible " "(wrong requested array size).")); REQUIRE( - attr.get >() == + attr.get>() == std::vector{0, 1, 2, 3, 4, 5, 6}); REQUIRE( - attr.get >() == + attr.get>() == std::vector{0, 1, 2, 3, 4, 5, 6}); } } @@ -1359,3 +1362,21 @@ TEST_CASE("unavailable_backend", "[core]") } #endif } + +TEST_CASE("unique_ptr", "[core]") +{ + auto stdptr = std::make_unique(5); + UniquePtrWithLambda ptr = std::move(stdptr); + auto stdptr_with_custom_del = + std::unique_ptr>{ + new int{5}, auxiliary::CustomDelete{[](int const *del_ptr) { + delete del_ptr; + }}}; + UniquePtrWithLambda ptr2 = std::move(stdptr_with_custom_del); + + UniquePtrWithLambda arrptr; + // valgrind can detect mismatched 
new/delete pairs + UniquePtrWithLambda arrptrFilled{new int[5]{}}; + UniquePtrWithLambda arrptrFilledCustom{ + new int[5]{}, [](int const *p) { delete[] p; }}; +} diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index a334c4b241..5ace4a2cd4 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -52,7 +52,8 @@ std::vector testedFileExtensions() allExtensions.begin(), allExtensions.end(), [](std::string const &ext) { // sst and ssc need a receiver for testing // bp4 is already tested via bp - return ext == "sst" || ext == "ssc" || ext == "bp4" | ext == "json"; + return ext == "sst" || ext == "ssc" || ext == "bp4" || + ext == "toml" || ext == "json"; }); return {allExtensions.begin(), newEnd}; } diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 2e20c53676..e1296d4a89 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -661,7 +661,7 @@ TEST_CASE("close_iteration_interleaved_test", "[serial]") void close_and_copy_attributable_test(std::string file_ending) { - using position_t = double; + using position_t = int; // open file for writing Series series("electrons." 
+ file_ending, Access::CREATE); @@ -691,16 +691,80 @@ void close_and_copy_attributable_test(std::string file_ending) iteration_ptr->particles["e"]["positionOffset"]; std::iota(local_data.get(), local_data.get() + length, i * length); - for (auto const &dim : {"x", "y", "z"}) - { - RecordComponent pos = electronPositions[dim]; - pos.resetDataset(dataset); - pos.storeChunk(local_data, Offset{0}, global_extent); + /* + * Hijack this test to additionally test the unique_ptr storeChunk API + */ + // scalar unique_ptr, default delete + auto pos_x = electronPositions["x"]; + pos_x.resetDataset(Dataset{datatype, {1}}); + pos_x.storeChunk(std::make_unique(5), {0}, {1}); + + // array unique_ptr, default delete + auto posOff_x = electronPositionsOffset["x"]; + posOff_x.resetDataset(dataset); + posOff_x.storeChunk( + std::unique_ptr{new int[10]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}}, + {0}, + {global_extent}); + + using CD = auxiliary::CustomDelete; + CD array_deleter{[](int const *ptr) { delete[] ptr; }}; + + // scalar unique_ptr, custom delete + auto pos_y = electronPositions["y"]; + pos_y.resetDataset(dataset); + pos_y.storeChunk( + std::unique_ptr{ + new int[10]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, array_deleter}, + {0}, + {global_extent}); + + // array unique_ptr, custom delete + auto posOff_y = electronPositionsOffset["y"]; + posOff_y.resetDataset(dataset); + posOff_y.storeChunk( + std::unique_ptr{ + new int[10]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, array_deleter}, + {0}, + {global_extent}); + + // scalar UniquePtrWithLambda, default delete + auto pos_z = electronPositions["z"]; + pos_z.resetDataset(dataset); + pos_z.storeChunk( + UniquePtrWithLambda{ + new int[10]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, array_deleter}, + {0}, + {global_extent}); + + // array UniquePtrWithLambda, default delete + auto posOff_z = electronPositionsOffset["z"]; + posOff_z.resetDataset(dataset); + posOff_z.storeChunk( + UniquePtrWithLambda{ + new int[10]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, array_deleter}, + {0}, + 
{global_extent}); + + // scalar UniquePtrWithLambda, custom delete + // we're playing 4D now + auto pos_w = electronPositions["w"]; + pos_w.resetDataset(dataset); + pos_w.storeChunk( + UniquePtrWithLambda{ + new int[10]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, array_deleter}, + {0}, + {global_extent}); + + // array UniquePtrWithLambda, custom delete + auto posOff_w = electronPositionsOffset["w"]; + posOff_w.resetDataset(dataset); + posOff_w.storeChunk( + UniquePtrWithLambda{ + new int[10]{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, array_deleter}, + {0}, + {global_extent}); - RecordComponent posOff = electronPositionsOffset[dim]; - posOff.resetDataset(dataset); - posOff.makeConstant(position_t(0.0)); - } iteration_ptr->close(); // force re-flush of previous iterations series.flush(); @@ -899,11 +963,11 @@ inline void constant_scalar(std::string file_ending) E_x.makeConstant(static_cast(13.37)); auto E_y = s.iterations[1].meshes["E"]["y"]; E_y.resetDataset(Dataset(Datatype::UINT, {1, 2, 3})); - std::shared_ptr E( + UniquePtrWithLambda E( new unsigned int[6], [](unsigned int const *p) { delete[] p; }); unsigned int e{0}; std::generate(E.get(), E.get() + 6, [&e] { return e++; }); - E_y.storeChunk(E, {0, 0, 0}, {1, 2, 3}); + E_y.storeChunk(std::move(E), {0, 0, 0}, {1, 2, 3}); // store a number of predefined attributes in E Mesh &E_mesh = s.iterations[1].meshes["E"]; @@ -934,12 +998,12 @@ inline void constant_scalar(std::string file_ending) vel_x.makeConstant(static_cast(-1)); auto vel_y = s.iterations[1].particles["e"]["velocity"]["y"]; vel_y.resetDataset(Dataset(Datatype::ULONGLONG, {3, 2, 1})); - std::shared_ptr vel( + UniquePtrWithLambda vel( new unsigned long long[6], [](unsigned long long const *p) { delete[] p; }); unsigned long long v{0}; std::generate(vel.get(), vel.get() + 6, [&v] { return v++; }); - vel_y.storeChunk(vel, {0, 0, 0}, {3, 2, 1}); + vel_y.storeChunk(std::move(vel), {0, 0, 0}, {3, 2, 1}); } { @@ -4104,6 +4168,7 @@ void adios2_bp5_flush(std::string const &cfg, 
FlushDuringStep flushDuringStep) return res; }; std::vector data(size, 10); + Datatype dtype = determineDatatype(); { Series write("../samples/bp5_flush.bp", Access::CREATE, cfg); @@ -4111,7 +4176,7 @@ void adios2_bp5_flush(std::string const &cfg, FlushDuringStep flushDuringStep) auto component = write.writeIterations()[0] .meshes["e_chargeDensity"][RecordComponent::SCALAR]; - component.resetDataset({Datatype::INT, {size}}); + component.resetDataset({dtype, {size}}); component.storeChunk(data, {0}, {size}); // component.seriesFlush(FlushMode::NonCollective); component.seriesFlush(); @@ -4134,7 +4199,7 @@ void adios2_bp5_flush(std::string const &cfg, FlushDuringStep flushDuringStep) auto component = write.writeIterations()[0] .meshes["i_chargeDensity"][RecordComponent::SCALAR]; - component.resetDataset({Datatype::INT, {size}}); + component.resetDataset({dtype, {size}}); component.storeChunk(data, {0}, {size}); } @@ -4169,7 +4234,7 @@ void adios2_bp5_flush(std::string const &cfg, FlushDuringStep flushDuringStep) auto component = write.writeIterations()[0] .meshes["temperature"][RecordComponent::SCALAR]; - component.resetDataset({Datatype::INT, {size}}); + component.resetDataset({dtype, {size}}); component.storeChunk(data, {0}, {size}); // component.seriesFlush(FlushMode::NonCollective); component.seriesFlush( @@ -4192,12 +4257,19 @@ void adios2_bp5_flush(std::string const &cfg, FlushDuringStep flushDuringStep) REQUIRE(currentSize <= 4096); } + bool has_been_deleted = false; + UniquePtrWithLambda copied_as_unique( + new int[size], [&has_been_deleted](int const *ptr) { + delete[] ptr; + has_been_deleted = true; + }); + std::copy_n(data.data(), size, copied_as_unique.get()); { auto component = write.writeIterations()[0] .meshes["temperature"][RecordComponent::SCALAR]; - component.resetDataset({Datatype::INT, {size}}); - component.storeChunk(data, {0}, {size}); + component.resetDataset({dtype, {size}}); + component.storeChunk(std::move(copied_as_unique), {0}, {size}); // 
component.seriesFlush(FlushMode::NonCollective); component.seriesFlush( "adios2.engine.preferred_flush_target = \"disk\""); @@ -4207,11 +4279,13 @@ void adios2_bp5_flush(std::string const &cfg, FlushDuringStep flushDuringStep) { // should now be roughly within 1% of 16Mb REQUIRE(std::abs(1 - double(currentSize) / (16 * size)) <= 0.01); + REQUIRE(has_been_deleted); } else { // should be roughly zero REQUIRE(currentSize <= 4096); + REQUIRE(!has_been_deleted); } } auto currentSize = getsize(); From 0f8c3aa7396f02fb8e7679f5a8626db0f192a294 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 7 Mar 2023 19:27:04 +0100 Subject: [PATCH 42/82] Close HFD5 handles in availableChunks task (#1386) --- src/IO/HDF5/HDF5IOHandler.cpp | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index 5421187f03..9d66af38b2 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -769,6 +769,19 @@ void HDF5IOHandlerImpl::availableChunks( } parameters.chunks->push_back( WrittenChunkInfo(std::move(offset), std::move(extent))); + + herr_t status; + status = H5Sclose(dataset_space); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset space during " + "availableChunks task"); + + status = H5Dclose(dataset_id); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset during " + "availableChunks task"); } void HDF5IOHandlerImpl::openFile( From 3cd7df7e88772c65b2e47f8be73970120ee472c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 7 Mar 2023 19:28:18 +0100 Subject: [PATCH 43/82] Some fixes for openpmd-pipe (#1379) --- src/binding/python/openpmd_api/pipe/__main__.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/binding/python/openpmd_api/pipe/__main__.py b/src/binding/python/openpmd_api/pipe/__main__.py index 436bd233f1..c5312d71b5 100644 --- a/src/binding/python/openpmd_api/pipe/__main__.py 
+++ b/src/binding/python/openpmd_api/pipe/__main__.py @@ -223,6 +223,9 @@ def run(self): self.outconfig) print("Opened input and output on rank {}.".format(self.comm.rank)) sys.stdout.flush() + # In Linear read mode, global attributes are only present after calling + # this method to access the first iteration + inseries.read_iterations() self.__copy(inseries, outseries) def __copy(self, src, dest, current_path="/data/"): From 6ba68c8f53ceda2008fdc4f3a1508e6515ff561c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 7 Mar 2023 19:32:07 +0100 Subject: [PATCH 44/82] Some fixes for char signs (#1378) * Add signed char to HDF5 attribute types * Numpy: return CHAR depending on system char type * Support schar in HDF5 datasets --- include/openPMD/binding/python/Numpy.hpp | 19 +++++++++++++++++-- src/IO/HDF5/HDF5IOHandler.cpp | 15 +++++++++++++++ 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/include/openPMD/binding/python/Numpy.hpp b/include/openPMD/binding/python/Numpy.hpp index 8eead95e0d..3601ba7081 100644 --- a/include/openPMD/binding/python/Numpy.hpp +++ b/include/openPMD/binding/python/Numpy.hpp @@ -28,6 +28,7 @@ #include #include +#include namespace openPMD { @@ -36,9 +37,23 @@ inline Datatype dtype_from_numpy(pybind11::dtype const dt) // ref: https://docs.scipy.org/doc/numpy/user/basics.types.html // ref: https://github.com/numpy/numpy/issues/10678#issuecomment-369363551 if (dt.char_() == pybind11::dtype("b").char_()) - return Datatype::CHAR; + if constexpr (std::is_signed_v) + { + return Datatype::CHAR; + } + else + { + return Datatype::SCHAR; + } else if (dt.char_() == pybind11::dtype("B").char_()) - return Datatype::UCHAR; + if constexpr (std::is_unsigned_v) + { + return Datatype::CHAR; + } + else + { + return Datatype::UCHAR; + } else if (dt.char_() == pybind11::dtype("short").char_()) return Datatype::SHORT; else if (dt.char_() == pybind11::dtype("intc").char_()) diff --git a/src/IO/HDF5/HDF5IOHandler.cpp 
b/src/IO/HDF5/HDF5IOHandler.cpp index 9d66af38b2..00f2f0245e 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -991,6 +991,8 @@ void HDF5IOHandlerImpl::openDataset( d = DT::CHAR; else if (H5Tequal(dataset_type, H5T_NATIVE_UCHAR)) d = DT::UCHAR; + else if (H5Tequal(dataset_type, H5T_NATIVE_SCHAR)) + d = DT::SCHAR; else if (H5Tequal(dataset_type, H5T_NATIVE_SHORT)) d = DT::SHORT; else if (H5Tequal(dataset_type, H5T_NATIVE_INT)) @@ -1742,6 +1744,7 @@ void HDF5IOHandlerImpl::readDataset( case DT::ULONGLONG: case DT::CHAR: case DT::UCHAR: + case DT::SCHAR: case DT::BOOL: break; case DT::UNDEFINED: @@ -1876,6 +1879,12 @@ void HDF5IOHandlerImpl::readAttribute( status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); } + else if (H5Tequal(attr_type, H5T_NATIVE_SCHAR)) + { + signed char u; + status = H5Aread(attr_id, attr_type, &u); + a = Attribute(u); + } else if (H5Tequal(attr_type, H5T_NATIVE_SHORT)) { short i; @@ -2102,6 +2111,12 @@ void HDF5IOHandlerImpl::readAttribute( status = H5Aread(attr_id, attr_type, vu.data()); a = Attribute(vu); } + else if (H5Tequal(attr_type, H5T_NATIVE_SCHAR)) + { + std::vector vu(dims[0], 0); + status = H5Aread(attr_id, attr_type, vu.data()); + a = Attribute(vu); + } else if (H5Tequal(attr_type, H5T_NATIVE_SHORT)) { std::vector vint16(dims[0], 0); From bf385dd3615ffa6925cd70919dc00c1e75c6b513 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 20 Mar 2023 11:25:28 -0700 Subject: [PATCH 45/82] CMake: Multi-Config Generator (#1384) * CMake: Multi-Config Generator Quickfix Set target properties for multi-config generators. 
* CMake: Multi-Config Dir Suffixes * Set: `COMPILE_PDB_NAME[_CFG]` Contrary to `PDB_NAME`, the default here is unspecified and seems to default to `vc143.pdb` --- CMakeLists.txt | 210 +++++++++++++++++++++++++++++++++++++------------ 1 file changed, 158 insertions(+), 52 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 6ea49f586c..c1e2f6e041 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -42,6 +42,8 @@ endif() # Project structure ########################################################### # +get_property(isMultiConfig GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) + # temporary build directories if(NOT openPMD_ARCHIVE_OUTPUT_DIRECTORY) if(CMAKE_ARCHIVE_OUTPUT_DIRECTORY) @@ -564,6 +566,7 @@ add_library(openPMD::openPMD ALIAS openPMD) # properties openpmd_cxx_required(openPMD) set_target_properties(openPMD PROPERTIES + COMPILE_PDB_NAME openPMD ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} @@ -573,6 +576,20 @@ set_target_properties(openPMD PROPERTIES POSITION_INDEPENDENT_CODE ON WINDOWS_EXPORT_ALL_SYMBOLS ON ) +# note: same as above, but for Multi-Config generators +if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(openPMD PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} openPMD + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} + ) + endforeach() +endif() set(_cxx_msvc "$,$>") set(_msvc_1914 "$,19.14>") set(_msvc_options) @@ -644,6 +661,7 @@ if(openPMD_HAVE_ADIOS1) $) 
set_target_properties(openPMD.ADIOS1.Serial PROPERTIES + COMPILE_PDB_NAME openPMD.ADIOS1.Serial ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} @@ -654,6 +672,20 @@ if(openPMD_HAVE_ADIOS1) CXX_VISIBILITY_PRESET hidden VISIBILITY_INLINES_HIDDEN ON ) + # note: same as above, but for Multi-Config generators + if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(openPMD.ADIOS1.Serial PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} openPMD.ADIOS1.Serial + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} + ) + endforeach() + endif() if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux") set_target_properties(openPMD.ADIOS1.Serial PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL") @@ -678,6 +710,7 @@ if(openPMD_HAVE_ADIOS1) if(openPMD_HAVE_MPI) set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES + COMPILE_PDB_NAME openPMD.ADIOS1.Parallel ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} @@ -688,6 +721,20 @@ if(openPMD_HAVE_ADIOS1) CXX_VISIBILITY_PRESET hidden VISIBILITY_INLINES_HIDDEN 1 ) + # note: same as above, but for Multi-Config generators + if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} opemPMD.ADIOS1.Parallel + 
ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} + ) + endforeach() + endif() if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux") set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL") @@ -835,12 +882,27 @@ if(openPMD_HAVE_PYTHON) set_target_properties(openPMD.py PROPERTIES ARCHIVE_OUTPUT_NAME openpmd_api_cxx LIBRARY_OUTPUT_NAME openpmd_api_cxx + COMPILE_PDB_NAME openpmd_api_cxx ARCHIVE_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api LIBRARY_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api RUNTIME_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api PDB_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api ) + # note: same as above, but for Multi-Config generators + if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(openPMD.py PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} openpmd_api_cxx + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api + ) + endforeach() + endif() function(copy_aux_py) set(AUX_PY_SRC_DIR ${openPMD_SOURCE_DIR}/src/binding/python/openpmd_api/) 
set(AUX_PY_DSR_DIR ${openPMD_PYTHON_OUTPUT_DIRECTORY}/openpmd_api/) @@ -934,6 +996,30 @@ if(openPMD_BUILD_TESTING) POSITION_INDEPENDENT_CODE ON WINDOWS_EXPORT_ALL_SYMBOLS ON ) + set_target_properties(CatchRunner PROPERTIES COMPILE_PDB_NAME CatchRunner) + set_target_properties(CatchMain PROPERTIES COMPILE_PDB_NAME CatchMain) + # note: same as above, but for Multi-Config generators + if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(CatchRunner PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} CatchRunner + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} + ) + set_target_properties(CatchMain PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} CatchMain + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} + ) + endforeach() + endif() target_compile_options(CatchRunner PUBLIC ${_msvc_options}) target_compile_options(CatchMain PUBLIC ${_msvc_options}) target_link_libraries(CatchRunner PUBLIC openPMD::thirdparty::Catch2) @@ -947,12 +1033,27 @@ if(openPMD_BUILD_TESTING) add_executable(${testname}Tests test/${testname}Test.cpp) openpmd_cxx_required(${testname}Tests) set_target_properties(${testname}Tests PROPERTIES + COMPILE_PDB_NAME ${testname}Tests ARCHIVE_OUTPUT_DIRECTORY 
${openPMD_ARCHIVE_OUTPUT_DIRECTORY} LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) + # note: same as above, but for Multi-Config generators + if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(${testname}Tests PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} ${testname}Tests + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + ) + endforeach() + endif() if(openPMD_USE_INVASIVE_TESTS) target_compile_definitions(${testname}Tests PRIVATE openPMD_USE_INVASIVE_TESTS=1) @@ -977,12 +1078,27 @@ if(openPMD_BUILD_CLI_TOOLS) add_executable(openpmd-${toolname} src/cli/${toolname}.cpp) openpmd_cxx_required(openpmd-${toolname}) set_target_properties(openpmd-${toolname} PROPERTIES + COMPILE_PDB_NAME openpmd-${toolname} ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) + # note: same as above, but for Multi-Config generators + if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(openpmd-${toolname} PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} openpmd-${toolname} + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} 
${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + ) + endforeach() + endif() target_link_libraries(openpmd-${toolname} PRIVATE openPMD) endforeach() @@ -1004,12 +1120,27 @@ if(openPMD_BUILD_EXAMPLES) openpmd_cxx_required(${examplename}) endif() set_target_properties(${examplename} PROPERTIES + COMPILE_PDB_NAME ${examplename} ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) + # note: same as above, but for Multi-Config generators + if(isMultiConfig) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) + string(TOUPPER "${CFG}" CFG_UPPER) + set_target_properties(${examplename} PROPERTIES + COMPILE_PDB_NAME_${CFG_UPPER} ${examplename} + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} + ) + endforeach() + endif() target_link_libraries(${examplename} PRIVATE openPMD) endforeach() endif() @@ -1291,6 +1422,30 @@ if(openPMD_BUILD_TESTING) # Python Unit tests if(openPMD_HAVE_PYTHON) + function(test_set_pythonpath test_name) + if(WIN32) + if(isMultiConfig) + string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BASEDIR ${openPMD_BINARY_DIR}/$) + string(REGEX 
REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/$) + else() + string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BASEDIR ${openPMD_BINARY_DIR}) + string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) + endif() + string(REPLACE ";" "\\;" WIN_PATH "$ENV{PATH}") + string(REPLACE ";" "\\;" WIN_PYTHONPATH "$ENV{PYTHONPATH}") + set_property(TEST ${test_name} + PROPERTY ENVIRONMENT + "PATH=${WIN_BUILD_BINDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PATH}\n" + "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${openPMD_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" + ) + else() + set_tests_properties(${test_name} + PROPERTIES ENVIRONMENT + "PYTHONPATH=${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" + ) + endif() + endfunction() + if(openPMD_HAVE_HDF5) if(EXAMPLE_DATA_FOUND) add_test(NAME Unittest.py @@ -1299,22 +1454,7 @@ if(openPMD_BUILD_TESTING) WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) - if(WIN32) - string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BASEDIR ${openPMD_BINARY_DIR}) - string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) - string(REPLACE ";" "\\;" WIN_PATH "$ENV{PATH}") - string(REPLACE ";" "\\;" WIN_PYTHONPATH "$ENV{PYTHONPATH}") - set_property(TEST Unittest.py - PROPERTY ENVIRONMENT - "PATH=${WIN_BUILD_BINDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PATH}\n" - "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${openPMD_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" - ) - else() - set_tests_properties(Unittest.py - PROPERTIES ENVIRONMENT - "PYTHONPATH=${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" - ) - endif() + test_set_pythonpath(Unittest.py) endif() endif() endif() @@ -1368,21 +1508,6 @@ if(openPMD_BUILD_TESTING) endif() endif() - function(test_set_pythonpath test_name) - if(WIN32) - set_property(TEST ${test_name} - PROPERTY ENVIRONMENT - "PATH=${WIN_BUILD_BINDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PATH}\n" - 
"PYTHONPATH=${WIN_BUILD_BASEDIR}\\${openPMD_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" - ) - else() - set_tests_properties(${test_name} - PROPERTIES ENVIRONMENT - "PYTHONPATH=${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" - ) - endif() - endfunction() - # Python CLI Modules if(openPMD_HAVE_PYTHON) # (Note that during setuptools install, these are furthermore installed as @@ -1469,25 +1594,6 @@ if(openPMD_BUILD_TESTING) endif() endif() - function(configure_python_test testname) - if(WIN32) - string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BASEDIR ${openPMD_BINARY_DIR}) - string(REGEX REPLACE "/" "\\\\" WIN_BUILD_BINDIR ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) - string(REPLACE ";" "\\;" WIN_PATH "$ENV{PATH}") - string(REPLACE ";" "\\;" WIN_PYTHONPATH "$ENV{PYTHONPATH}") - set_property(TEST ${testname} - PROPERTY ENVIRONMENT - "PATH=${WIN_BUILD_BINDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PATH}\n" - "PYTHONPATH=${WIN_BUILD_BASEDIR}\\${openPMD_INSTALL_PYTHONDIR}\\${CMAKE_BUILD_TYPE}\;${WIN_PYTHONPATH}" - ) - else() - set_tests_properties(${testname} - PROPERTIES ENVIRONMENT - "PYTHONPATH=${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}:$ENV{PYTHONPATH}" - ) - endif() - endfunction() - # Python Examples # Current examples all use HDF5, elaborate if other backends are used @@ -1522,14 +1628,14 @@ if(openPMD_BUILD_TESTING) ${openPMD_RUNTIME_OUTPUT_DIRECTORY} ) endif() - configure_python_test(Example.py.${examplename}) + test_set_pythonpath(Example.py.${examplename}) endif() endforeach() if(openPMD_HAVE_ADIOS2 AND openPMD_BUILD_TESTING AND NOT WIN32) add_test(NAME Asynchronous.10_streaming.py COMMAND sh -c "${Python_EXECUTABLE} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/10_streaming_write.py & sleep 1; ${Python_EXECUTABLE} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/10_streaming_read.py" WORKING_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY}) - configure_python_test(Asynchronous.10_streaming.py) + test_set_pythonpath(Asynchronous.10_streaming.py) endif() endif() 
endif() From 102e90790245f1c3cea31658055b30ffeae5a8d1 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sat, 25 Mar 2023 15:40:54 -0700 Subject: [PATCH 46/82] Release: 0.15.0 (#1391) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Update version and write announcement text in changelog. * Changelog: Until 93befd7476974d6bfa1a5f69d228c133cadf4ef6 * Add remaining items to changelog TODO: Cleanup, sorting, grouping * Group changelog items * Fix formatting * Upgrading guide * Add summary * Cleanup * Add 0.14.4 and 0.14.5 changelog * Shorter Release Title, Date * Revert: `examples/7_extended_write_serial.cpp` --------- Co-authored-by: Franz Pöschel --- CHANGELOG.rst | 297 +++++++++++++++++++++++++++++++++++- CITATION.cff | 2 +- NEWS.rst | 86 +++++++++++ docs/source/conf.py | 2 +- docs/source/index.rst | 2 +- include/openPMD/version.hpp | 2 +- setup.py | 2 +- 7 files changed, 383 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 86a08b082c..c08be42008 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -5,11 +5,18 @@ Changelog 0.15.0 ------ -**Date:** TBA +**Date:** 2023-03-25 -[Title] +C++17, Error Recovery, ADIOS2 BP5, Append & Read-Linear Modes, Performance & Memory -[Summary] +This release adds error recovery mechanisms, in order to access erroneous datasets, created e.g. by crashing simulations. +The BP5 engine of ADIOS2 v2.9 is fully supported by this release, including access to its various features for more fine-grained control of memory usage. +Various I/O performance improvements for HDF5 are activated by default. +Runtime configuration of openPMD and its backends, e.g. selection of backends and compression, is now consistently done via JSON, and alternatively via TOML for better readability. +The data storage/retrieval API now consistently supports all common C++ pointer types (raw and smart pointers), implementing automatic memory optimizations for ADIOS2 BP5 if using unique pointers. 
+ +The minimum required C++ version is now C++17. +Supported Python versions are Python 3.10 and 3.11. Changes to "0.14.0" ^^^^^^^^^^^^^^^^^^^ @@ -17,20 +24,300 @@ Changes to "0.14.0" Features """""""" -- Python: support of 3.10 and 3.11, removal of 3.6 #1323 +- Python: support of 3.10 and 3.11, removal of 3.6 #1323 #1139 - include internally shipped toml11 v3.7.1 #1148 #1227 - pybind11: require version 2.10.1+ #1220 #1322 +- Switch to C++17 #1103 #1128 #1140 #1157 #1164 #1183 #1185 +- Error-recovery during parsing #1150 #1179 #1237 +- Extensive update for JSON/TOML configuration #1043 - TOML as an alternative to JSON #1146 + - compression configuration via JSON #1043 + - case insensitivity #1043 + - datatype conversion for string values #1043 + - ``json::merge`` public function #1043 #1333 + - better warnings for unused values #1043 + - new JSON options: ``backend`` and ``iteration_encoding`` #1043 + - ADIOS1 compression configuration via JSON #1043 #1162 +- New access types: + - ``APPEND``: Add new iterations without reading, supports ADIOS2 Append mode #1007 #1302 + - ``READ_LINEAR``: For reading through ADIOS2 steps, for full support of ADIOS2 BP5 #1291 #1379 +- ADIOS2: + + - Support for ADIOS 2.8 and newer #1166 + - Support for ADIOS2 BP5 engine #1119 #1215 #1258 #1262 #1291 + - Support for selecting flush targets (buffer/disk) in ADIOS2 BP5 for more fine-grained memory control #1226 #1207 + - Add file extensions for ADIOS2: ``.bp4``, ``.bp5`` and furthers, make them behave more as expected #1218 + - ADIOS2: Support for operator specification at read time #1191 + - ADIOS2: Automatic (de)activation of span API depending on compression configuration #1155 + - Optionally explicitly map ADIOS2 steps to openPMD iterations via modifiable attributes (only supported in experimental ADIOS2 modes) #949 + +- HDF5: + + - I/O optimizations for HDF5 #1129 #1133 #1192 + - Improve write time by disabling fill #1192 + +- Miscellaneous API additions: + + - Support for all 
char types (CHAR SCHAR UCHAR) #1275 #1378 + - Header for openPMD-defined error types #1080 #1355 + - Add ``Series::close()`` API call #1324 + - Support for array specializations of C++ smart pointer types #1296 + - Direct support for raw pointer types in ``store/loadChunk()`` API, replacing former ``shareRaw()`` #1229 + - Support for and backend optimizations (ADIOS2 BP5) based on unique pointer types in ``store/loadChunk()`` #1294 + - Use C++ ``std::optional`` types in public Attribute API (``Attribute::getOptional()``) for dynamic attribute type conversion #1278 + +- Support for empty string attributes #1087 #1223 #1338 +- Support for inconsistent and overflowing padding of filenames in file-based encoding #1118 #1173 #1253 Bug Fixes """"""""" +- HDF5 + + - Support attribute reads from HDF5 Vlen Strings #1084 + - Close HFD5 handles in availableChunks task #1386 +- ADIOS1 + + - Fix use-after-free issue in ``ADIOS1IOHandler`` #1224 +- ADIOS2 + + - Don't apply compression operators multiple times #1152 + - Fix logic for associating openPMD objects to files and paths therein (needed for interleaved write and close) #1073 + - Fix precedence of environment variable vs. 
JSON configuration + - Detect changing datatypes and warn/fail accordingly #1356 + - Remove deprecated debug parameter in ADIOS2 #1269 +- HDF5 + + - missing HDF5 include #1236 - CMake: - MPI: prefer HDF5 in Config package, too #1340 + - ADIOS1: do not include as ``-isystem`` #1076 + - Remove caching of global CMake variables #1313 + - Fix Build & Install Option Names #1326 + - Prefer parallel HDF5 in find_package in downstream use #1340 + - CMake: Multi-Config Generator #1384 +- Warnings: + + - Avoid copying std::string in for loop #1268 + - SerialIOTest: Fix GCC Pragma Check #1213 #1260 + - Fix ``-Wsign-compare`` #1202 +- Python: + + - Fix ``__repr__`` (time and Iteration) #1242 #1149 + - Python Tests: Fix ``long`` Numpy Type #1348 + - use ``double`` as standard for attributes #1290 #1369kk + - Fix ``dtype_from_numpy`` #1357 + - Wheels: Fix macOS arm64 (M1) builds #1233 + - Avoid use-after-free in Python bindings #1225 + - Patch MSVC pybind11 debug bug #1209 + - sign compare warning #1198 +- Don't forget closing unmodified files #1083 +- Diverse relaxations on attribute type conversions #1085 #1096 #1137 +- Performance bug: Don't reread iterations that are already parsed #1089 +- Performance bug: Don't flush prematurely #1264 +- Avoid object slicing in Series class #1107 +- Logical fixes for opening iterations #1239 + +Breaking Changes +"""""""""""""""" + +- Deprecations + + - ``Iteration::closedByWriter()`` attribute #1088 + - ``shareRaw`` (replaced with raw- and unique-ptr overloads, see features section) #1229 + - ADIOS1 backend (deprecation notice has hints on upgrading to ADIOS2) #1314 +- Redesign of public class structure + + - Apply frontend redesign to Container and deriving classes #1115 #1159 +- Removal of APIs + - ``Dataset::transform``, ``Dataset::compression`` and ``Dataset::chunksize`` #1043 + +.. note:: + + See :ref:`NEWS.rst ` for a more detailed upgrade guide. 
+ +Other +""""" +- Catch2: updated to 2.13.10 #1299 #1344 +- Tests & Examples: + + - Test: Interleaved Write and Close #1073 #1078 + - Extend and fix examples 8a and 8b (bench write/read parallel) #1131 #1144 #1231 #1359 #1240 + - support variable encoding #1131 + - block located at top left corner was mistaken to read a block in the center #1131 + - GPU support in example 8a #1240 + - Extensive Python example for Streaming API #1141 + - General overhaul of examples to newest API standards #1371 +- CI + + - URL Check for broken links #1086 + - CI savings (abort prior push, draft skips most) #1116 + - Appveyor fixes for Python Executable #1127 + - Pre-commit and clang-format #1142 #1175 #1178 #1032 #1222 #1370 + - ADIOS1: Fix Serial Builds, CI: Clang 10->12 #1167 + - Upgrade NVHPC Apt repository #1241 + - Spack upgrade to v0.17.1 and further fixes #1244 + - Update CUDA repository key #1256 + - Switch from Conda to Mamba #1261 + - Remove ``-Wno-deprecated-declarations`` where possible #1246 + - Expand read-only permission tests #1272 + - Ensure that the CI also build against ADIOS2 v2.7.1 #1271 + - Build(deps): Bump s-weigand/setup-conda from 1.1.0 to 1.1.1 #1284 + - Style w/ Ubuntu 22.04 #1346 + - Add CodeQL workflow for GitHub code scanning #1345 + - Cache Action v3 #1358 #1362 + - Spack: No More ``load -r`` #1125 +- CMake + + - Extra CMake Arg Control in ``setup.py`` #1199 + - Do not strip Python symbols in Debug #1219 + - Disable in-source builds #1079 + - Fixes for NVCC #1102 #1103 #1184 + - Set RPATHs on installed targets #1105 + - CMake 3.22+: Policy ``CMP0127`` #1165 + - Warning Flags First in ``CXXFLAGS`` #1172 +- Docs + + - More easily findable documentation for ``-DPython_EXECUTABLE`` #1104 and lazy parsing #1111 + - HDF5 performance tuning and known issues #1129 #1132 + - HDF5: Document ``HDF5_USE_FILE_LOCKING`` #1106 + - SST/libfabric installation notes for Cray systems #1134 + - OpenMPI: Document ``OMPI_MCA_io`` Control #1114 + - Update Citation & Add 
BibTeX (#1168) + - Fix CLI Highlighting #1171 + - HDF5 versions that support collective metadata #1250 + - Recommend Static Build for Superbuilds #1325 + - Latest Sphinx, Docutils, RTD #1341 +- Tooling + - ``openpmd-pipe``: better optional support for MPI #1186 #1336 + - ``openpmd-ls``: use lazy parsing #1111 +- Enable use of ``Series::setName()`` and ``Series::setIterationEncoding()`` in combination with file-based encoding 1081 +- Remove ``DATATYPE``, ``HIGHEST_DATATYPE`` AND ``LOWEST_DATATYPE`` from Datatype enumeration #1100 +- Check for undefined datatypes in dataset definitions #1099 +- Include ``StringManip`` header into public headers #1124 +- Add default constructor for ``DynamicMemoryView`` class #1156 +- Helpful error message upon wrong backend specification #1214 +- Helpful error message for errors in ``loadChunk`` API #1373 +- No warning when opening a single file of a file-based Series #1368 +- Add ``IterationIndex_t`` type alias #1285 + + +0.14.5 +------ +**Date:** 2022-06-07 + +Improve Series Parsing, Python & Fix Backend Bugs + +This release improves reading back iterations that overflow the specified zero-pattern. +ADIOS1, ADIOS2 and HDF5 backend stability and performance were improved. +Python bindings got additional wheel platform support and various smaller issues were fixed. 
+ +Changes to "0.14.4" +^^^^^^^^^^^^^^^^^^^ + +Bug Fixes +""""""""" + +- Series and iterations: + + - fix read of overflowing zero patterns #1173 #1253 + - fix for opening an iteration #1239 +- ADIOS1: + + - fix use-after-free in ``ADIOS1IOHandler`` #1224 + - Remove task from IO queue if it fails with exception #1179 +- ADIOS2: + + - Remove deprecated debug parameter in ADIOS2 #1269 + - Add memory leak suppression: ``ps_make_timer_name_`` #1235 + - Don't safeguard empty strings while reading #1223 +- HDF5: + + - missing HDF5 include #1236 +- Python: + + - Wheels: Fix macOS arm64 (M1) builds #1233 + - Python Iteration: Fix ``__repr__`` (time) #1242 + - Increase reference count also in other ``load_chunk`` overload #1225 + - Do Not Strip Symbols In Debug #1219 + - Patch MSVC pybind11 debug bug #1209 Other """"" -- Catch2: updated to 2.13.10 #1299 #... + +- HDF5: + + - Improve write time by disabling fill #1192 + - Update documented HDF5 versions with collective metadata issues #1250 +- Print warning if mpi4py is not found in ``openpmd-pipe`` #1186 +- Pass-through flushing parameters #1226 +- Clang-Format #1032 #1222 +- Warnings: + + - Avoid copying std::string in for loop #1268 + - SerialIOTest: Fix GCC Pragma Check #1213 #1260 + - Fix ``-Wsign-compare`` #1202 +- CI: + + - Fix Conda Build - <3 Mamba #1261 + - Fix Spack #1244 + - Update CUDA repo key #1256 + - NVHPC New Apt Repo #1241 +- Python: + + - ``setup.py``: Extra CMake Arg Control #1199 + - sign compare warning #1198 + + +0.14.4 +------ +**Date:** 2022-01-21 + +Increased Compatibility & Python Install Bug + +This release fixes various read/parsing bugs and increases compatibility with upcoming versions of ADIOS and old releases of Intel ``icpc``. +An installation issue for pip-based installs from source in the last release was fixed and Python 3.10 support added. +Various documentation and installation warnings have been fixed. 
+ +Changes to "0.14.3" +^^^^^^^^^^^^^^^^^^^ + +Bug Fixes +""""""""" + +- ADIOS2: + + - automatically deactivate ``span`` based ``Put`` API when operators are present #1155 + - solve incompatibilities w/ post-``2.7.1`` ``master``-branch #1166 +- ICC 19.1.2: C++17 work-arounds (``variant``) #1157 +- Don't apply compression operators multiple times in variable-based iteration encoding #1152 +- Reading/parsing: + + - remove invalid records from data structures entirely #1150 + - fix grid spacing with type long double #1137 +- Python: + + - fix ``Iteration`` ``__repr__`` typo #1149 + - add ``cmake/`` to ``MANIFEST.in`` #1140 + +Other +""""" + +- add simple ``.pre-commit-config.yaml`` +- Python: + + - support Python 3.10 #1139 +- CMake: + + - warning flags first in ``CXXFLAGS`` #1172 + - add policy CMP0127 (v3.22+) #1165 +- Docs: + + - fix CLI highlighting #1171 + - update citation & add BibTeX #1168 + - fix HDF5 JSON File #1169 + - minor warnings #1170 0.14.3 diff --git a/CITATION.cff b/CITATION.cff index 6dbf0c1728..ebb82b34d7 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -25,7 +25,7 @@ contact: orcid: https://orcid.org/0000-0003-1943-7141 email: axelhuebl@lbl.gov title: "openPMD-api: C++ & Python API for Scientific I/O with openPMD" -version: 0.15.0-dev +version: 0.15.0 repository-code: https://github.com/openPMD/openPMD-api doi: 10.14278/rodare.27 license: LGPL-3.0-or-later diff --git a/NEWS.rst b/NEWS.rst index 707d1cb712..40246be635 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -17,6 +17,92 @@ Catch2 2.13.10 is now the minimally supported version for tests. The following backend-specific members of the ``Dataset`` class have been removed: ``Dataset::setChunkSize()``, ``Dataset::setCompression()``, ``Dataset::setCustomTransform()``, ``Dataset::chunkSize``, ``Dataset::compression``, ``Dataset::transform``. They are replaced by backend-specific options in the JSON-based backend configuration. This can be passed in ``Dataset::options``. 
+The following configuration shows a compression configuration for ADIOS1 and ADIOS2: + +.. code-block:: json + + { + "adios1": { + "dataset": { + "transform": "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" + } + }, + "adios2": { + "dataset": { + "operators": [ + { + "type": "zlib", + "parameters": { + "clevel": 9 + } + } + ] + } + } + } + +Or alternatively, in TOML: + +.. code-block:: toml + + [adios1.dataset] + transform = "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" + + [[adios2.dataset.operators]] + type = "zlib" + parameters.clevel = 9 + + +The helper function ``shareRaw`` of the C++ API has been deprecated. +In its stead, there are now new API calls ``RecordComponent::storeChunkRaw()`` and ``RecordComponent::loadChunkRaw``. + +The **ADIOS1 backend** is now deprecated, to be replaced fully with ADIOS2. +Now is a good time to check if ADIOS2 is able to read old ADIOS1 datasets that you might have. Otherwise, ``openpmd-pipe`` can be used for conversion: + +.. code-block:: bash + + openpmd-pipe --infile adios1_dataset_%T.bp --inconfig 'backend = "adios1"' --outfile adios2_dataset_%T.bp --outconfig 'backend = "adios2"' + +The class structure of ``Container`` and deriving classes has been reworked. +Usage of the API generally stays the same, but code that relies on the concrete class structure might break. + +The ``Iteration::closedByWriter()`` attribute has been deprecated as a leftover from the early streaming implementation. + +Old: + +.. code-block:: cpp + + double const * data; + recordComponent.storeChunk(shareRaw(data), offset, extent); + +New: + +.. code-block:: cpp + + double const * data; + recordComponent.storeChunkRaw(data, offset, extent); + +Additionally, ``determineDatatype`` now accepts pointer types (raw and smart pointers): + +Old: + +.. code-block:: cpp + + std::vector data; + Datatype dt = determineDatatype(shareRaw(data)); + +New: + +.. code-block:: cpp + + std::vector data; + Datatype dt = determineDatatype(data.data()); + +.. 
note:: + + ``determineDatatype`` does not directly accept ``determineDatatype(data)``, since it's unclear if the result from that call would be ``Datatype::DOUBLE`` or ``Datatype::VEC_DOUBLE``. + + In order to get the direct mapping between C++ type and openPMD datatype, use the template parameter of ``determineDatatype``: ``determineDatatype()`` or ``determineDatatype>()``. 0.14.0 diff --git a/docs/source/conf.py b/docs/source/conf.py index ca3f41e65a..03d8d7bbf7 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -87,7 +87,7 @@ # The short X.Y version. version = u'0.15.0' # The full version, including alpha/beta/rc tags. -release = u'0.15.0-dev' +release = u'0.15.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/index.rst b/docs/source/index.rst index 1abdadebaf..c28dacb6c6 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -42,7 +42,7 @@ openPMD-api version supported openPMD standard versions ======================== =================================== ``2.0.0+`` ``2.0.0+`` (not released yet) ``1.0.0+`` ``1.0.1-1.1.0`` (not released yet) -``0.13.1-0.14.0`` (beta) ``1.0.0-1.1.0`` +``0.13.1-0.15.0`` (beta) ``1.0.0-1.1.0`` ``0.1.0-0.12.0`` (alpha) ``1.0.0-1.1.0`` ======================== =================================== diff --git a/include/openPMD/version.hpp b/include/openPMD/version.hpp index 031c5085e7..29a4f7d5ba 100644 --- a/include/openPMD/version.hpp +++ b/include/openPMD/version.hpp @@ -30,7 +30,7 @@ #define OPENPMDAPI_VERSION_MAJOR 0 #define OPENPMDAPI_VERSION_MINOR 15 #define OPENPMDAPI_VERSION_PATCH 0 -#define OPENPMDAPI_VERSION_LABEL "dev" +#define OPENPMDAPI_VERSION_LABEL "" /** @} */ /** maximum supported version of the openPMD standard (read & write, diff --git a/setup.py b/setup.py index c73dd3c024..cea00f5805 100644 --- a/setup.py +++ b/setup.py @@ -168,7 +168,7 @@ def build_extension(self, ext): setup( name='openPMD-api', # note 
PEP-440 syntax: x.y.zaN but x.y.z.devN - version='0.15.0.dev', + version='0.15.0', author='Axel Huebl, Franz Poeschel, Fabian Koller, Junmin Gu', author_email='axelhuebl@lbl.gov, f.poeschel@hzdr.de', maintainer='Axel Huebl', From 31e3c42eb6687269adfb0e63c35269db328ea6ec Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sat, 25 Mar 2023 23:37:32 -0700 Subject: [PATCH 47/82] CMake: Fix Python Install Directory (#1393) Fix regressions in 0.15.0 that showed up during packaging. --- CMakeLists.txt | 10 +++++----- setup.py | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c1e2f6e041..d737552e65 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -680,7 +680,7 @@ if(openPMD_HAVE_ADIOS1) COMPILE_PDB_NAME_${CFG_UPPER} openPMD.ADIOS1.Serial ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} - RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${CMAKE_PYTHON_OUTPUT_DIRECTORY}/${CFG} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} ) @@ -875,10 +875,10 @@ if(openPMD_HAVE_PYTHON) "${CMAKE_INSTALL_LIBDIR}/python${Python_VERSION_MAJOR}.${Python_VERSION_MINOR}/site-packages" ) endif() - # Location for installed python package - set(openPMD_INSTALL_PYTHONDIR "${openPMD_INSTALL_PYTHONDIR_DEFAULT}") - # Build directory for python modules - set(openPMD_PYTHON_OUTPUT_DIRECTORY "${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}") + set(openPMD_INSTALL_PYTHONDIR "${openPMD_INSTALL_PYTHONDIR_DEFAULT}" + CACHE STRING "Location for installed python package") + set(openPMD_PYTHON_OUTPUT_DIRECTORY "${openPMD_BINARY_DIR}/${openPMD_INSTALL_PYTHONDIR}" + CACHE STRING "Build directory for python modules") set_target_properties(openPMD.py PROPERTIES 
ARCHIVE_OUTPUT_NAME openpmd_api_cxx LIBRARY_OUTPUT_NAME openpmd_api_cxx diff --git a/setup.py b/setup.py index cea00f5805..a56ea38feb 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ def build_extension(self, ext): '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + os.path.join(extdir, "openpmd_api"), # '-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=' + extdir, - '-DCMAKE_PYTHON_OUTPUT_DIRECTORY=' + extdir, + '-DopenPMD_PYTHON_OUTPUT_DIRECTORY=' + extdir, '-DPython_EXECUTABLE=' + sys.executable, '-DopenPMD_USE_PYTHON:BOOL=ON', # variants From c9b0f70294ef8d9ac89018c9b439815be9e77b96 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sat, 25 Mar 2023 23:49:49 -0700 Subject: [PATCH 48/82] macOS AppleClang12 Fixes (#1395) * CI: macOS AppleClang12 Fixes CI already runs on macOS-12. Adds an older runner. * Fix: Older AppleClang Issue seen with AppleClang 12.0 and not seen with 14.0. Assume 13 might be affected, too. * macOS CI: Numpy Install --- .github/workflows/macos.yml | 36 ++++++++++++++++--- include/openPMD/RecordComponent.tpp | 9 +++-- .../openPMD/backend/PatchRecordComponent.hpp | 4 ++- 3 files changed, 41 insertions(+), 8 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 56603c4c8a..d1ab637f3e 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -12,7 +12,7 @@ jobs: # appleclang10_py37_h5_ad2_libcpp # appleclang11_nopy_nompi_h5_ad2 - appleclang12_py_mpi_h5_ad2: + appleclang14_py_mpi_h5_ad2: runs-on: macos-latest if: github.event.pull_request.draft == false steps: @@ -28,11 +28,10 @@ jobs: python3 -m pip install -U mpi4py numpy pandas set -e - name: Build - env: {CXXFLAGS: -Werror -DTOML11_DISABLE_STD_FILESYSTEM, MACOSX_DEPLOYMENT_TARGET: 10.13} - # C++11 & 14 support in macOS 10.9+ - # C++17 support in macOS 10.13+/10.14+ + env: {CXXFLAGS: -Werror, MACOSX_DEPLOYMENT_TARGET: 10.15} + # 10.14+ due to std::visit + # 10.15+ due to std::filesystem in toml11 # 
https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/#macos-and-deployment-target-versions - # std::filesystem needs macOS 10.15 run: | share/openPMD/download_samples.sh build cmake -S . -B build \ @@ -44,5 +43,32 @@ jobs: cmake --build build --parallel 2 ctest --test-dir build --verbose + appleclang12_py: + runs-on: macos-10.15 + # next: macOS-11 + if: github.event.pull_request.draft == false + steps: + - uses: actions/checkout@v3 + - name: Install + run: | + set +e + python3 -m pip install -U numpy pandas + set -e + - name: Build + env: {CXXFLAGS: -Werror -DTOML11_DISABLE_STD_FILESYSTEM, MACOSX_DEPLOYMENT_TARGET: 10.14} + # 10.14+ due to std::visit + # std::filesystem in toml11 needs macOS 10.15 + # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards/#macos-and-deployment-target-versions + run: | + share/openPMD/download_samples.sh build + cmake -S . -B build \ + -DopenPMD_USE_PYTHON=ON \ + -DopenPMD_USE_MPI=OFF \ + -DopenPMD_USE_HDF5=OFF \ + -DopenPMD_USE_ADIOS2=OFF \ + -DopenPMD_USE_INVASIVE_TESTS=ON + cmake --build build --parallel 2 + ctest --test-dir build --verbose + # TODO: apple_conda_ompi_all (similar to conda_ompi_all on Linux) # both OpenMPI and MPICH cause startup (MPI_Init) issues on GitHub Actions diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 1df93875b5..2f12a5cf2d 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -28,6 +28,9 @@ #include "openPMD/auxiliary/TypeTraits.hpp" #include "openPMD/auxiliary/UniquePtr.hpp" +#include + + namespace openPMD { template< typename T > @@ -80,7 +83,8 @@ inline std::shared_ptr< T > RecordComponent::loadChunk( for( auto const& dimensionSize : extent ) numPoints *= dimensionSize; -#if defined(__clang_major__) && __clang_major__ < 7 +#if (defined(__clang_major__) && __clang_major__ < 7) || \ + (defined(__apple_build_version__) && __clang_major__ < 14) auto newData = std::shared_ptr(new T[numPoints], [](T *p) { 
delete[] p; }); loadChunk(newData, offset, extent); @@ -374,7 +378,8 @@ RecordComponent::storeChunk( Offset offset, Extent extent ) std::move( extent ), []( size_t size ) { -#if defined(__clang_major__) && __clang_major__ < 7 +#if (defined(__clang_major__) && __clang_major__ < 7) || \ + (defined(__apple_build_version__) && __clang_major__ < 14) return std::shared_ptr< T >{ new T[ size ], []( auto * ptr ) { delete[] ptr; } }; #else diff --git a/include/openPMD/backend/PatchRecordComponent.hpp b/include/openPMD/backend/PatchRecordComponent.hpp index 51537b0071..dfed6107d8 100644 --- a/include/openPMD/backend/PatchRecordComponent.hpp +++ b/include/openPMD/backend/PatchRecordComponent.hpp @@ -23,6 +23,7 @@ #include "openPMD/auxiliary/ShareRawInternal.hpp" #include "openPMD/backend/BaseRecordComponent.hpp" +#include #include #include #include @@ -144,7 +145,8 @@ template inline std::shared_ptr PatchRecordComponent::load() { uint64_t numPoints = getExtent()[0]; -#if defined(__clang_major__) && __clang_major__ < 7 +#if (defined(__clang_major__) && __clang_major__ < 7) || \ + (defined(__apple_build_version__) && __clang_major__ < 14) auto newData = std::shared_ptr(new T[numPoints], [](T *p) { delete[] p; }); load(newData); From 58ce561a98527e9a17831ddd0c62cb9decf06e5e Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 26 Mar 2023 12:34:13 -0700 Subject: [PATCH 49/82] Docs: Formatting of Lists in 0.15.0 (#1399) Fix the rst formatting of lists in the 0.15.0 changelog. 
--- CHANGELOG.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index c08be42008..4ebaacde92 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Features - Switch to C++17 #1103 #1128 #1140 #1157 #1164 #1183 #1185 - Error-recovery during parsing #1150 #1179 #1237 - Extensive update for JSON/TOML configuration #1043 + - TOML as an alternative to JSON #1146 - compression configuration via JSON# 1043 - case insensitivity #1043 @@ -39,6 +40,7 @@ Features - new JSON options: ``backend`` and ``iteration_encoding`` #1043 - ADIOS1 compression configuration via JSON #1043 #1162 - New access types: + - ``APPEND``: Add new iterations without reading, supports ADIOS2 Append mode #1007 #1302 - ``READ_LINEAR``: For reading through ADIOS2 steps, for full support of ADIOS2 BP5 #1291 #1379 - ADIOS2: @@ -54,6 +56,7 @@ Features - HDF5: - I/O optimizations for HDF5 #1129 #1133 #1192 + - Improve write time by disabling fill #1192 - Miscellaneous API additions: @@ -131,6 +134,7 @@ Breaking Changes - Apply frontend redesign to Container and deriving classes #1115 #1159 - Removal of APIs + - ``Dataset::transform``, ``Dataset::compression`` and ``Dataset::chunksize`` #1043 .. note:: @@ -190,6 +194,7 @@ Other - Recommend Static Build for Superbuilds #1325 - Latest Sphinx, Docutils, RTD #1341 - Tooling + - ``openpmd-pipe``: better optional support for MPI #1186 #1336 - ``openpmd-ls``: use lazy parsing #1111 - Enable use of ``Series::setName()`` and ``Series::setIterationEncoding()`` in combination with file-based encoding 1081 From 0616e457081770159a0c934d37986e8ad78a07e7 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 26 Mar 2023 23:04:14 -0700 Subject: [PATCH 50/82] Fix: Artifact Placement in Windows Wheels (#1400) * Fix: Windows Wheels (`setup.py`) Our `setup.py` wheel logic currently picks up our libs and Python module straight from the build directory. The latest changes then misplaced the artifacts in the final wheel. 
Add a tweak to CMake that allows to overwrite this for the two targets (lib and python module). This will be modernized at some point using `scikit-build`. * CI: Windows Pip Install & Test --- .github/workflows/windows.yml | 22 +++++++++++++++++++++ CMakeLists.txt | 36 +++++++++++++++++++++++++---------- setup.py | 2 ++ 3 files changed, 50 insertions(+), 10 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index e64249bf5b..22211a4a8d 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -28,6 +28,28 @@ jobs: # add before install, and fix Python path: # ctest --test-dir build -C Debug --output-on-failure + build_win_msvc_pip: + name: MSVC w/o MPI via pip + runs-on: windows-latest + if: github.event.pull_request.draft == false + steps: + - uses: actions/checkout@v3 + - name: Build & Install + run: | + python3.exe -m pip install --upgrade pip setuptools wheel + python3.exe -m pip install --upgrade cmake + python3.exe -m pip install --upgrade numpy + + python3.exe -m pip wheel . + if(!$?) { Exit $LASTEXITCODE } + python3.exe -m pip install openPMD_api-0.15.0-cp39-cp39-win_amd64.whl + if(!$?) { Exit $LASTEXITCODE } + + python3.exe -c "import openpmd_api as api; print(api.variants)" + if(!$?) { Exit $LASTEXITCODE } + + python3.exe -m openpmd_api.ls --help + if(!$?) 
{ Exit $LASTEXITCODE } build_win_clang: name: Clang w/o MPI diff --git a/CMakeLists.txt b/CMakeLists.txt index d737552e65..34c8630bca 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -578,15 +578,26 @@ set_target_properties(openPMD PROPERTIES ) # note: same as above, but for Multi-Config generators if(isMultiConfig) + # this is a tweak for setup.py to pick up our libs & pybind module properly + # this assumes there will only be one config built + option(openPMD_BUILD_NO_CFG_SUBPATH + "For multi-config builds, do not appends the config to build dir" OFF) + mark_as_advanced(openPMD_BUILD_NO_CFG_SUBPATH) + foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) string(TOUPPER "${CFG}" CFG_UPPER) + if(openPMD_BUILD_NO_CFG_SUBPATH) # for setup.py + set(CFG_PATH "") + else() + set(CFG_PATH "/${CFG}") + endif() set_target_properties(openPMD PROPERTIES COMPILE_PDB_NAME_${CFG_UPPER} openPMD - ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} - LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} - RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} - PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} - COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}${CFG_PATH} + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}${CFG_PATH} + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}${CFG_PATH} + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}${CFG_PATH} + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}${CFG_PATH} ) endforeach() endif() @@ -893,13 +904,18 @@ if(openPMD_HAVE_PYTHON) if(isMultiConfig) foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) string(TOUPPER "${CFG}" CFG_UPPER) + if(openPMD_BUILD_NO_CFG_SUBPATH) # for setup.py + set(CFG_PATH "") + else() + 
set(CFG_PATH "/${CFG}") + endif() set_target_properties(openPMD.py PROPERTIES COMPILE_PDB_NAME_${CFG_UPPER} openpmd_api_cxx - ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api - LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api - RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api - PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api - COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}/${CFG}/openpmd_api + ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}${CFG_PATH}/openpmd_api + LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}${CFG_PATH}/openpmd_api + RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}${CFG_PATH}/openpmd_api + PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}${CFG_PATH}/openpmd_api + COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PYTHON_OUTPUT_DIRECTORY}${CFG_PATH}/openpmd_api ) endforeach() endif() diff --git a/setup.py b/setup.py index a56ea38feb..1695046cd6 100644 --- a/setup.py +++ b/setup.py @@ -84,8 +84,10 @@ def build_extension(self, ext): cfg = 'Debug' if self.debug else 'Release' build_args = ['--config', cfg] + # Assumption: Windows builds are always multi-config (MSVC VS) if platform.system() == "Windows": cmake_args += [ + '-DopenPMD_BUILD_NO_CFG_SUBPATH:BOOL=ON', '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format( cfg.upper(), os.path.join(extdir, "openpmd_api") From af7ac2e197a726404a4b08efd01f82cb5eff479d Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 26 Mar 2023 23:20:34 -0700 Subject: [PATCH 51/82] Releases: Nils Schild (IPP) (#1394) Adding Nils Schild as contributor for his CMake contributions. Thanks, Nils! 
--- .rodare.json | 6 ++++++ README.md | 2 ++ 2 files changed, 8 insertions(+) diff --git a/.rodare.json b/.rodare.json index 5ceaf51f24..cf9e730e88 100644 --- a/.rodare.json +++ b/.rodare.json @@ -120,6 +120,12 @@ "name": "Gruber, Bernhard Manfred", "orcid": "0000-0001-7848-1690", "type": "Other" + }, + { + "affiliation": "Max Planck Institute for Plasma Physics (IPP)", + "name": "Schild, Nils", + "orcid": "0009-0000-5048-4814", + "type": "Other" } ], "title": "C++ & Python API for Scientific I/O with openPMD", diff --git a/README.md b/README.md index 0973281760..f067242530 100644 --- a/README.md +++ b/README.md @@ -422,6 +422,8 @@ Further thanks go to improvements and contributions from: HDF5 performance tuning * [Bernhard Manfred Gruber (CERN)](https://github.com/bernhardmgruber): CMake fix for parallel HDF5 +* [Nils Schild (IPP)](https://github.com/DerNils-git): + CMake improvements for subprojects ### Grants From 54744759b887954a02a74ff0ac2fad83d0cdc330 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 28 Mar 2023 02:00:43 -0700 Subject: [PATCH 52/82] [pre-commit.ci] pre-commit autoupdate (#1405) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/mirrors-clang-format: v15.0.7 → v16.0.0](https://github.com/pre-commit/mirrors-clang-format/compare/v15.0.7...v16.0.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d66a4662f6..94d4f7b4cf 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: # clang-format v13 # to run manually, use .github/workflows/clang-format/clang-format.sh - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v15.0.7 + rev: v16.0.0 hooks: - id: clang-format From 
d15333d7f17fb7d7d7692c0608713a371825cac9 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 28 Mar 2023 02:01:18 -0700 Subject: [PATCH 53/82] Docs: ADIOS1 EOL in Overview (#1398) Mention that ADIOS1 is end-of-life in the backend overview page. --- docs/source/backends/adios2.rst | 20 ++++++++++++++++ docs/source/backends/overview.rst | 38 +++++++++++++++++++------------ 2 files changed, 44 insertions(+), 14 deletions(-) diff --git a/docs/source/backends/adios2.rst b/docs/source/backends/adios2.rst index d639f01897..f5ec078215 100644 --- a/docs/source/backends/adios2.rst +++ b/docs/source/backends/adios2.rst @@ -299,6 +299,26 @@ Known Issues Selected References ------------------- +* William F. Godoy, Norbert Podhorszki, Ruonan Wang, Chuck Atkins, Greg Eisenhauer, Junmin Gu, Philip Davis, Jong Choi, Kai Germaschewski, Kevin Huck, Axel Huebl, Mark Kim, James Kress, Tahsin Kurc, Qing Liu, Jeremy Logan, Kshitij Mehta, George Ostrouchov, Manish Parashar, Franz Poeschel, David Pugmire, Eric Suchyta, Keichi Takahashi, Nick Thompson, Seiji Tsutsumi, Lipeng Wan, Matthew Wolf, Kesheng Wu, and Scott Klasky. + *ADIOS 2: The Adaptable Input Output System. A framework for high-performance data management,* + SoftwareX, vol. 12, 100561, 2020. + `DOI:10.1016/j.softx.2020.100561 `__ + +* Franz Poeschel, Juncheng E, William F. Godoy, Norbert Podhorszki, Scott Klasky, Greg Eisenhauer, Philip E. Davis, Lipeng Wan, Ana Gainaru, Junmin Gu, Fabian Koller, Rene Widera, Michael Bussmann, and Axel Huebl. + *Transitioning from file-based HPC workflows to streaming data pipelines with openPMD and ADIOS2,* + Part of *Driving Scientific and Engineering Discoveries Through the Integration of Experiment, Big Data, and Modeling and Simulation,* SMC 2021, Communications in Computer and Information Science (CCIS), vol 1512, 2022. 
+ `arXiv:2107.06108 `__, `DOI:10.1007/978-3-030-96498-6_6 `__ + +* Lipeng Wan, Axel Huebl, Junmin Gu, Franz Poeschel, Ana Gainaru, Ruonan Wang, Jieyang Chen, Xin Liang, Dmitry Ganyushin, Todd Munson, Ian Foster, Jean-Luc Vay, Norbert Podhorszki, Kesheng Wu, and Scott Klasky. + *Improving I/O Performance for Exascale Applications through Online Data Layout Reorganization,* + IEEE Transactions on Parallel and Distributed Systems, vol. 33, no. 4, pp. 878-890, 2022. + `arXiv:2107.07108 `__, `DOI:10.1109/TPDS.2021.3100784 `__ + +* Junmin Gu, Philip Davis, Greg Eisenhauer, William Godoy, Axel Huebl, Scott Klasky, Manish Parashar, Norbert Podhorszki, Franz Poeschel, Jean-Luc Vay, Lipeng Wan, Ruonan Wang, and Kesheng Wu. + *Organizing Large Data Sets for Efficient Analyses on HPC Systems,* + Journal of Physics: Conference Series, vol. 2224, in *2nd International Symposium on Automation, Information and Computing* (ISAIC 2021), 2022. + `DOI:10.1088/1742-6596/2224/1/012042 `__ + * Hasan Abbasi, Matthew Wolf, Greg Eisenhauer, Scott Klasky, Karsten Schwan, and Fang Zheng. *Datastager: scalable data staging services for petascale applications,* Cluster Computing, 13(3):277–290, 2010. diff --git a/docs/source/backends/overview.rst b/docs/source/backends/overview.rst index 4509f5204a..cb426575d0 100644 --- a/docs/source/backends/overview.rst +++ b/docs/source/backends/overview.rst @@ -5,21 +5,26 @@ Overview This section provides an overview of features in I/O backends. 
-================== ============= ============= ========= =========== -**Feature** **ADIOS1** **ADIOS2** **HDF5** **JSON** ------------------- ------------- ------------- --------- ----------- +================== ============= =============== ========= ========== +**Feature** **ADIOS1** **ADIOS2** **HDF5** **JSON** +------------------ ------------- --------------- --------- ---------- Operating Systems Linux, OSX Linux, OSX, Windows ------------------- ------------- ----------------------------------- -Serial supported supported supported supported -MPI-parallel supported supported supported no -Dataset deletion no no supported supported -Compression upcoming supported upcoming no -Streaming/Staging not exposed upcoming no no -Portable Files limited awaiting yes yes -PByte-scalable yes yes no no -Performance A TBD B C -Native File Format ``.bp`` (BP3) ``.bp`` (BP4) ``.h5`` ``.json`` -================== ============= ============= ========= =========== +------------------ ------------- ------------------------------------ +Status end-of-life active active active +Serial supported supported supported supported +MPI-parallel supported supported supported no +Dataset deletion no no supported supported +Compression upcoming supported upcoming no +Streaming/Staging not exposed upcoming no no +Portable Files limited awaiting yes yes +PByte-scalable yes yes no no +Memory footprint large medium small small +Performance A- A B C +Native File Format ``.bp`` (BP3) ``.bp`` (BP3-5) ``.h5`` ``.json`` +================== ============= =============== ========= ========== + +:ref:`ADIOS1 is deprecated and will be removed in a future release `. +Please use ADIOS2 instead. * supported/yes: implemented and accessible for users of openPMD-api * upcoming: planned for upcoming releases of openPMD-api @@ -31,6 +36,11 @@ Native File Format ``.bp`` (BP3) ``.bp`` (BP4) ``.h5`` ``.json`` Selected References ------------------- +* Franz Poeschel, Juncheng E, William F. 
Godoy, Norbert Podhorszki, Scott Klasky, Greg Eisenhauer, Philip E. Davis, Lipeng Wan, Ana Gainaru, Junmin Gu, Fabian Koller, Rene Widera, Michael Bussmann, and Axel Huebl. + *Transitioning from file-based HPC workflows to streaming data pipelines with openPMD and ADIOS2,* + Part of *Driving Scientific and Engineering Discoveries Through the Integration of Experiment, Big Data, and Modeling and Simulation,* SMC 2021, Communications in Computer and Information Science (CCIS), vol 1512, 2022. + `arXiv:2107.06108 `__, `DOI:10.1007/978-3-030-96498-6_6 `__ + * Axel Huebl, Rene Widera, Felix Schmitt, Alexander Matthes, Norbert Podhorszki, Jong Youl Choi, Scott Klasky, and Michael Bussmann. *On the Scalability of Data Reduction Techniques in Current and Upcoming HPC Systems from an Application Perspective,* ISC High Performance 2017: High Performance Computing, pp. 15-29, 2017. From 1ac45e750f01c159fae9e6aa76d0d1066fc06d03 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 28 Mar 2023 11:03:57 +0200 Subject: [PATCH 54/82] Fix docs post 0.15 (#1404) * Fix adios1 JSON/TOML key * Update best practises --- docs/source/backends/adios2.rst | 31 +++++++++++++++++++++++++------ docs/source/details/adios1.json | 2 +- docs/source/details/adios1.toml | 2 +- 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/docs/source/backends/adios2.rst b/docs/source/backends/adios2.rst index f5ec078215..807538c2f6 100644 --- a/docs/source/backends/adios2.rst +++ b/docs/source/backends/adios2.rst @@ -108,13 +108,32 @@ The default behavior may be restored by setting the :ref:`JSON parameter .dir/`` directories. -Note that such a tool is not yet available for ADIOS2, but the ``bpmeta`` utility provided by ADIOS1 is capable of processing files written by ADIOS2. +A benefitial configuration depends heavily on: -Further options depend heavily on filesystem type, specific file striping, network infrastructure and available RAM on the aggregator nodes. 
-A good number for substreams is usually the number of contributing nodes divided by four. +1. Hardware: filesystem type, specific file striping, network infrastructure and available RAM on the aggregator nodes. +2. Software: communication and I/O patterns in the data producer/consumer, ADIOS2 engine being used. + +The BP4 engine optimizes aggressively for I/O efficiency at large scale, while the BP5 engine implements some compromises for tighter control of host memory usage. + +ADIOS2 aggregates at two levels: + +1. Aggregators: These are the processes that actually write data to the filesystem. + In BP5, there must be at least one aggregatore per compute node. +2. Subfiles: In BP5, multiple aggregators might write to the same physical file on the filesystem. + The BP4 engine does not distinguish the number of aggregators from the number of subfiles, each aggregator writes to one file. + +The number of aggregators depends on the actual scale of the application. +At low and mediocre scale, it is generally preferred to have every process write to the filesystem in order to make good use of parallel resources and utilize the full bandwidth. +At higher scale, reducing the number of aggregators is suggested, in order to avoid competition for resources between too many writing processes. +In the latter case, a good number of aggregators is usually the number of contributing nodes. +A file count lower than the number of nodes might be chosen in both BP4 and BP5 with care, file counts of "number of nodes divided by four" have yielded good results in some setups. + +Use of asynchronous I/O functionality (``BurstBufferPath`` in BP4, ``AsyncWrite`` in BP5) depends on the application, and might increase the performance or decrease it. +Asynchronous I/O can compete with MPI for communication resources, impacting the *compute* performance of an application. + +For SST streaming, the default TCP-based backend does not scale well in HPC situations. 
+Instead, a high-performance backend (``libfabric``, ``ucx`` or ``mpi`` (only supported for well-configured MPICH)) should be chosen. +The preferred backend usually depends on the system's native software stack. For fine-tuning at extreme scale or for exotic systems, please refer to the ADIOS2 manual and talk to your filesystem admins and the ADIOS2 authors. Be aware that extreme-scale I/O is a research topic after all. diff --git a/docs/source/details/adios1.json b/docs/source/details/adios1.json index 5d2cb4df71..95c934c76b 100644 --- a/docs/source/details/adios1.json +++ b/docs/source/details/adios1.json @@ -1,5 +1,5 @@ { - "adios2": { + "adios1": { "dataset": { "transform": "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" } diff --git a/docs/source/details/adios1.toml b/docs/source/details/adios1.toml index 2cb1fb357b..6d5dca07c3 100644 --- a/docs/source/details/adios1.toml +++ b/docs/source/details/adios1.toml @@ -1,2 +1,2 @@ -[adios.dataset] +[adios1.dataset] transform = "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" From d2678003a29c5bf9dfc19199786e4b75677fd229 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 28 Mar 2023 02:04:21 -0700 Subject: [PATCH 55/82] README: Remove LGTM Batches (#1402) LGTM is now in GitHub CodeQL. We don't get a fancy "grade" and badge for it anymore. 
--- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index f067242530..f753c0c5fa 100644 --- a/README.md +++ b/README.md @@ -9,9 +9,6 @@ C++ & Python API for Scientific I/O with openPMD [![License](https://img.shields.io/badge/license-LGPLv3-blue)](https://www.gnu.org/licenses/lgpl-3.0.html) [![DOI](https://rodare.hzdr.de/badge/DOI/10.14278/rodare.27.svg)](https://doi.org/10.14278/rodare.27) [![CodeFactor](https://www.codefactor.io/repository/github/openpmd/openpmd-api/badge)](https://www.codefactor.io/repository/github/openpmd/openpmd-api) -[![LGTM: C/C++](https://img.shields.io/lgtm/grade/cpp/g/openPMD/openPMD-api?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/openPMD/openPMD-api/context:cpp) -[![LGTM: Python](https://img.shields.io/lgtm/grade/python/g/openPMD/openPMD-api?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/openPMD/openPMD-api/context:python) -[![LGTM: Total alerts](https://img.shields.io/lgtm/alerts/g/openPMD/openPMD-api?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/openPMD/openPMD-api/alerts/) [![Coverage Status](https://coveralls.io/repos/github/openPMD/openPMD-api/badge)](https://coveralls.io/github/openPMD/openPMD-api) [![Documentation Status](https://readthedocs.org/projects/openpmd-api/badge/?version=latest)](https://openpmd-api.readthedocs.io/en/latest/?badge=latest) [![Linux/OSX Build Status dev](https://travis-ci.com/openPMD/openPMD-api.svg?branch=dev)](https://travis-ci.com/openPMD/openPMD-api) From ee3ff8f384966657678a702d5c4a5ff09ee094eb Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 28 Mar 2023 02:04:39 -0700 Subject: [PATCH 56/82] Docs: Fix HTML5 for Install Logos (#1397) With the update tot HTML5 in docutils, we need to modernize our CSS code that does fancy logos in the user install section. 
--- docs/source/install/install.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/install/install.rst b/docs/source/install/install.rst index 6df73eaae7..176924e976 100644 --- a/docs/source/install/install.rst +++ b/docs/source/install/install.rst @@ -6,7 +6,7 @@ Installation .. raw:: html + Supported openPMD Standard Versions ----------------------------------- @@ -46,6 +47,18 @@ openPMD-api version supported openPMD standard versions ``0.1.0-0.12.0`` (alpha) ``1.0.0-1.1.0`` ======================== =================================== + +Funding Acknowledgements +------------------------ + +The openPMD-api authors acknowledge support via the following programs. +Supported by the CAMPA collaboration, a project of the U.S. Department of Energy, Office of Science, Office of Advanced Scientific Computing Research and Office of High Energy Physics, Scientific Discovery through Advanced Computing (SciDAC) program. +Previously supported by the Consortium for Advanced Modeling of Particles Accelerators (CAMPA), funded by the U.S. DOE Office of Science under Contract No. DE-AC02-05CH11231. +Supported by the Exascale Computing Project (17-SC-20-SC), a collaborative effort of two U.S. Department of Energy organizations (Office of Science and the National Nuclear Security Administration). +This project has received funding from the European Unions Horizon 2020 research and innovation programme under grant agreement No 654220. +This work was partially funded by the Center of Advanced Systems Understanding (CASUS), which is financed by Germany's Federal Ministry of Education and Research (BMBF) and by the Saxon Ministry for Science, Culture and Tourism (SMWK) with tax funds on the basis of the budget approved by the Saxon State Parliament. + + .. 
toctree:: :hidden: From f489633cc7ecdbdfec69da356d53c6b4bb661bb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Mon, 3 Apr 2023 06:17:38 +0200 Subject: [PATCH 64/82] Enable clang-format also for .tpp files by using a regex instead of a predefined filter (#1403) * Manually specify file regex in clang-format hook * Some whitespace change to trigger pre-commit * Add a comment on what we did * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * More C++ and Json * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Axel Huebl --- .pre-commit-config.yaml | 8 ++ include/openPMD/RecordComponent.tpp | 199 +++++++++++++--------------- include/openPMD/config.hpp.in | 12 +- 3 files changed, 103 insertions(+), 116 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 94d4f7b4cf..2d6270105d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,6 +69,14 @@ repos: rev: v16.0.0 hooks: - id: clang-format + # By default, the clang-format hook configures: + # 'types_or': [c++, c, c#, cuda, java, javascript, json, objective-c, proto, textproto] + # Unfortunately, the c++ option does not recognize .tpp files, so we need to do this manually + # Since file filters in pre-commit work by logical AND, it's only possible to narrow the filter definition + # So, we add a regex for the type of file that we want and additionally disable the 'types_or' + # option entirely. 
+ 'types_or': [] + files: .*\.(tpp|h|hpp|hpp\.in|cpp|cxx|js|json)$ # Autoremoves unused Python imports - repo: https://github.com/hadialqattan/pycln diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 9b8a5882f3..4256fc6fa8 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -30,57 +30,53 @@ #include - namespace openPMD { -template< typename T > -inline RecordComponent& -RecordComponent::makeConstant(T value) +template +inline RecordComponent &RecordComponent::makeConstant(T value) { - if( written() ) - throw std::runtime_error("A recordComponent can not (yet) be made constant after it has been written."); + if (written()) + throw std::runtime_error( + "A recordComponent can not (yet) be made constant after it has " + "been written."); - auto & rc = get(); + auto &rc = get(); rc.m_constantValue = Attribute(value); rc.m_isConstant = true; return *this; } -template< typename T > -inline RecordComponent& -RecordComponent::makeEmpty( uint8_t dimensions ) +template +inline RecordComponent &RecordComponent::makeEmpty(uint8_t dimensions) { - return makeEmpty( Dataset( - determineDatatype< T >(), - Extent( dimensions, 0 ) ) ); + return makeEmpty(Dataset(determineDatatype(), Extent(dimensions, 0))); } -template< typename T > -inline std::shared_ptr< T > RecordComponent::loadChunk( - Offset o, Extent e ) +template +inline std::shared_ptr RecordComponent::loadChunk(Offset o, Extent e) { uint8_t dim = getDimensionality(); // default arguments // offset = {0u}: expand to right dim {0u, 0u, ...} Offset offset = o; - if( o.size() == 1u && o.at(0) == 0u && dim > 1u ) + if (o.size() == 1u && o.at(0) == 0u && dim > 1u) offset = Offset(dim, 0u); // extent = {-1u}: take full size Extent extent(dim, 1u); - if( e.size() == 1u && e.at(0) == -1u ) + if (e.size() == 1u && e.at(0) == -1u) { extent = getExtent(); - for( uint8_t i = 0u; i < dim; ++i ) + for (uint8_t i = 0u; i < dim; ++i) extent[i] -= offset[i]; 
} else extent = e; uint64_t numPoints = 1u; - for( auto const& dimensionSize : extent ) + for (auto const &dimensionSize : extent) numPoints *= dimensionSize; #if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ @@ -96,23 +92,23 @@ inline std::shared_ptr< T > RecordComponent::loadChunk( #endif } -template< typename T > -inline void RecordComponent::loadChunk( - std::shared_ptr< T > data, - Offset o, - Extent e ) +template +inline void +RecordComponent::loadChunk(std::shared_ptr data, Offset o, Extent e) { Datatype dtype = determineDatatype(data); - if( dtype != getDatatype() ) - if( !isSameInteger< T >( getDatatype() ) && - !isSameFloatingPoint< T >( getDatatype() ) && - !isSameComplexFloatingPoint< T >( getDatatype() ) ) + if (dtype != getDatatype()) + if (!isSameInteger(getDatatype()) && + !isSameFloatingPoint(getDatatype()) && + !isSameComplexFloatingPoint(getDatatype())) { std::string const data_type_str = datatypeToString(getDatatype()); - std::string const requ_type_str = datatypeToString(determineDatatype()); - std::string err_msg = "Type conversion during chunk loading not yet implemented! "; + std::string const requ_type_str = + datatypeToString(determineDatatype()); + std::string err_msg = + "Type conversion during chunk loading not yet implemented! 
"; err_msg += "Data: " + data_type_str + "; Load as: " + requ_type_str; - throw std::runtime_error( err_msg ); + throw std::runtime_error(err_msg); } uint8_t dim = getDimensionality(); @@ -120,59 +116,60 @@ inline void RecordComponent::loadChunk( // default arguments // offset = {0u}: expand to right dim {0u, 0u, ...} Offset offset = o; - if( o.size() == 1u && o.at(0) == 0u && dim > 1u ) + if (o.size() == 1u && o.at(0) == 0u && dim > 1u) offset = Offset(dim, 0u); // extent = {-1u}: take full size Extent extent(dim, 1u); - if( e.size() == 1u && e.at(0) == -1u ) + if (e.size() == 1u && e.at(0) == -1u) { extent = getExtent(); - for( uint8_t i = 0u; i < dim; ++i ) + for (uint8_t i = 0u; i < dim; ++i) extent[i] -= offset[i]; } else extent = e; - if( extent.size() != dim || offset.size() != dim ) + if (extent.size() != dim || offset.size() != dim) { std::ostringstream oss; oss << "Dimensionality of chunk (" << "offset=" << offset.size() << "D, " << "extent=" << extent.size() << "D) " - << "and record component (" - << int(dim) << "D) " + << "and record component (" << int(dim) << "D) " << "do not match."; throw std::runtime_error(oss.str()); } Extent dse = getExtent(); - for( uint8_t i = 0; i < dim; ++i ) - if( dse[i] < offset[i] + extent[i] ) - throw std::runtime_error("Chunk does not reside inside dataset (Dimension on index " + std::to_string(i) - + ". DS: " + std::to_string(dse[i]) - + " - Chunk: " + std::to_string(offset[i] + extent[i]) - + ")"); - if( !data ) - throw std::runtime_error("Unallocated pointer passed during chunk loading."); - - auto & rc = get(); - if( constant() ) + for (uint8_t i = 0; i < dim; ++i) + if (dse[i] < offset[i] + extent[i]) + throw std::runtime_error( + "Chunk does not reside inside dataset (Dimension on index " + + std::to_string(i) + ". 
DS: " + std::to_string(dse[i]) + + " - Chunk: " + std::to_string(offset[i] + extent[i]) + ")"); + if (!data) + throw std::runtime_error( + "Unallocated pointer passed during chunk loading."); + + auto &rc = get(); + if (constant()) { uint64_t numPoints = 1u; - for( auto const& dimensionSize : extent ) + for (auto const &dimensionSize : extent) numPoints *= dimensionSize; - T value = rc.m_constantValue.get< T >(); + T value = rc.m_constantValue.get(); - T* raw_ptr = data.get(); + T *raw_ptr = data.get(); std::fill(raw_ptr, raw_ptr + numPoints, value); - } else + } + else { - Parameter< Operation::READ_DATASET > dRead; + Parameter dRead; dRead.offset = offset; dRead.extent = extent; dRead.dtype = getDatatype(); - dRead.data = std::static_pointer_cast< void >(data); + dRead.data = std::static_pointer_cast(data); rc.m_chunks.push(IOTask(this, dRead)); } } @@ -190,13 +187,10 @@ inline void RecordComponent::loadChunk( template inline void RecordComponent::loadChunkRaw(T *ptr, Offset offset, Extent extent) { - loadChunk( - auxiliary::shareRaw(ptr), - std::move(offset), - std::move(extent)); + loadChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); } -template< typename T > +template inline void RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) { @@ -250,13 +244,10 @@ RecordComponent::storeChunk(std::shared_ptr data, Offset o, Extent e) template void RecordComponent::storeChunkRaw(T *ptr, Offset offset, Extent extent) { - storeChunk( - auxiliary::shareRaw(ptr), - std::move(offset), - std::move(extent)); + storeChunk(auxiliary::shareRaw(ptr), std::move(offset), std::move(extent)); } -template< typename T_ContiguousContainer > +template inline typename std::enable_if_t< auxiliary::IsContiguousContainer_v > RecordComponent::storeChunk(T_ContiguousContainer &data, Offset o, Extent e) @@ -266,64 +257,57 @@ RecordComponent::storeChunk(T_ContiguousContainer &data, Offset o, Extent e) // default arguments // offset = {0u}: expand to right dim 
{0u, 0u, ...} Offset offset = o; - if( o.size() == 1u && o.at(0) == 0u && dim > 1u ) + if (o.size() == 1u && o.at(0) == 0u && dim > 1u) offset = Offset(dim, 0u); // extent = {-1u}: take full size Extent extent(dim, 1u); // avoid outsmarting the user: // - stdlib data container implement 1D -> 1D chunk to write - if( e.size() == 1u && e.at(0) == -1u && dim == 1u ) + if (e.size() == 1u && e.at(0) == -1u && dim == 1u) extent.at(0) = data.size(); else extent = e; - storeChunk( - auxiliary::shareRaw(data.data()), - offset, - extent); + storeChunk(auxiliary::shareRaw(data.data()), offset, extent); } -template< typename T, typename F > -inline DynamicMemoryView< T > -RecordComponent::storeChunk( Offset o, Extent e, F && createBuffer ) +template +inline DynamicMemoryView +RecordComponent::storeChunk(Offset o, Extent e, F &&createBuffer) { - if( constant() ) + if (constant()) throw std::runtime_error( - "Chunks cannot be written for a constant RecordComponent." ); - if( empty() ) + "Chunks cannot be written for a constant RecordComponent."); + if (empty()) throw std::runtime_error( - "Chunks cannot be written for an empty RecordComponent." 
); + "Chunks cannot be written for an empty RecordComponent."); Datatype dtype = determineDatatype(); - if( dtype != getDatatype() ) + if (dtype != getDatatype()) { std::ostringstream oss; - oss << "Datatypes of chunk data (" - << dtype - << ") and record component (" - << getDatatype() - << ") do not match."; + oss << "Datatypes of chunk data (" << dtype + << ") and record component (" << getDatatype() << ") do not match."; throw std::runtime_error(oss.str()); } uint8_t dim = getDimensionality(); - if( e.size() != dim || o.size() != dim ) + if (e.size() != dim || o.size() != dim) { std::ostringstream oss; oss << "Dimensionality of chunk (" << "offset=" << o.size() << "D, " << "extent=" << e.size() << "D) " - << "and record component (" - << int(dim) << "D) " + << "and record component (" << int(dim) << "D) " << "do not match."; throw std::runtime_error(oss.str()); } Extent dse = getExtent(); - for( uint8_t i = 0; i < dim; ++i ) - if( dse[i] < o[i] + e[i] ) - throw std::runtime_error("Chunk does not reside inside dataset (Dimension on index " + std::to_string(i) - + ". DS: " + std::to_string(dse[i]) - + " - Chunk: " + std::to_string(o[i] + e[i]) - + ")"); + for (uint8_t i = 0; i < dim; ++i) + if (dse[i] < o[i] + e[i]) + throw std::runtime_error( + "Chunk does not reside inside dataset (Dimension on index " + + std::to_string(i) + ". DS: " + std::to_string(dse[i]) + + " - Chunk: " + std::to_string(o[i] + e[i]) + ")"); /* * The openPMD backend might not yet know about this dataset. @@ -333,7 +317,7 @@ RecordComponent::storeChunk( Offset o, Extent e, F && createBuffer ) seriesFlush({FlushLevel::SkeletonOnly}); size_t size = 1; - for( auto ext : e ) + for (auto ext : e) { size *= ext; } @@ -341,17 +325,17 @@ RecordComponent::storeChunk( Offset o, Extent e, F && createBuffer ) * Flushing the skeleton does not create datasets, * so we might need to do it now. 
*/ - if( !written() ) + if (!written()) { - auto & rc = get(); - Parameter< Operation::CREATE_DATASET > dCreate; + auto &rc = get(); + Parameter dCreate; dCreate.name = rc.m_name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); dCreate.options = rc.m_dataset.options; IOHandler()->enqueue(IOTask(this, dCreate)); } - Parameter< Operation::GET_BUFFER_VIEW > getBufferView; + Parameter getBufferView; getBufferView.offset = o; getBufferView.extent = e; getBufferView.dtype = getDatatype(); @@ -369,22 +353,17 @@ RecordComponent::storeChunk( Offset o, Extent e, F && createBuffer ) return DynamicMemoryView{std::move(getBufferView), size, *this}; } -template< typename T > -inline DynamicMemoryView< T > -RecordComponent::storeChunk( Offset offset, Extent extent ) +template +inline DynamicMemoryView +RecordComponent::storeChunk(Offset offset, Extent extent) { - return storeChunk< T >( - std::move( offset ), - std::move( extent ), - []( size_t size ) - { + return storeChunk(std::move(offset), std::move(extent), [](size_t size) { #if (defined(_LIBCPP_VERSION) && _LIBCPP_VERSION < 11000) || \ (defined(__apple_build_version__) && __clang_major__ < 14) - return std::shared_ptr< T >{ - new T[ size ], []( auto * ptr ) { delete[] ptr; } }; + return std::shared_ptr{new T[size], [](auto *ptr) { delete[] ptr; }}; #else return std::shared_ptr< T[] >{ new T[ size ] }; #endif - } ); -} + }); } +} // namespace openPMD diff --git a/include/openPMD/config.hpp.in b/include/openPMD/config.hpp.in index 1989dfcbca..8da1b5e6fc 100644 --- a/include/openPMD/config.hpp.in +++ b/include/openPMD/config.hpp.in @@ -21,27 +21,27 @@ #pragma once #ifndef openPMD_HAS_CXX17 -# cmakedefine01 openPMD_HAS_CXX17 +#cmakedefine01 openPMD_HAS_CXX17 #endif #ifndef openPMD_HAVE_MPI -# cmakedefine01 openPMD_HAVE_MPI +#cmakedefine01 openPMD_HAVE_MPI #endif #define openPMD_HAVE_JSON 1 #ifndef openPMD_HAVE_HDF5 -# cmakedefine01 openPMD_HAVE_HDF5 +#cmakedefine01 openPMD_HAVE_HDF5 #endif #ifndef 
openPMD_HAVE_ADIOS1 -# cmakedefine01 openPMD_HAVE_ADIOS1 +#cmakedefine01 openPMD_HAVE_ADIOS1 #endif #ifndef openPMD_HAVE_ADIOS2 -# cmakedefine01 openPMD_HAVE_ADIOS2 +#cmakedefine01 openPMD_HAVE_ADIOS2 #endif #ifndef openPMD_HAVE_CUDA_EXAMPLES -# cmakedefine01 openPMD_HAVE_CUDA_EXAMPLES +#cmakedefine01 openPMD_HAVE_CUDA_EXAMPLES #endif From b3c70dd731f3dc584154585c66879065aea5a085 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 2 Apr 2023 21:17:50 -0700 Subject: [PATCH 65/82] Doc: More HTML Updates (CSS) (#1413) More CSS updates to HTML5 elements in new docutils. Sections: first write and read. Not further ` diff --git a/docs/source/usage/firstwrite.rst b/docs/source/usage/firstwrite.rst index 449f66ec48..193530f09f 100644 --- a/docs/source/usage/firstwrite.rst +++ b/docs/source/usage/firstwrite.rst @@ -10,34 +10,34 @@ Step-by-step: how to write scientific data with openPMD-api? From 32cb87d1b0012493287204d7a8c78af9a1141710 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Sun, 2 Apr 2023 23:43:54 -0700 Subject: [PATCH 66/82] Release 0.15.1 (#1414) Regression fixes for the 0.15.0 release. --- .github/workflows/windows.yml | 2 +- CHANGELOG.rst | 45 +++++++++++++++++++++++++++++++++++ CITATION.cff | 2 +- CMakeLists.txt | 2 +- docs/source/conf.py | 4 ++-- docs/source/index.rst | 2 +- setup.py | 2 +- test/SerialIOTest.cpp | 2 +- 8 files changed, 53 insertions(+), 8 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 22211a4a8d..7b1cb5d9a9 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -42,7 +42,7 @@ jobs: python3.exe -m pip wheel . if(!$?) { Exit $LASTEXITCODE } - python3.exe -m pip install openPMD_api-0.15.0-cp39-cp39-win_amd64.whl + python3.exe -m pip install openPMD_api-0.15.1-cp39-cp39-win_amd64.whl if(!$?) 
{ Exit $LASTEXITCODE } python3.exe -c "import openpmd_api as api; print(api.variants)" diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 4ebaacde92..8d03c5bb98 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,6 +3,51 @@ Changelog ========= +0.15.1 +------ +**Date:** 2023-04-02 + +Build Regressions + +This release fixes build regressions and minor documentation updates for the 0.15.0 release. + +Changes to "0.15.0" +^^^^^^^^^^^^^^^^^^^ + +Bug Fixes +""""""""" + +- Build issues: + + - CMake: Fix Python Install Directory #1393 + - Work-Around: libc++ shared_ptr array #1409 + - Artifact Placement in Windows Wheels #1400 + - macOS AppleClang12 Fixes #1395 + - ADIOS1: + + - ADIOS1 on macOS #1396 + - If no ADIOS1, then add ADIOS1 sources to main lib #1407 + - Instantiate only parallel ADIOS1 IO Handler in parallel ADIOS1 lib #1411 + +Other +""""" + +- Docker: CMake 3.24+: ZLIB_USE_STATIC_LIBS (#1410 +- CI: + + - Test on Ubuntu 20.04 #1408 + - clang-format also for ``.tpp`` and ``.hpp.in`` files #1403 +- docs: + + - update funding #1412 + - HTML5: CSS updates #1397 #1413 + - README: Remove LGTM Batches #1402 + - Docs TOML and ADIOS2 best practices #1404 + - Docs: ADIOS1 EOL in Overview #1398 + - Releases: Nils Schild (IPP) #1394 + - Formatting of lists in 0.15.0 changelog #1399 + + 0.15.0 ------ **Date:** 2023-03-25 diff --git a/CITATION.cff b/CITATION.cff index ebb82b34d7..df49273bf9 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -25,7 +25,7 @@ contact: orcid: https://orcid.org/0000-0003-1943-7141 email: axelhuebl@lbl.gov title: "openPMD-api: C++ & Python API for Scientific I/O with openPMD" -version: 0.15.0 +version: 0.15.1 repository-code: https://github.com/openPMD/openPMD-api doi: 10.14278/rodare.27 license: LGPL-3.0-or-later diff --git a/CMakeLists.txt b/CMakeLists.txt index 4567c7c18b..244794c9da 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ # cmake_minimum_required(VERSION 3.15.0) -project(openPMD VERSION 0.15.0) # LANGUAGES CXX 
+project(openPMD VERSION 0.15.1) # LANGUAGES CXX # the openPMD "markup"/"schema" standard version set(openPMD_STANDARD_VERSION 1.1.0) diff --git a/docs/source/conf.py b/docs/source/conf.py index 03d8d7bbf7..2096024ad2 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -85,9 +85,9 @@ # built documents. # # The short X.Y version. -version = u'0.15.0' +version = u'0.15.1' # The full version, including alpha/beta/rc tags. -release = u'0.15.0' +release = u'0.15.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/docs/source/index.rst b/docs/source/index.rst index 62fff08c44..37b1e4c630 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -43,7 +43,7 @@ openPMD-api version supported openPMD standard versions ======================== =================================== ``2.0.0+`` ``2.0.0+`` (not released yet) ``1.0.0+`` ``1.0.1-1.1.0`` (not released yet) -``0.13.1-0.15.0`` (beta) ``1.0.0-1.1.0`` +``0.13.1-0.15.1`` (beta) ``1.0.0-1.1.0`` ``0.1.0-0.12.0`` (alpha) ``1.0.0-1.1.0`` ======================== =================================== diff --git a/setup.py b/setup.py index 1695046cd6..09cad27c19 100644 --- a/setup.py +++ b/setup.py @@ -170,7 +170,7 @@ def build_extension(self, ext): setup( name='openPMD-api', # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version='0.15.0', + version='0.15.1', author='Axel Huebl, Franz Poeschel, Fabian Koller, Junmin Gu', author_email='axelhuebl@lbl.gov, f.poeschel@hzdr.de', maintainer='Axel Huebl', diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 9dda38cd4a..78316dd0cf 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -6031,7 +6031,7 @@ void adios2_bp5_no_steps(bool usesteps) IO.DefineAttribute("/openPMD", std::string("1.1.0")); IO.DefineAttribute("/openPMDextension", uint32_t(0)); IO.DefineAttribute("/software", std::string("openPMD-api")); - IO.DefineAttribute("/softwareVersion", 
std::string("0.15.0-dev")); + IO.DefineAttribute("/softwareVersion", std::string("0.15.1-dev")); IO.DefineAttribute("/data/0/dt", double(1)); IO.DefineAttribute( From 85323350058daf791f41c1b07dccef8c6311acc0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 5 Apr 2023 09:39:34 -0700 Subject: [PATCH 67/82] [pre-commit.ci] pre-commit autoupdate (#1416) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/Lucas-C/pre-commit-hooks: v1.4.2 → v1.5.1](https://github.com/Lucas-C/pre-commit-hooks/compare/v1.4.2...v1.5.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 2d6270105d..b489431da0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -49,7 +49,7 @@ repos: # Changes tabs to spaces - repo: https://github.com/Lucas-C/pre-commit-hooks - rev: v1.4.2 + rev: v1.5.1 hooks: - id: remove-tabs From b3d3057e141af3a40dde5f00262a5671979a95c7 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 5 Apr 2023 11:06:29 -0700 Subject: [PATCH 68/82] `version.hpp`: 0.15.1 (#1417) Forgot to bump this for the release. Will patch this in for package managers... 
--- include/openPMD/version.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/openPMD/version.hpp b/include/openPMD/version.hpp index 29a4f7d5ba..ab82e630c1 100644 --- a/include/openPMD/version.hpp +++ b/include/openPMD/version.hpp @@ -29,7 +29,7 @@ */ #define OPENPMDAPI_VERSION_MAJOR 0 #define OPENPMDAPI_VERSION_MINOR 15 -#define OPENPMDAPI_VERSION_PATCH 0 +#define OPENPMDAPI_VERSION_PATCH 1 #define OPENPMDAPI_VERSION_LABEL "" /** @} */ From 6940f82c5e77a7c5e1df296d45bf0fe67bd851b3 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 10 Apr 2023 11:29:14 -0700 Subject: [PATCH 69/82] GitHub Actions: macOS has 3 Cores (#1421) https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources --- .github/workflows/macos.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 9f6616fa9c..04fbbbe30e 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -40,7 +40,7 @@ jobs: -DopenPMD_USE_HDF5=ON \ -DopenPMD_USE_ADIOS2=ON \ -DopenPMD_USE_INVASIVE_TESTS=ON - cmake --build build --parallel 2 + cmake --build build --parallel 3 ctest --test-dir build --verbose appleclang12_py_ad1: @@ -59,7 +59,7 @@ jobs: tar -xzf adios-1.13.1.tar.gz cd adios-1.13.1/ CFLAGS="-fPIC" ./configure --enable-static --enable-shared --prefix=/usr/local --without-mpi --disable-fortran - make -j 2 + make -j 3 make install set -e - name: Build @@ -76,7 +76,7 @@ jobs: -DopenPMD_USE_ADIOS1=ON \ -DopenPMD_USE_ADIOS2=OFF \ -DopenPMD_USE_INVASIVE_TESTS=ON - cmake --build build --parallel 2 + cmake --build build --parallel 3 ctest --test-dir build --verbose # TODO: apple_conda_ompi_all (similar to conda_ompi_all on Linux) From bb98f66eba6552bb919cd8b22614ed6d6c5e25a3 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 10 Apr 2023 11:29:29 -0700 Subject: [PATCH 70/82] Post 0.15.0 Changelog Template (#1420) 
Change back to `-dev` after the 0.15.0 & 0.15.1 releases :) --- .github/workflows/windows.yml | 2 +- CHANGELOG.rst | 21 +++++++++++++++++++++ CITATION.cff | 2 +- CMakeLists.txt | 2 +- docs/source/conf.py | 4 ++-- include/openPMD/version.hpp | 6 +++--- setup.py | 2 +- test/SerialIOTest.cpp | 2 +- 8 files changed, 31 insertions(+), 10 deletions(-) diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 7b1cb5d9a9..27086ac9cb 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -42,7 +42,7 @@ jobs: python3.exe -m pip wheel . if(!$?) { Exit $LASTEXITCODE } - python3.exe -m pip install openPMD_api-0.15.1-cp39-cp39-win_amd64.whl + python3.exe -m pip install openPMD_api-0.16.0.dev0-cp39-cp39-win_amd64.whl if(!$?) { Exit $LASTEXITCODE } python3.exe -c "import openpmd_api as api; print(api.variants)" diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 8d03c5bb98..869850b130 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -3,6 +3,27 @@ Changelog ========= +0.16.0 +------ +**Date:** TBA + +[Title] + +[Summary] + +Changes to "0.15.0" +^^^^^^^^^^^^^^^^^^^ + +Features +"""""""" + +Bug Fixes +""""""""" + +Other +""""" + + 0.15.1 ------ **Date:** 2023-04-02 diff --git a/CITATION.cff b/CITATION.cff index df49273bf9..5a4216fa91 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -25,7 +25,7 @@ contact: orcid: https://orcid.org/0000-0003-1943-7141 email: axelhuebl@lbl.gov title: "openPMD-api: C++ & Python API for Scientific I/O with openPMD" -version: 0.15.1 +version: 0.16.0-dev repository-code: https://github.com/openPMD/openPMD-api doi: 10.14278/rodare.27 license: LGPL-3.0-or-later diff --git a/CMakeLists.txt b/CMakeLists.txt index 244794c9da..b77a1e2fa3 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -2,7 +2,7 @@ # cmake_minimum_required(VERSION 3.15.0) -project(openPMD VERSION 0.15.1) # LANGUAGES CXX +project(openPMD VERSION 0.16.0) # LANGUAGES CXX # the openPMD "markup"/"schema" standard version set(openPMD_STANDARD_VERSION 
1.1.0) diff --git a/docs/source/conf.py b/docs/source/conf.py index 2096024ad2..cf94621735 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -85,9 +85,9 @@ # built documents. # # The short X.Y version. -version = u'0.15.1' +version = u'0.16.0' # The full version, including alpha/beta/rc tags. -release = u'0.15.1' +release = u'0.16.0-dev' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/include/openPMD/version.hpp b/include/openPMD/version.hpp index ab82e630c1..c57e3ecf17 100644 --- a/include/openPMD/version.hpp +++ b/include/openPMD/version.hpp @@ -28,9 +28,9 @@ * @{ */ #define OPENPMDAPI_VERSION_MAJOR 0 -#define OPENPMDAPI_VERSION_MINOR 15 -#define OPENPMDAPI_VERSION_PATCH 1 -#define OPENPMDAPI_VERSION_LABEL "" +#define OPENPMDAPI_VERSION_MINOR 16 +#define OPENPMDAPI_VERSION_PATCH 0 +#define OPENPMDAPI_VERSION_LABEL "dev" /** @} */ /** maximum supported version of the openPMD standard (read & write, diff --git a/setup.py b/setup.py index 09cad27c19..9db688042a 100644 --- a/setup.py +++ b/setup.py @@ -170,7 +170,7 @@ def build_extension(self, ext): setup( name='openPMD-api', # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version='0.15.1', + version='0.16.0.dev', author='Axel Huebl, Franz Poeschel, Fabian Koller, Junmin Gu', author_email='axelhuebl@lbl.gov, f.poeschel@hzdr.de', maintainer='Axel Huebl', diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 78316dd0cf..3d48b01789 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -6031,7 +6031,7 @@ void adios2_bp5_no_steps(bool usesteps) IO.DefineAttribute("/openPMD", std::string("1.1.0")); IO.DefineAttribute("/openPMDextension", uint32_t(0)); IO.DefineAttribute("/software", std::string("openPMD-api")); - IO.DefineAttribute("/softwareVersion", std::string("0.15.1-dev")); + IO.DefineAttribute("/softwareVersion", std::string("0.16.0-dev")); IO.DefineAttribute("/data/0/dt", double(1)); IO.DefineAttribute( 
From 489038843ed1b2679629594640182af4a6255abe Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 11 Apr 2023 10:57:38 -0700 Subject: [PATCH 71/82] Remove ADIOS1 - Long Live ADIOS2 (#1419) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Remove ADIOS1 - Long Live ADIOS2 Finally removing the deprecated and end-of-life backend ADIOS1. Long live ADIOS2! :rocket: * Fix forward declaration of ADIOS2IOHandlerImpl in Writable.hpp * Remove mentions of ADIOS1 from Serial testing --------- Co-authored-by: Franz Pöschel --- .github/ISSUE_TEMPLATE/bug_report.md | 1 - .github/ci/sanitizer/clang/Leak.supp | 3 - .../spack.yaml | 3 - .../spack.yaml | 3 - .../spack.yaml | 3 - .../spack.yaml | 3 - .../spack.yaml | 3 - .../spack.yaml | 3 - .github/workflows/codeql.yml | 2 +- .github/workflows/linux.yml | 53 +- .github/workflows/macos.yml | 12 +- .github/workflows/tooling.yml | 13 +- CMakeLists.txt | 230 +- Dockerfile | 14 +- NEWS.rst | 11 + README.md | 12 +- docs/source/backends/adios1.rst | 78 +- docs/source/backends/adios2.rst | 4 - docs/source/backends/overview.rst | 2 +- docs/source/details/adios1.json | 7 - docs/source/details/adios1.toml | 2 - docs/source/details/backendconfig.rst | 19 +- docs/source/details/config_layout.json | 1 - docs/source/details/config_layout.toml | 3 - docs/source/details/mpi.rst | 2 +- docs/source/dev/buildoptions.rst | 2 - docs/source/index.rst | 2 +- docs/source/install/install.rst | 6 +- docs/source/usage/firstread.rst | 2 +- docs/source/usage/firstwrite.rst | 2 +- docs/source/usage/workflow.rst | 3 +- examples/8_benchmark_parallel.cpp | 4 +- include/openPMD/Error.hpp | 2 +- include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp | 279 --- .../openPMD/IO/ADIOS/ADIOS1FilePosition.hpp | 36 - include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp | 81 - .../openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp | 64 - .../IO/ADIOS/CommonADIOS1IOHandler.hpp | 131 -- .../IO/ADIOS/ParallelADIOS1IOHandler.hpp | 69 - 
.../IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp | 71 - include/openPMD/IO/Format.hpp | 1 - include/openPMD/RecordComponent.hpp | 1 - include/openPMD/ThrowError.hpp | 9 - include/openPMD/backend/Writable.hpp | 4 - include/openPMD/config.hpp.in | 2 +- src/Format.cpp | 24 +- src/IO/ADIOS/ADIOS1IOHandler.cpp | 467 ---- src/IO/ADIOS/CommonADIOS1IOHandler.cpp | 2054 ----------------- src/IO/ADIOS/ParallelADIOS1IOHandler.cpp | 511 ---- src/IO/AbstractIOHandlerHelper.cpp | 37 - src/Iteration.cpp | 14 - src/Series.cpp | 1 - src/auxiliary/JSON.cpp | 2 +- src/config.cpp | 4 +- test/CoreTest.cpp | 33 +- test/ParallelIOTest.cpp | 111 +- test/SerialIOTest.cpp | 53 +- test/python/unittest/API/APITest.py | 96 +- 58 files changed, 130 insertions(+), 4535 deletions(-) rename .github/ci/spack-envs/{clang14_py311_nompi_h5_ad1_ad2 => clang14_py311_nompi_h5_ad2}/spack.yaml (95%) rename .github/ci/spack-envs/{clang7_nopy_ompi_h5_ad1_ad2 => clang7_nopy_ompi_h5_ad2}/spack.yaml (95%) rename .github/ci/spack-envs/{clang7_nopy_ompi_h5_ad1_ad2_bp3_libcpp => clang7_nopy_ompi_h5_ad2_libcpp}/spack.yaml (95%) rename .github/ci/spack-envs/{clang8_py38_mpich_h5_ad1_ad2 => clang8_py38_mpich_h5_ad2}/spack.yaml (95%) rename .github/ci/spack-envs/{clangtidy_nopy_ompi_h5_ad1_ad2 => clangtidy_nopy_ompi_h5_ad2}/spack.yaml (95%) rename .github/ci/spack-envs/{gcc7_py36_ompi_h5_ad1_ad2 => gcc7_py36_ompi_h5_ad2}/spack.yaml (95%) delete mode 100644 docs/source/details/adios1.json delete mode 100644 docs/source/details/adios1.toml delete mode 100644 include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp delete mode 100644 include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp delete mode 100644 include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp delete mode 100644 include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp delete mode 100644 include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp delete mode 100644 include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp delete mode 100644 include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp delete mode 
100644 src/IO/ADIOS/ADIOS1IOHandler.cpp delete mode 100644 src/IO/ADIOS/CommonADIOS1IOHandler.cpp delete mode 100644 src/IO/ADIOS/ParallelADIOS1IOHandler.cpp diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 86f073b184..eaef13f4ff 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -43,7 +43,6 @@ A clear and concise description of what you expected to happen. - machine: [Are you running on a public cluster? It's likely we compute on it as well!] - name and version of Python implementation: [e.g. CPython 3.9] - version of HDF5: [e.g. 1.12.0] - - version of ADIOS1: [e.g. 1.13.1] - version of ADIOS2: [e.g. 2.7.1] - name and version of MPI: [e.g. OpenMPI 4.1.1] diff --git a/.github/ci/sanitizer/clang/Leak.supp b/.github/ci/sanitizer/clang/Leak.supp index 81e8ee7f75..a093615cab 100644 --- a/.github/ci/sanitizer/clang/Leak.supp +++ b/.github/ci/sanitizer/clang/Leak.supp @@ -9,9 +9,6 @@ leak:libevent* leak:*py* # 10 byte memleak in main of "make" 4.1 leak:/usr/bin/make* -# ADIOS 1.13.1 -leak:adios_read_bp_open_file -leak:adios_inq_var # ADIOS2 leak:adios2::core::engine::SstReader::* leak:adios2::core::engine::SstWriter::* diff --git a/.github/ci/spack-envs/clang14_py311_nompi_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/clang14_py311_nompi_h5_ad2/spack.yaml similarity index 95% rename from .github/ci/spack-envs/clang14_py311_nompi_h5_ad1_ad2/spack.yaml rename to .github/ci/spack-envs/clang14_py311_nompi_h5_ad2/spack.yaml index 2abc61177a..06e9d1ac40 100644 --- a/.github/ci/spack-envs/clang14_py311_nompi_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/clang14_py311_nompi_h5_ad2/spack.yaml @@ -6,15 +6,12 @@ # spack: specs: - - adios - adios2 - hdf5 packages: hdf5: variants: ~mpi - adios: - variants: ~mpi ~zfp ~sz ~lz4 ~blosc adios2: variants: ~mpi ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: diff --git 
a/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad2/spack.yaml similarity index 95% rename from .github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2/spack.yaml rename to .github/ci/spack-envs/clang7_nopy_ompi_h5_ad2/spack.yaml index e5a71811e5..fa2e588608 100644 --- a/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad2/spack.yaml @@ -6,14 +6,11 @@ # spack: specs: - - adios - adios2 - hdf5 - openmpi packages: - adios: - variants: ~zfp ~sz ~lz4 ~blosc adios2: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: diff --git a/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2_bp3_libcpp/spack.yaml b/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad2_libcpp/spack.yaml similarity index 95% rename from .github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2_bp3_libcpp/spack.yaml rename to .github/ci/spack-envs/clang7_nopy_ompi_h5_ad2_libcpp/spack.yaml index 5308c57ce5..8a8dd00024 100644 --- a/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2_bp3_libcpp/spack.yaml +++ b/.github/ci/spack-envs/clang7_nopy_ompi_h5_ad2_libcpp/spack.yaml @@ -6,14 +6,11 @@ # spack: specs: - - adios - adios2 - hdf5 - openmpi packages: - adios: - variants: ~zfp ~sz ~lz4 ~blosc adios2: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: diff --git a/.github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/clang8_py38_mpich_h5_ad2/spack.yaml similarity index 95% rename from .github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/spack.yaml rename to .github/ci/spack-envs/clang8_py38_mpich_h5_ad2/spack.yaml index 4a6ab7f74e..a6d568611f 100644 --- a/.github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/clang8_py38_mpich_h5_ad2/spack.yaml @@ -6,14 +6,11 @@ # spack: specs: - - adios - adios2@2.7.1 - hdf5 - mpich packages: - adios: - variants: ~zfp ~sz ~lz4 ~blosc adios2: variants: ~zfp ~sz 
~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: diff --git a/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad2/spack.yaml similarity index 95% rename from .github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/spack.yaml rename to .github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad2/spack.yaml index 98acde6e62..1543ec794c 100644 --- a/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad2/spack.yaml @@ -6,14 +6,11 @@ # spack: specs: - - adios - adios2 - hdf5 - openmpi packages: - adios: - variants: ~zfp ~sz ~lz4 ~blosc adios2: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: diff --git a/.github/ci/spack-envs/gcc7_py36_ompi_h5_ad1_ad2/spack.yaml b/.github/ci/spack-envs/gcc7_py36_ompi_h5_ad2/spack.yaml similarity index 95% rename from .github/ci/spack-envs/gcc7_py36_ompi_h5_ad1_ad2/spack.yaml rename to .github/ci/spack-envs/gcc7_py36_ompi_h5_ad2/spack.yaml index f0b051678e..8785650cf2 100644 --- a/.github/ci/spack-envs/gcc7_py36_ompi_h5_ad1_ad2/spack.yaml +++ b/.github/ci/spack-envs/gcc7_py36_ompi_h5_ad2/spack.yaml @@ -6,14 +6,11 @@ # spack: specs: - - adios - adios2 - hdf5 - openmpi packages: - adios: - variants: ~zfp ~sz ~lz4 ~blosc adios2: variants: ~zfp ~sz ~png ~dataman ~python ~fortran ~ssc ~shared ~bzip2 cmake: diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 1925663362..02d72d06e9 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -33,7 +33,7 @@ jobs: - name: Install Packages run: | sudo apt-get update - sudo apt-get install --yes cmake openmpi-bin libopenmpi-dev libhdf5-openmpi-dev libadios-openmpi-dev + sudo apt-get install --yes cmake openmpi-bin libopenmpi-dev libhdf5-openmpi-dev python -m pip install --upgrade pip python -m pip install --upgrade wheel diff --git a/.github/workflows/linux.yml b/.github/workflows/linux.yml index 
17226a99d6..91f089e03a 100644 --- a/.github/workflows/linux.yml +++ b/.github/workflows/linux.yml @@ -39,14 +39,14 @@ jobs: cmake --build build --parallel 2 ctest --test-dir build --output-on-failure - clang7_nopy_ompi_h5_ad1_ad2_bp3_libcpp: + clang7_nopy_ompi_h5_ad2_libcpp: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 - with: {path: /opt/spack, key: clang7_nopy_ompi_h5_ad1_ad2_bp3_libcpp_v2} + with: {path: /opt/spack, key: clang7_nopy_ompi_h5_ad2_libcpp_v2} - name: Install run: | sudo apt-get update @@ -62,7 +62,7 @@ jobs: mpiexec --version perl --version python --version - eval $(spack env activate --sh .github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2_bp3_libcpp/) + eval $(spack env activate --sh .github/ci/spack-envs/clang7_nopy_ompi_h5_ad2_libcpp/) spack install share/openPMD/download_samples.sh build @@ -70,7 +70,6 @@ jobs: -DopenPMD_USE_PYTHON=OFF \ -DopenPMD_USE_MPI=ON \ -DopenPMD_USE_HDF5=ON \ - -DopenPMD_USE_ADIOS1=ON \ -DopenPMD_USE_ADIOS2=ON \ -DopenPMD_USE_INVASIVE_TESTS=ON \ -DCMAKE_VERBOSE_MAKEFILE=ON @@ -79,7 +78,6 @@ jobs: find . -name *.bp | xargs -n1 -P1 -I {} rm -rf {} find . 
-name *.bp.dir | xargs -n1 -P1 -I {} rm -rf {} - export OPENPMD_BP_BACKEND=ADIOS1 ctest --test-dir build --output-on-failure clang7_nopy_ompi_h5_ad2_newLayout: @@ -98,7 +96,7 @@ jobs: - name: Build env: {CC: clang-7, CXX: clang++-7, CXXFLAGS: -Werror, OPENPMD2_ADIOS2_SCHEMA: 20210209} run: | - eval $(spack env activate --sh .github/ci/spack-envs/clang7_nopy_ompi_h5_ad1_ad2/) + eval $(spack env activate --sh .github/ci/spack-envs/clang7_nopy_ompi_h5_ad2/) spack install share/openPMD/download_samples.sh build @@ -106,7 +104,6 @@ jobs: -DopenPMD_USE_PYTHON=OFF \ -DopenPMD_USE_MPI=ON \ -DopenPMD_USE_HDF5=ON \ - -DopenPMD_USE_ADIOS1=OFF \ -DopenPMD_USE_ADIOS2=ON \ -DopenPMD_USE_INVASIVE_TESTS=ON \ -DCMAKE_VERBOSE_MAKEFILE=ON @@ -114,16 +111,16 @@ jobs: ctest --test-dir build --output-on-failure # TODO -# clang7_py36_nompi_h5_ad1_ad2_libstdc++ +# clang7_py36_nompi_h5_ad2_libstdc++ - clang14_py311_nompi_h5_ad1_ad2: + clang14_py311_nompi_h5_ad2: runs-on: ubuntu-22.04 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 - with: {path: /opt/spack, key: clang14_py311_nompi_h5_ad1_ad2_v2 } + with: {path: /opt/spack, key: clang14_py311_nompi_h5_ad2_v2 } - name: Install run: | sudo apt update @@ -135,7 +132,7 @@ jobs: - name: Build env: {CC: clang-14, CXX: clang++-14, CXXFLAGS: -Werror} run: | - eval $(spack env activate --sh .github/ci/spack-envs/clang14_py311_nompi_h5_ad1_ad2/) + eval $(spack env activate --sh .github/ci/spack-envs/clang14_py311_nompi_h5_ad2/) spack install share/openPMD/download_samples.sh build @@ -143,7 +140,6 @@ jobs: -DopenPMD_USE_PYTHON=ON \ -DopenPMD_USE_MPI=OFF \ -DopenPMD_USE_HDF5=ON \ - -DopenPMD_USE_ADIOS1=ON \ -DopenPMD_USE_ADIOS2=ON \ -DopenPMD_USE_INVASIVE_TESTS=ON \ -DCMAKE_VERBOSE_MAKEFILE=ON \ @@ -152,14 +148,14 @@ jobs: ctest --test-dir build --output-on-failure # ADIOS2 v2.7.1 - clang8_py38_mpich_h5_ad1_ad2_newLayout: + clang8_py38_mpich_h5_ad2_newLayout: runs-on: 
ubuntu-20.04 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 - with: {path: /opt/spack, key: clang8_py38_mpich_h5_ad1_ad2_newLayout_v2 } + with: {path: /opt/spack, key: clang8_py38_mpich_h5_ad2_newLayout_v2 } - name: Install run: | sudo apt-get update @@ -172,7 +168,7 @@ jobs: mpiexec --version perl --version python --version - eval $(spack env activate --sh .github/ci/spack-envs/clang8_py38_mpich_h5_ad1_ad2/) + eval $(spack env activate --sh .github/ci/spack-envs/clang8_py38_mpich_h5_ad2/) spack install share/openPMD/download_samples.sh build @@ -180,35 +176,34 @@ jobs: -DopenPMD_USE_PYTHON=OFF \ -DopenPMD_USE_MPI=ON \ -DopenPMD_USE_HDF5=ON \ - -DopenPMD_USE_ADIOS1=ON \ -DopenPMD_USE_ADIOS2=ON \ -DopenPMD_USE_INVASIVE_TESTS=ON cmake --build build --parallel 2 ctest --test-dir build --output-on-failure # TODO: (old Travis-CI coverage) -# clang10_py38_ompi_h5_1-10-6_ad1_ad2_release +# clang10_py38_ompi_h5_1-10-6_ad2_release # ..._h5coll with OPENPMD_HDF5_INDEPENDENT: OFF # TODO: (old Travis-CI coverage) -# gcc-4.9.4_nopy_nompi_h5_ad1_ad2 -# gcc-4.9.4_nopy_ompi_h5_ad1_ad2 -# gcc-9.3.0_nopy_nompi_h5_ad1_ad2-2.6.0 -# gcc-7.4.0_nopy_ompi_h5_ad1_h5coll -# gcc-6.5.0_py35_nompi_h5_ad1-1.13.1 -# gcc-8.1.0_py37_nompi_h5_ad1_static +# gcc-4.9.4_nopy_nompi_h5_ad2 +# gcc-4.9.4_nopy_ompi_h5_ad2 +# gcc-9.3.0_nopy_nompi_h5_ad2-2.6.0 +# gcc-7.4.0_nopy_ompi_h5_h5coll +# gcc-6.5.0_py35_nompi_h5-1.13.1 +# gcc-8.1.0_py37_nompi_h5_static # gcc-6.5.0_py36_nompi_h5-1.8.13 # gcc-4.8.5_py35_nompi_h5 -# gcc-7.4.0_py_ompi_h5_ad1_ad2_coveralls +# gcc-7.4.0_py_ompi_h5_ad2_coveralls - gcc7_py36_pd_dd_ompi_h5_ad1_ad2: + gcc7_py36_pd_dd_ompi_h5_ad2: runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 - with: {path: /opt/spack, key: gcc7_py36_ompi_h5_ad1_ad2_v2 } + with: {path: /opt/spack, key: gcc7_py36_ompi_h5_ad2_v2 } - name: 
Install run: | sudo apt-get update @@ -223,7 +218,7 @@ jobs: - name: Build env: {CC: gcc-7, CXX: g++-7, CXXFLAGS: -Werror} run: | - eval $(spack env activate --sh .github/ci/spack-envs/gcc7_py36_ompi_h5_ad1_ad2/) + eval $(spack env activate --sh .github/ci/spack-envs/gcc7_py36_ompi_h5_ad2/) spack install share/openPMD/download_samples.sh build @@ -231,7 +226,6 @@ jobs: -DopenPMD_USE_PYTHON=ON \ -DopenPMD_USE_MPI=ON \ -DopenPMD_USE_HDF5=ON \ - -DopenPMD_USE_ADIOS1=ON \ -DopenPMD_USE_ADIOS2=ON \ -DopenPMD_USE_INVASIVE_TESTS=ON cmake --build build --parallel 2 @@ -245,7 +239,7 @@ jobs: - name: Install run: | sudo apt-get update - sudo apt-get install g++ libopenmpi-dev libhdf5-openmpi-dev libadios-dev python3 python3-numpy python3-mpi4py python3-pandas + sudo apt-get install g++ libopenmpi-dev libhdf5-openmpi-dev python3 python3-numpy python3-mpi4py python3-pandas # TODO ADIOS2 - name: Build env: {CXXFLAGS: -Werror, PKG_CONFIG_PATH: /usr/lib/x86_64-linux-gnu/pkgconfig} @@ -313,7 +307,6 @@ jobs: -DopenPMD_USE_PYTHON=ON \ -DopenPMD_USE_MPI=ON \ -DopenPMD_USE_HDF5=ON \ - -DopenPMD_USE_ADIOS1=ON \ -DopenPMD_USE_ADIOS2=ON \ -DopenPMD_USE_INVASIVE_TESTS=ON cmake --build build --parallel 2 diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 04fbbbe30e..2295770ce2 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -8,7 +8,7 @@ concurrency: jobs: # TODO: (old Travis-CI coverage) -# appleclang9_py37_nompi_h5_ad1 +# appleclang9_py37_nompi_h5 # appleclang10_py37_h5_ad2_libcpp # appleclang11_nopy_nompi_h5_ad2 @@ -43,7 +43,7 @@ jobs: cmake --build build --parallel 3 ctest --test-dir build --verbose - appleclang12_py_ad1: + appleclang12_py: runs-on: macos-10.15 # next: macOS-11 if: github.event.pull_request.draft == false @@ -54,13 +54,6 @@ jobs: run: | set +e python3 -m pip install -U numpy pandas - - curl -Lo adios-1.13.1.tar.gz http://users.nccs.gov/~pnorbert/adios-1.13.1.tar.gz - tar -xzf adios-1.13.1.tar.gz - cd adios-1.13.1/ 
- CFLAGS="-fPIC" ./configure --enable-static --enable-shared --prefix=/usr/local --without-mpi --disable-fortran - make -j 3 - make install set -e - name: Build env: {CXXFLAGS: -Werror -DTOML11_DISABLE_STD_FILESYSTEM, MACOSX_DEPLOYMENT_TARGET: 10.14} @@ -73,7 +66,6 @@ jobs: -DopenPMD_USE_PYTHON=ON \ -DopenPMD_USE_MPI=OFF \ -DopenPMD_USE_HDF5=OFF \ - -DopenPMD_USE_ADIOS1=ON \ -DopenPMD_USE_ADIOS2=OFF \ -DopenPMD_USE_INVASIVE_TESTS=ON cmake --build build --parallel 3 diff --git a/.github/workflows/tooling.yml b/.github/workflows/tooling.yml index 9b46734578..96a1e5f8fd 100644 --- a/.github/workflows/tooling.yml +++ b/.github/workflows/tooling.yml @@ -7,7 +7,7 @@ concurrency: cancel-in-progress: true jobs: - clangtidy10_nopy_ompi_h5_ad1_ad2: + clangtidy10_nopy_ompi_h5_ad2: name: clang-tidy w/o py runs-on: ubuntu-20.04 if: github.event.pull_request.draft == false @@ -15,7 +15,7 @@ jobs: - uses: actions/checkout@v3 - name: Spack Cache uses: actions/cache@v3 - with: {path: /opt/spack, key: clangtidy10_nopy_ompi_h5_ad1_ad2 } + with: {path: /opt/spack, key: clangtidy10_nopy_ompi_h5_ad2 } - name: Install run: | sudo apt-get update @@ -24,7 +24,7 @@ jobs: - name: Build env: {CC: clang, CXX: clang++} run: | - eval $(spack env activate --sh .github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad1_ad2/) + eval $(spack env activate --sh .github/ci/spack-envs/clangtidy_nopy_ompi_h5_ad2/) spack install share/openPMD/download_samples.sh build @@ -35,7 +35,7 @@ jobs: cat build/clang-tidy.log if [[ $(wc -m - $) - target_include_directories(openPMD.ADIOS1.Parallel SYSTEM PRIVATE - $ - $) - - set_target_properties(openPMD.ADIOS1.Serial PROPERTIES - COMPILE_PDB_NAME openPMD.ADIOS1.Serial - ARCHIVE_OUTPUT_DIRECTORY ${openPMD_ARCHIVE_OUTPUT_DIRECTORY} - LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} - RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} - PDB_OUTPUT_DIRECTORY ${openPMD_PDB_OUTPUT_DIRECTORY} - COMPILE_PDB_OUTPUT_DIRECTORY 
${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY} - - POSITION_INDEPENDENT_CODE ON - CXX_VISIBILITY_PRESET hidden - VISIBILITY_INLINES_HIDDEN ON - ) - # note: same as above, but for Multi-Config generators - if(isMultiConfig) - foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) - string(TOUPPER "${CFG}" CFG_UPPER) - set_target_properties(openPMD.ADIOS1.Serial PROPERTIES - COMPILE_PDB_NAME_${CFG_UPPER} openPMD.ADIOS1.Serial - ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} - LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} - RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} - PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} - COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} - ) - endforeach() - endif() - if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux") - set_target_properties(openPMD.ADIOS1.Serial PROPERTIES - LINK_FLAGS "-Wl,--exclude-libs,ALL") - elseif("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin") - set_target_properties(openPMD.ADIOS1.Serial PROPERTIES - XCODE_ATTRIBUTE_STRIP_STYLE "non-global" - XCODE_ATTRIBUTE_DEPLOYMENT_POSTPROCESSING "YES" - XCODE_ATTRIBUTE_SEPARATE_STRIP "YES" - ) - endif() - foreach(adlib ${ADIOS_LIBRARIES_SEQUENTIAL}) - target_link_libraries(openPMD.ADIOS1.Serial PRIVATE ${adlib}) - endforeach() - target_include_directories(openPMD.ADIOS1.Serial SYSTEM PRIVATE ${ADIOS_INCLUDE_DIRS_SEQUENTIAL}) - target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE "${ADIOS_DEFINITIONS_SEQUENTIAL}") - target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE openPMD_HAVE_ADIOS1=1) - target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE openPMD_HAVE_MPI=0) - target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE _NOMPI) # ADIOS header - - if(openPMD_HAVE_MPI) - set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES - COMPILE_PDB_NAME openPMD.ADIOS1.Parallel - ARCHIVE_OUTPUT_DIRECTORY 
${openPMD_ARCHIVE_OUTPUT_DIRECTORY} - LIBRARY_OUTPUT_DIRECTORY ${openPMD_LIBRARY_OUTPUT_DIRECTORY} - RUNTIME_OUTPUT_DIRECTORY ${openPMD_RUNTIME_OUTPUT_DIRECTORY} - PDB_OUTPUT_DIRECTORY ${openPMD_PDB_OUTPUT_DIRECTORY} - COMPILE_PDB_OUTPUT_DIRECTORY ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY} - - POSITION_INDEPENDENT_CODE ON - CXX_VISIBILITY_PRESET hidden - VISIBILITY_INLINES_HIDDEN 1 - ) - # note: same as above, but for Multi-Config generators - if(isMultiConfig) - foreach(CFG IN LISTS CMAKE_CONFIGURATION_TYPES) - string(TOUPPER "${CFG}" CFG_UPPER) - set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES - COMPILE_PDB_NAME_${CFG_UPPER} opemPMD.ADIOS1.Parallel - ARCHIVE_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_ARCHIVE_OUTPUT_DIRECTORY}/${CFG} - LIBRARY_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_LIBRARY_OUTPUT_DIRECTORY}/${CFG} - RUNTIME_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_RUNTIME_OUTPUT_DIRECTORY}/${CFG} - PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_PDB_OUTPUT_DIRECTORY}/${CFG} - COMPILE_PDB_OUTPUT_DIRECTORY_${CFG_UPPER} ${openPMD_COMPILE_PDB_OUTPUT_DIRECTORY}/${CFG} - ) - endforeach() - endif() - if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux") - set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES - LINK_FLAGS "-Wl,--exclude-libs,ALL") - elseif("${CMAKE_SYSTEM_NAME}" MATCHES "Darwin") - set_target_properties(openPMD.ADIOS1.Parallel PROPERTIES - XCODE_ATTRIBUTE_STRIP_STYLE "non-global" - XCODE_ATTRIBUTE_DEPLOYMENT_POSTPROCESSING "YES" - XCODE_ATTRIBUTE_SEPARATE_STRIP "YES" - ) - endif() - foreach(adlib ${ADIOS_LIBRARIES}) - target_link_libraries(openPMD.ADIOS1.Parallel PRIVATE ${adlib}) - endforeach() - target_link_libraries(openPMD.ADIOS1.Parallel PUBLIC ${openPMD_MPI_TARGETS}) - - target_include_directories(openPMD.ADIOS1.Parallel SYSTEM PRIVATE ${ADIOS_INCLUDE_DIRS}) - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE "${ADIOS_DEFINITIONS}") - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE openPMD_HAVE_ADIOS1=1) - 
target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE openPMD_HAVE_MPI=1) - else() - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE openPMD_HAVE_ADIOS1=0) - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE openPMD_HAVE_MPI=0) - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE _NOMPI) # ADIOS header - endif() - # This ensures that the ADIOS1 targets don't ever include Error.hpp - # To avoid incompatible error types in weird compile configurations - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE OPENPMD_ADIOS1_IMPLEMENTATION) - - # Runtime parameter and API status checks ("asserts") - if(openPMD_USE_VERIFY) - target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE openPMD_USE_VERIFY=1) - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE openPMD_USE_VERIFY=1) - else() - target_compile_definitions(openPMD.ADIOS1.Serial PRIVATE openPMD_USE_VERIFY=0) - target_compile_definitions(openPMD.ADIOS1.Parallel PRIVATE openPMD_USE_VERIFY=0) - endif() - - target_link_libraries(openPMD PUBLIC openPMD.ADIOS1.Serial) - target_link_libraries(openPMD PUBLIC openPMD.ADIOS1.Parallel) -else() - # add stubs to prevent missing symbols in Clang ASAN/UBSAN - target_sources(openPMD PRIVATE ${IO_ADIOS1_SEQUENTIAL_SOURCE} ${IO_ADIOS1_SOURCE}) -endif() - # ADIOS2 Backend if(openPMD_HAVE_ADIOS2) if(openPMD_HAVE_MPI) @@ -1265,11 +1047,6 @@ write_basic_package_version_file("openPMDConfigVersion.cmake" if(openPMD_INSTALL) set(openPMD_INSTALL_TARGET_NAMES openPMD) - if(openPMD_HAVE_ADIOS1) - list(APPEND openPMD_INSTALL_TARGET_NAMES - openPMD.ADIOS1.Serial openPMD.ADIOS1.Parallel) - endif() - if(openPMD_BUILD_CLI_TOOLS) foreach(toolname ${openPMD_CLI_TOOL_NAMES}) list(APPEND openPMD_INSTALL_TARGET_NAMES openpmd-${toolname}) @@ -1354,11 +1131,6 @@ if(openPMD_INSTALL) ${openPMD_BINARY_DIR}/openPMDConfigVersion.cmake DESTINATION ${openPMD_INSTALL_CMAKEDIR} ) - install( - FILES - ${openPMD_SOURCE_DIR}/share/openPMD/cmake/FindADIOS.cmake - 
DESTINATION ${openPMD_INSTALL_CMAKEDIR}/Modules - ) # pkg-config .pc file for depending legacy projects # This is for projects that do not use a build file generator, e.g. # because they compile manually on the command line or write their @@ -1565,7 +1337,7 @@ if(openPMD_BUILD_TESTING) # openpmd-pipe (python) test if( NOT WIN32 AND openPMD_HAVE_HDF5 - AND (openPMD_HAVE_ADIOS2 OR openPMD_HAVE_ADIOS1) + AND openPMD_HAVE_ADIOS2 AND EXAMPLE_DATA_FOUND ) if( openPMD_HAVE_MPI ) diff --git a/Dockerfile b/Dockerfile index a7830ecbb7..54e9050987 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,7 +13,7 @@ ENV CFLAGS="-fPIC ${CFLAGS}" ENV CXXFLAGS="-fPIC ${CXXFLAGS}" # install dependencies -# CMake, zlib?, HDF5, c-blosc, ADIOS1, ADIOS2 +# CMake, zlib?, HDF5, c-blosc, ADIOS2 RUN yum check-update -y \ ; yum -y install \ glibc-static \ @@ -39,7 +39,7 @@ RUN curl -sLo hdf5-1.10.5.tar.gz https://support.hdfgroup.org/ftp/HDF5/re && make \ && make install -# avoid picking up a static libpthread in adios (also: those libs lack -fPIC) +# avoid picking up a static libpthread (also: those libs lack -fPIC) RUN rm -f /usr/lib64/libpthread.a /usr/lib64/libm.a /usr/lib64/librt.a RUN rm -f /usr/lib/libpthread.a /usr/lib/libm.a /usr/lib/librt.a @@ -56,15 +56,6 @@ RUN curl -sLo c-blosc-1.15.0.tar.gz https://github.com/Blosc/c-blosc/arch && make \ && make install -RUN curl -sLo adios-1.13.1.tar.gz http://users.nccs.gov/~pnorbert/adios-1.13.1.tar.gz \ - && file adios*.tar.gz \ - && tar -xzf adios*.tar.gz \ - && rm adios*.tar.gz \ - && cd adios-* \ - && ./configure --enable-static --disable-shared --disable-fortran --without-mpi --prefix=/usr --with-blosc=/usr \ - && make \ - && make install - RUN curl -sLo adios2-2.7.1.tar.gz https://github.com/ornladios/ADIOS2/archive/v2.7.1.tar.gz \ && file adios2*.tar.gz \ && tar -xzf adios2*.tar.gz \ @@ -84,7 +75,6 @@ RUN ls /opt/python/ ENV HDF5_USE_STATIC_LIBRARIES=ON \ ZLIB_USE_STATIC_LIBS=ON \ - ADIOS_USE_STATIC_LIBS=ON \ openPMD_BUILD_TESTING=OFF \ 
openPMD_BUILD_EXAMPLES=OFF diff --git a/NEWS.rst b/NEWS.rst index 40246be635..86ea34fff4 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -3,6 +3,17 @@ Upgrade Guide ============= +0.16.0 +------ + +The ADIOS1 library is no longer developed in favor of ADIOS2. +Consequently, ADIOS1 support was removed in openPMD-api 0.16.0 and newer. +Please transition to ADIOS2. + +For reading legacy ADIOS1 BP3 files, either use an older version of openPMD-api or the BP3 backend in ADIOS2. +Note that ADIOS2 does not support compression in BP3 files. + + 0.15.0 ------ diff --git a/README.md b/README.md index b422379d44..7d596ad94e 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ openPMD is an open meta-data schema that provides meaning and self-description f See [the openPMD standard](https://github.com/openPMD/openPMD-standard) for details of this schema. This library provides a reference API for openPMD data handling. -Since openPMD is a schema (or markup) on top of portable, hierarchical file formats, this library implements various backends such as HDF5, ADIOS1, ADIOS2 and JSON. +Since openPMD is a schema (or markup) on top of portable, hierarchical file formats, this library implements various backends such as HDF5, ADIOS2 and JSON. Writing & reading through those backends and their associated files are supported for serial and [MPI-parallel](https://www.mpi-forum.org/docs/) workflows. 
## Usage @@ -108,7 +108,6 @@ Shipped internally in `share/openPMD/thirdParty/`: I/O backends: * [JSON](https://en.wikipedia.org/wiki/JSON) * [HDF5](https://support.hdfgroup.org/HDF5) 1.8.13+ (optional) -* [ADIOS1](https://www.olcf.ornl.gov/center-projects/adios) 1.13.1+ (optional, deprecated) * [ADIOS2](https://github.com/ornladios/ADIOS2) 2.7.0+ (optional) while those can be built either with or without: @@ -144,7 +143,7 @@ Choose *one* of the install methods below to get started: [![Spack Use Case](https://img.shields.io/badge/use_case-desktop_%28C%2B%2B,_py%29,_development,_HPC-brightgreen)](https://spack.readthedocs.io/en/latest/package_list.html#openpmd-api) ```bash -# optional: +python +adios1 -adios2 -hdf5 -mpi +# optional: +python -adios2 -hdf5 -mpi spack install openpmd-api spack load openpmd-api ``` @@ -182,7 +181,7 @@ brew install openpmd-api [![PyPI Format](https://img.shields.io/pypi/format/openPMD-api)](https://pypi.org/project/openPMD-api) [![PyPI Downloads](https://img.shields.io/pypi/dm/openPMD-api)](https://pypi.org/project/openPMD-api) -On very old macOS versions (<10.9) or on exotic processor architectures, this install method *compiles from source* against the found installations of HDF5, ADIOS1, ADIOS2, and/or MPI (in system paths, from other package managers, or loaded via a module system, ...). +On very old macOS versions (<10.9) or on exotic processor architectures, this install method *compiles from source* against the found installations of HDF5, ADIOS2, and/or MPI (in system paths, from other package managers, or loaded via a module system, ...). ```bash # we need pip 19 or newer @@ -251,7 +250,6 @@ CMake controls options with prefixed `-D`, e.g. 
`-DopenPMD_USE_MPI=OFF`: |------------------------------|------------------|------------------------------------------------------------------------------| | `openPMD_USE_MPI` | **AUTO**/ON/OFF | Parallel, Multi-Node I/O for clusters | | `openPMD_USE_HDF5` | **AUTO**/ON/OFF | HDF5 backend (`.h5` files) | -| `openPMD_USE_ADIOS1` | AUTO/ON/**OFF** | ADIOS1 backend (`.bp` files up to version BP3) - deprecated | | `openPMD_USE_ADIOS2` | **AUTO**/ON/OFF | ADIOS2 backend (`.bp` files in BP3, BP4 or higher) | | `openPMD_USE_PYTHON` | **AUTO**/ON/OFF | Enable Python bindings | | `openPMD_USE_INVASIVE_TESTS` | ON/**OFF** | Enable unit tests that modify source code 1 | @@ -307,7 +305,7 @@ export CMAKE_PREFIX_PATH=$HOME/somepath:$CMAKE_PREFIX_PATH Use the following lines in your project's `CMakeLists.txt`: ```cmake -# supports: COMPONENTS MPI NOMPI HDF5 ADIOS1 ADIOS2 +# supports: COMPONENTS MPI NOMPI HDF5 ADIOS2 find_package(openPMD 0.9.0 CONFIG) if(openPMD_FOUND) @@ -435,7 +433,7 @@ This work was partially funded by the Center of Advanced Systems Understanding ( openPMD-api stands on the shoulders of giants and we are grateful for the following projects included as direct dependencies: -* [ADIOS1](https://github.com/ornladios/ADIOS) and [ADIOS2](https://github.com/ornladios/ADIOS2) by [S. Klasky (ORNL), team, collaborators](https://csmd.ornl.gov/adios) and [contributors](https://github.com/ornladios/ADIOS2/graphs/contributors) +* [ADIOS2](https://github.com/ornladios/ADIOS2) by [S. Klasky, N. Podhorszki, W.F. 
Godoy (ORNL), team, collaborators](https://csmd.ornl.gov/adios) and [contributors](https://github.com/ornladios/ADIOS2/graphs/contributors) * [Catch2](https://github.com/catchorg/Catch2) by [Phil Nash](https://github.com/philsquared), [Martin Hořeňovský](https://github.com/horenmar) and [contributors](https://github.com/catchorg/Catch2/graphs/contributors) * HDF5 by [the HDF group](https://www.hdfgroup.org) and community * [json](https://github.com/nlohmann/json) by [Niels Lohmann](https://github.com/nlohmann) and [contributors](https://github.com/nlohmann/json/graphs/contributors) diff --git a/docs/source/backends/adios1.rst b/docs/source/backends/adios1.rst index 11680d7e49..4be380b4ff 100644 --- a/docs/source/backends/adios1.rst +++ b/docs/source/backends/adios1.rst @@ -3,80 +3,12 @@ ADIOS1 ====== -openPMD supports writing to and reading from ADIOS1 ``.bp`` files. -For this, the installed copy of openPMD must have been built with support for the ADIOS1 backend. -To build openPMD with support for ADIOS, use the CMake option ``-DopenPMD_USE_ADIOS1=ON``. -For further information, check out the :ref:`installation guide `, -:ref:`build dependencies ` and the :ref:`build options `. +The ADIOS1 library is no longer developed in favor of ADIOS2. +Consequently, ADIOS1 support was removed in openPMD-api 0.16.0 and newer. +Please transition to ADIOS2. -.. note:: - - This backend is deprecated, please use ADIOS2 instead. - - -I/O Method ----------- - -ADIOS1 has several staging methods for alternative file formats, yet natively writes to ``.bp`` files. -We currently implement the ``MPI_AGGREGATE`` transport method for MPI-parallel write (``POSIX`` for serial write) and ``ADIOS_READ_METHOD_BP`` for read. - - -Backend-Specific Controls -------------------------- - -The following environment variables control ADIOS1 I/O behavior at runtime. -Fine-tuning these is especially useful when running at large scale. 
- -============================================== ========== ================================================================================ -environment variable default description -============================================== ========== ================================================================================ -``OPENPMD_ADIOS_NUM_AGGREGATORS`` ``1`` Number of I/O aggregator nodes for ADIOS1 ``MPI_AGGREGATE`` transport method. -``OPENPMD_ADIOS_NUM_OST`` ``0`` Number of I/O OSTs for ADIOS1 ``MPI_AGGREGATE`` transport method. -``OPENPMD_ADIOS_HAVE_METADATA_FILE`` ``1`` Online creation of the adios journal file (``1``: yes, ``0``: no). -``OPENPMD_BP_BACKEND`` ``ADIOS2`` Chose preferred ``.bp`` file backend if ``ADIOS1`` and ``ADIOS2`` are available. -``OPENPMD_ADIOS_SUPPRESS_DEPRECATED_WARNING`` ``0`` Set to ``1`` to suppress ADIOS1 deprecation warnings. -============================================== ========== ================================================================================ - -Please refer to the `ADIOS1 manual, section 6.1.5 `_ for details on I/O tuning. - -In case both the ADIOS1 backend and the :ref:`ADIOS2 backend ` are enabled, set ``OPENPMD_BP_BACKEND`` to ``ADIOS1`` to enforce using ADIOS1. -If only the ADIOS1 backend was compiled but not the :ref:`ADIOS2 backend `, the default of ``OPENPMD_BP_BACKEND`` is automatically switched to ``ADIOS1``. -Be advised that ADIOS1 only supports ``.bp`` files up to the internal version BP3, while ADIOS2 supports BP3, BP4 and later formats. - - -Best Practice at Large Scale ----------------------------- - -A good practice at scale is to disable the online creation of the metadata file. -After writing the data, run ``bpmeta`` on the (to-be-created) filename to generate the metadata file offline (repeat per iteration for file-based encoding). -This metadata file is needed for reading, while the actual heavy data resides in ``.dir/`` directories. 
- -Further options depend heavily on filesystem type, specific file striping, network infrastructure and available RAM on the aggregator nodes. -If your filesystem exposes explicit object-storage-targets (OSTs), such as Lustre, try to set the number of OSTs to the maximum number available and allowed per job (e.g. non-full), assuming the number of writing MPI ranks is larger. -A good number for aggregators is usually the number of contributing nodes divided by four. - -For fine-tuning at extreme scale or for exotic systems, please refer to the ADIOS1 manual and talk to your filesystem admins and the ADIOS1 authors. -Be aware that extreme-scale I/O is a research topic after all. - - -Limitations ------------ - -.. note:: - - You cannot initialize and use more than one ``openPMD::Series`` with ADIOS1 backend at the same time in a process, even if both Series operate on totally independent data. - This is an upstream bug in ADIOS1 that we cannot control: ADIOS1 cannot be initialized more than once, probably because it shares some internal state. - -.. note:: - - The way we currently implement ADIOS1 in openPMD-api is sub-ideal and we close/re-open file handles way too often. - Consequently, this can lead to severe performance degradation unless fixed. - Mea culpa, we did better in the past (in PIConGPU). - Please consider using our ADIOS2 backend instead, on which we focus our developments these days. - -.. note:: - - ADIOS1 does not support attributes that are `arrays of complex types `_. +For reading legacy ADIOS1 BP3 files, either use an older version of openPMD-api or the BP3 backend in ADIOS2. +Note that ADIOS2 does not support compression in BP3 files. 
Selected References diff --git a/docs/source/backends/adios2.rst b/docs/source/backends/adios2.rst index 807538c2f6..ac5e50930f 100644 --- a/docs/source/backends/adios2.rst +++ b/docs/source/backends/adios2.rst @@ -84,7 +84,6 @@ environment variable default description ``OPENPMD_ADIOS2_ENGINE`` ``File`` `ADIOS2 engine `_ ``OPENPMD2_ADIOS2_SCHEMA`` ``0`` ADIOS2 schema version (see below) ``OPENPMD_ADIOS2_STATS_LEVEL`` ``0`` whether to generate statistics for variables in ADIOS2. (``1``: yes, ``0``: no). -``OPENPMD_BP_BACKEND`` ``ADIOS2`` Chose preferred ``.bp`` file backend if ``ADIOS1`` and ``ADIOS2`` are available. ``OPENPMD_ADIOS2_BP5_BufferChunkMB`` ``0`` ADIOS2 BP5 engine: applies when using either EveryoneWrites or EveryoneWritesSerial aggregation ``OPENPMD_ADIOS2_BP5_MaxShmMB`` ``0`` ADIOS2 BP5 engine: applies when using TwoLevelShm aggregation ``OPENPMD_ADIOS2_BP5_NumSubFiles`` ``0`` ADIOS2 BP5 engine: num of subfiles @@ -94,9 +93,6 @@ environment variable default description Please refer to the `ADIOS2 documentation `_ for details on I/O tuning. -In case the ADIOS2 backend was not compiled but only the deprecated :ref:`ADIOS1 backend `, the default of ``OPENPMD_BP_BACKEND`` will fall back to ``ADIOS1``. -Be advised that ADIOS1 only supports ``.bp`` files up to the internal version BP3, while ADIOS2 supports BP3, BP4 and later formats. - Notice that the ADIOS2 backend is alternatively configurable via :ref:`JSON parameters `. Due to performance considerations, the ADIOS2 backend configures ADIOS2 not to compute any dataset statistics (Min/Max) by default. 
diff --git a/docs/source/backends/overview.rst b/docs/source/backends/overview.rst index cb426575d0..28c20df1c7 100644 --- a/docs/source/backends/overview.rst +++ b/docs/source/backends/overview.rst @@ -23,7 +23,7 @@ Performance A- A B C Native File Format ``.bp`` (BP3) ``.bp`` (BP3-5) ``.h5`` ``.json`` ================== ============= =============== ========= ========== -:ref:`ADIOS1 is deprecated and will be removed in a future release `. +:ref:`ADIOS1 was removed in version 0.16.0 `. Please use ADIOS2 instead. * supported/yes: implemented and accessible for users of openPMD-api diff --git a/docs/source/details/adios1.json b/docs/source/details/adios1.json deleted file mode 100644 index 95c934c76b..0000000000 --- a/docs/source/details/adios1.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "adios1": { - "dataset": { - "transform": "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" - } - } -} diff --git a/docs/source/details/adios1.toml b/docs/source/details/adios1.toml deleted file mode 100644 index 6d5dca07c3..0000000000 --- a/docs/source/details/adios1.toml +++ /dev/null @@ -1,2 +0,0 @@ -[adios1.dataset] -transform = "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" diff --git a/docs/source/details/backendconfig.rst b/docs/source/details/backendconfig.rst index cef1e4d6f9..f1c47c78bd 100644 --- a/docs/source/details/backendconfig.rst +++ b/docs/source/details/backendconfig.rst @@ -29,7 +29,7 @@ The following list specifies the priority of these means, beginning with the low 5. Explicit API calls such as ``setIterationEncoding()`` The configuration is read in a case-insensitive manner, keys as well as values. -An exception to this are string values which are forwarded to other libraries such as ADIOS1 and ADIOS2. +An exception to this are string values which are forwarded to other libraries such as ADIOS2. Those are read "as-is" and interpreted by the backend library. 
Parameters that are directly passed through to an external library and not interpreted within openPMD API (e.g. ``adios2.engine.parameters``) are unaffected by this and follow the respective library's conventions. @@ -77,7 +77,7 @@ For a consistent user interface, backends shall follow the following rules: Backend-independent JSON configuration -------------------------------------- -The openPMD backend can be chosen via the JSON/TOML key ``backend`` which recognizes the alternatives ``["hdf5", "adios1", "adios2", "json"]``. +The openPMD backend can be chosen via the JSON/TOML key ``backend`` which recognizes the alternatives ``["hdf5", "adios2", "json"]``. The iteration encoding can be chosen via the JSON/TOML key ``iteration_encoding`` which recognizes the alternatives ``["file_based", "group_based", "variable_based"]``. Note that for file-based iteration encoding, specification of the expansion pattern in the file name (e.g. ``data_%T.json``) remains mandatory. @@ -187,20 +187,7 @@ Explanation of the single keys: ``"none"`` can be used to disable chunking. Chunking generally improves performance and only needs to be disabled in corner-cases, e.g. when heavily relying on independent, parallel I/O that non-collectively declares data records. -ADIOS1 -^^^^^^ - -ADIOS1 allows configuring custom dataset transforms via JSON/TOML: - -.. literalinclude:: adios1.json - :language: json - -.. literalinclude:: adios1.toml - :language: toml - -This configuration can be passed globally (i.e. for the ``Series`` object) to apply for all datasets. -Alternatively, it can also be passed for single ``Dataset`` objects to only apply for single datasets. - +.. 
_backendconfig-other: Other backends ^^^^^^^^^^^^^^ diff --git a/docs/source/details/config_layout.json b/docs/source/details/config_layout.json index 382060f364..84b45e938b 100644 --- a/docs/source/details/config_layout.json +++ b/docs/source/details/config_layout.json @@ -1,5 +1,4 @@ { - "adios1": "put ADIOS config here", "adios2": "put ADIOS2 config here", "hdf5": "put HDF5 config here", "json": "put JSON config here" diff --git a/docs/source/details/config_layout.toml b/docs/source/details/config_layout.toml index d44522c3bb..2dafa05ade 100644 --- a/docs/source/details/config_layout.toml +++ b/docs/source/details/config_layout.toml @@ -1,6 +1,3 @@ -[adios1] -# put ADIOS config here - [adios2] # put ADIOS2 config here diff --git a/docs/source/details/mpi.rst b/docs/source/details/mpi.rst index 5c0f1674fa..b94e886fac 100644 --- a/docs/source/details/mpi.rst +++ b/docs/source/details/mpi.rst @@ -38,7 +38,7 @@ Functionality Behavior Description .. [2] Dataset declarations in :ref:`parallel HDF5 ` are only non-collective if :ref:`chunking ` is set to ``none`` (``auto`` by default). Otherwise these operations are collective. -.. [3] :ref:`HDF5 ` only supports collective attribute definitions/writes; :ref:`ADIOS1 ` and :ref:`ADIOS2 ` attributes can be written independently. +.. [3] :ref:`HDF5 ` only supports collective attribute definitions/writes; :ref:`ADIOS2 ` attributes can be written independently. If you want to support all backends equally, treat as a collective operation. Note that openPMD represents constant record components with attributes, thus inheriting this for ``::makeConstant``. 
diff --git a/docs/source/dev/buildoptions.rst b/docs/source/dev/buildoptions.rst index 018857fc9e..ab03a1cfcb 100644 --- a/docs/source/dev/buildoptions.rst +++ b/docs/source/dev/buildoptions.rst @@ -14,7 +14,6 @@ CMake Option Values Description ============================== =============== ======================================================================== ``openPMD_USE_MPI`` **AUTO**/ON/OFF Parallel, Multi-Node I/O for clusters ``openPMD_USE_HDF5`` **AUTO**/ON/OFF HDF5 backend (``.h5`` files) -``openPMD_USE_ADIOS1`` AUTO/ON/**OFF** ADIOS1 backend (``.bp`` files up to version BP3) - deprecated ``openPMD_USE_ADIOS2`` **AUTO**/ON/OFF ADIOS2 backend (``.bp`` files in BP3, BP4 or higher) ``openPMD_USE_PYTHON`` **AUTO**/ON/OFF Enable Python bindings ``openPMD_USE_INVASIVE_TESTS`` ON/**OFF** Enable unit tests that modify source code :sup:`1` @@ -42,7 +41,6 @@ CMake Option Values Description ============================== =============== ================================================== ``openPMD_BUILD_SHARED_LIBS`` **ON**/OFF Build the C++ API as shared library ``HDF5_USE_STATIC_LIBRARIES`` ON/**OFF** Require static HDF5 library -``ADIOS_USE_STATIC_LIBS`` ON/**OFF** Require static ADIOS1 library ============================== =============== ================================================== Note that python modules (``openpmd_api.cpython.[...].so`` or ``openpmd_api.pyd``) are always dynamic libraries. diff --git a/docs/source/index.rst b/docs/source/index.rst index 37b1e4c630..1d6ad14703 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -7,7 +7,7 @@ openPMD is an open meta-data schema that provides meaning and self-description f See `the openPMD standard `_ for details of this schema. This library provides a reference API for openPMD data handling. -Since openPMD is a schema (or markup) on top of portable, hierarchical file formats, this library implements various backends such as HDF5, ADIOS1, ADIOS2 and JSON. 
+Since openPMD is a schema (or markup) on top of portable, hierarchical file formats, this library implements various backends such as HDF5, ADIOS2 and JSON. Writing & reading through those backends and their associated files is supported for serial and `MPI-parallel `_ workflows. .. raw:: html diff --git a/docs/source/install/install.rst b/docs/source/install/install.rst index 176924e976..48b9f6c3e9 100644 --- a/docs/source/install/install.rst +++ b/docs/source/install/install.rst @@ -34,7 +34,7 @@ A package for openPMD-api is available via the `Spack `_ packa .. code-block:: bash - # optional: +python +adios1 -adios2 -hdf5 -mpi + # optional: +python -adios2 -hdf5 -mpi spack install openpmd-api spack load openpmd-api @@ -72,7 +72,7 @@ A package for openPMD-api is available via the `Homebrew `_/`L brew tap openpmd/openpmd brew install openpmd-api -Brew ship only the latest release, includes (Open)MPI support and lacks the ADIOS1 backend. +Brew ship only the latest release and includes (Open)MPI support. .. _install-pypi: @@ -85,7 +85,7 @@ Using the PyPI Package A package for openPMD-api is available via the Python Package Index (`PyPI `_). -On very old macOS versions (<10.9) or on exotic processor architectures, this install method *compiles from source* against the found installations of HDF5, ADIOS1, ADIOS2, and/or MPI (in system paths, from other package managers, or loaded via a module system, ...). +On very old macOS versions (<10.9) or on exotic processor architectures, this install method *compiles from source* against the found installations of HDF5, ADIOS2, and/or MPI (in system paths, from other package managers, or loaded via a module system, ...). .. code-block:: bash diff --git a/docs/source/usage/firstread.rst b/docs/source/usage/firstread.rst index f3edf391f3..f27e3c595b 100644 --- a/docs/source/usage/firstread.rst +++ b/docs/source/usage/firstread.rst @@ -78,7 +78,7 @@ Open Open an existing openPMD series in ``data.h5``. 
Further file formats than ``.h5`` (`HDF5 `_) are supported: -``.bp`` (`ADIOS1 `_/`ADIOS2 `_) or ``.json`` (`JSON `_). +``.bp`` (`ADIOS2 `_) or ``.json`` (`JSON `_). C++17 ^^^^^ diff --git a/docs/source/usage/firstwrite.rst b/docs/source/usage/firstwrite.rst index 193530f09f..d339d00ad1 100644 --- a/docs/source/usage/firstwrite.rst +++ b/docs/source/usage/firstwrite.rst @@ -76,7 +76,7 @@ Open Write into a new openPMD series in ``myOutput/data_<00...N>.h5``. Further file formats than ``.h5`` (`HDF5 `_) are supported: -``.bp`` (`ADIOS1 `_/`ADIOS2 `_) or ``.json`` (`JSON `_). +``.bp`` (`ADIOS2 `_) or ``.json`` (`JSON `_). C++17 ^^^^^ diff --git a/docs/source/usage/workflow.rst b/docs/source/usage/workflow.rst index 64194629ba..bf5027b53b 100644 --- a/docs/source/usage/workflow.rst +++ b/docs/source/usage/workflow.rst @@ -39,11 +39,10 @@ The openPMD-api distinguishes between a number of different access modes: New datasets and iterations will be inserted as needed. Not fully supported by all backends: - * ADIOS1: Automatically coerced to *Create* mode if the file does not exist yet and to *Read-only* mode if it exists. * ADIOS2: Automatically coerced to *Create* mode if the file does not exist yet and to *Read-only* mode if it exists. Since this happens on a per-file level, this mode allows to read from existing iterations and write to new iterations at the same time in file-based iteration encoding. -* **Append mode**: Restricted mode for appending new iterations to an existing Series that is supported by all backends at least in file-based iteration encoding, and by all but ADIOS1 in other encodings. +* **Append mode**: Restricted mode for appending new iterations to an existing Series that is supported by all backends in all encodings. The API is equivalent to that of the *Create* mode, meaning that no reading is supported whatsoever. If the Series does not exist yet, this behaves equivalently to the *Create* mode. 
Existing iterations will not be deleted, newly-written iterations will be inserted. diff --git a/examples/8_benchmark_parallel.cpp b/examples/8_benchmark_parallel.cpp index 7cad63f7d7..5adf2512ff 100644 --- a/examples/8_benchmark_parallel.cpp +++ b/examples/8_benchmark_parallel.cpp @@ -80,7 +80,7 @@ int main(int argc, char *argv[]) // given that you provide it with an appropriate DatasetFillerProvider // (template parameter of the Benchmark class). using type = uint64_t; -#if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 || openPMD_HAVE_HDF5 +#if openPMD_HAVE_ADIOS2 || openPMD_HAVE_HDF5 openPMD::Datatype dt = openPMD::determineDatatype(); #endif @@ -141,7 +141,7 @@ int main(int argc, char *argv[]) // * The number of iterations. Effectively, the benchmark will be repeated // for this many // times. -#if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 +#if openPMD_HAVE_ADIOS2 benchmark.addConfiguration( R"({"adios2": {"dataset":{"operators":[{"type": "blosc"}]}}})", "bp", diff --git a/include/openPMD/Error.hpp b/include/openPMD/Error.hpp index f503dd2294..c50e2918b6 100644 --- a/include/openPMD/Error.hpp +++ b/include/openPMD/Error.hpp @@ -44,7 +44,7 @@ namespace error * @brief An operation was requested that is not supported in a specific * backend. * - * Example: Append mode is not available in ADIOS1. + * Example: Append mode is not available in JSON. */ class OperationUnsupportedInBackend : public Error { diff --git a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp b/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp deleted file mode 100644 index 62e9f00493..0000000000 --- a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp +++ /dev/null @@ -1,279 +0,0 @@ -/* Copyright 2018-2021 Fabian Koller - * - * This file is part of openPMD-api. 
- * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . - */ -#pragma once - -#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" -#include "openPMD/ThrowError.hpp" -#include "openPMD/auxiliary/StringManip.hpp" -#include "openPMD/backend/Attribute.hpp" -#include "openPMD/backend/Writable.hpp" - -#include - -#include -#include -#include -#include -#include -#include - -namespace openPMD -{ -inline std::string -getBP1Extent(Extent const &e, std::string const &delimiter = ",") -{ - switch (e.size()) - { - case 0: - return ""; - case 1: - return std::to_string(e[0]); - default: - std::ostringstream os; - std::for_each( - e.begin(), e.end() - 1, [&os, &delimiter](std::uint64_t const ext) { - os << std::to_string(ext) << delimiter; - }); - os << std::to_string(*e.rbegin()); - return os.str(); - } -} - -inline std::string -getZerosLikeBP1Extent(Extent const &e, std::string const &delimiter = ",") -{ - switch (e.size()) - { - case 0: - return ""; - case 1: - return "0"; - default: - std::ostringstream os; - std::for_each( - e.begin(), e.end() - 1, [&os, &delimiter](std::uint64_t const) { - os << "0" << delimiter; - }); - os << "0"; - return os.str(); - } -} - -inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) -{ - using DT = Datatype; - - // note the ill-named 
fixed-byte adios_... types - // https://github.com/ornladios/ADIOS/issues/187 - switch (dtype) - { - case DT::CHAR: - case DT::VEC_CHAR: - case DT::SCHAR: - case DT::VEC_SCHAR: - return adios_byte; - case DT::UCHAR: - case DT::VEC_UCHAR: - case DT::BOOL: - return adios_unsigned_byte; - case DT::SHORT: - case DT::VEC_SHORT: - if (sizeof(short) == 2u) - return adios_short; - else if (sizeof(short) == 4u) - return adios_integer; - else if (sizeof(long) == 8u) - return adios_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::SHORT found."); - case DT::INT: - case DT::VEC_INT: - if (sizeof(int) == 2u) - return adios_short; - else if (sizeof(int) == 4u) - return adios_integer; - else if (sizeof(int) == 8u) - return adios_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::INT found."); - case DT::LONG: - case DT::VEC_LONG: - if (sizeof(long) == 2u) - return adios_short; - else if (sizeof(long) == 4u) - return adios_integer; - else if (sizeof(long) == 8u) - return adios_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::LONG found."); - case DT::LONGLONG: - case DT::VEC_LONGLONG: - if (sizeof(long long) == 2u) - return adios_short; - else if (sizeof(long long) == 4u) - return adios_integer; - else if (sizeof(long long) == 8u) - return adios_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::LONGLONG found."); - case DT::USHORT: - case DT::VEC_USHORT: - if (sizeof(unsigned short) == 2u) - return adios_unsigned_short; - else if (sizeof(unsigned short) == 4u) - return adios_unsigned_integer; - else if (sizeof(unsigned long) == 8u) - return adios_unsigned_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::USHORT found."); - case DT::UINT: - case DT::VEC_UINT: - if (sizeof(unsigned int) == 2u) - return 
adios_unsigned_short; - else if (sizeof(unsigned int) == 4u) - return adios_unsigned_integer; - else if (sizeof(unsigned int) == 8u) - return adios_unsigned_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::UINT found."); - case DT::ULONG: - case DT::VEC_ULONG: - if (sizeof(unsigned long) == 2u) - return adios_unsigned_short; - else if (sizeof(unsigned long) == 4u) - return adios_unsigned_integer; - else if (sizeof(unsigned long) == 8u) - return adios_unsigned_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::ULONG found."); - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - if (sizeof(unsigned long long) == 2u) - return adios_unsigned_short; - else if (sizeof(unsigned long long) == 4u) - return adios_unsigned_integer; - else if (sizeof(unsigned long long) == 8u) - return adios_unsigned_long; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "No native equivalent for Datatype::ULONGLONG found."); - case DT::FLOAT: - case DT::VEC_FLOAT: - return adios_real; - case DT::DOUBLE: - case DT::ARR_DBL_7: - case DT::VEC_DOUBLE: - return adios_double; - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return adios_long_double; - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return adios_complex; - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return adios_double_complex; - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - error::throwOperationUnsupportedInBackend( - "ADIOS1", "No native equivalent for Datatype::CLONG_DOUBLE found."); - case DT::STRING: - return adios_string; - case DT::VEC_STRING: - return adios_string_array; - case DT::UNDEFINED: - throw std::runtime_error("Unknown Attribute datatype (ADIOS datatype)"); - default: - throw std::runtime_error("Datatype not implemented in ADIOS IO"); - } -} - -inline std::string concrete_bp1_file_position(Writable *w) -{ - std::stack hierarchy; - if (!w->abstractFilePosition) - w = w->parent; - while 
(w) - { - hierarchy.push(w); - w = w->parent; - } - - std::string pos; - while (!hierarchy.empty()) - { - auto const tmp_ptr = std::dynamic_pointer_cast( - hierarchy.top()->abstractFilePosition); - if (tmp_ptr == nullptr) - throw std::runtime_error( - "Dynamic pointer cast returned a nullptr!"); - pos += tmp_ptr->location; - hierarchy.pop(); - } - - return auxiliary::replace_all(pos, "//", "/"); -} - -inline std::string -getEnvNum(std::string const &key, std::string const &defaultValue) -{ - char const *env = std::getenv(key.c_str()); - if (env != nullptr) - { - char const *tmp = env; - while (tmp) - { - if (isdigit(*tmp)) - ++tmp; - else - { - std::cerr << key << " is invalid" << std::endl; - break; - } - } - if (!tmp) - return std::string(env, std::strlen(env)); - else - return defaultValue; - } - else - return defaultValue; -} - -template -inline Attribute readVectorAttributeInternal(void *data, int size) -{ - auto d = reinterpret_cast(data); - std::vector v; - v.resize(size); - for (int i = 0; i < size; ++i) - v[i] = d[i]; - return Attribute(v); -} -} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp b/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp deleted file mode 100644 index 0d1d7ab619..0000000000 --- a/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp +++ /dev/null @@ -1,36 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller - * - * This file is part of openPMD-api. - * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . - */ -#pragma once - -#include "openPMD/IO/AbstractFilePosition.hpp" - -#include - -namespace openPMD -{ -struct ADIOS1FilePosition : public AbstractFilePosition -{ - ADIOS1FilePosition(std::string const &s) : location{s} - {} - - std::string location; -}; // ADIOS1FilePosition -} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp deleted file mode 100644 index bef874c607..0000000000 --- a/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp +++ /dev/null @@ -1,81 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller - * - * This file is part of openPMD-api. - * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . 
- */ -#pragma once - -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/auxiliary/Export.hpp" -#include "openPMD/auxiliary/JSON_internal.hpp" -#include "openPMD/config.hpp" - -#include -#include -#include -#if openPMD_HAVE_ADIOS1 -#include -#endif - -namespace openPMD -{ -class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl; - -#if openPMD_HAVE_ADIOS1 -class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler -{ - friend class ADIOS1IOHandlerImpl; - -public: - ADIOS1IOHandler(std::string path, Access, json::TracingJSON); - ~ADIOS1IOHandler() override; - - std::string backendName() const override - { - return "ADIOS1"; - } - - std::future flush(internal::ParsedFlushParams &) override; - - void enqueue(IOTask const &) override; - -private: - std::queue m_setup; - std::unique_ptr m_impl; -}; // ADIOS1IOHandler -#else -class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler -{ - friend class ADIOS1IOHandlerImpl; - -public: - ADIOS1IOHandler(std::string path, Access, json::TracingJSON); - ~ADIOS1IOHandler() override; - - std::string backendName() const override - { - return "DUMMY_ADIOS1"; - } - - std::future flush(internal::ParsedFlushParams &) override; - -private: - std::unique_ptr m_impl; -}; // ADIOS1IOHandler -#endif -} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp b/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp deleted file mode 100644 index 42305bc78c..0000000000 --- a/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp +++ /dev/null @@ -1,64 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller - * - * This file is part of openPMD-api. - * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. 
- * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . - */ -#pragma once - -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/auxiliary/Export.hpp" -#include "openPMD/config.hpp" - -#if openPMD_HAVE_ADIOS1 -#include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" -#endif - -#include -#include -#include -#if openPMD_HAVE_ADIOS1 -#include -#include -#endif - -namespace openPMD -{ -#if openPMD_HAVE_ADIOS1 -class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl - : public CommonADIOS1IOHandlerImpl -{ -private: - using Base_t = CommonADIOS1IOHandlerImpl; - -public: - ADIOS1IOHandlerImpl(AbstractIOHandler *, json::TracingJSON); - virtual ~ADIOS1IOHandlerImpl(); - - virtual void init(); - - std::future flush(); - - virtual int64_t open_write(Writable *); - virtual ADIOS_FILE *open_read(std::string const &name); - int64_t initialize_group(std::string const &name); -}; // ADIOS1IOHandlerImpl -#else -class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl -{}; // ADIOS1IOHandlerImpl -#endif -} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp deleted file mode 100644 index d1a079b17f..0000000000 --- a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp +++ /dev/null @@ -1,131 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller and Franz Poeschel - * - * This file is part of openPMD-api. 
- * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . - */ -#pragma once - -#include "openPMD/config.hpp" - -#if openPMD_HAVE_ADIOS1 - -#include "openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp" -#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/IO/AbstractIOHandlerImpl.hpp" -#include "openPMD/auxiliary/DerefDynamicCast.hpp" -#include "openPMD/auxiliary/Filesystem.hpp" -#include "openPMD/auxiliary/JSON_internal.hpp" -#include "openPMD/auxiliary/Memory.hpp" -#include "openPMD/auxiliary/StringManip.hpp" - -#include -#include - -#include -#include -#include -#include -#include -#include - -namespace openPMD -{ -template // CRT pattern -class CommonADIOS1IOHandlerImpl : public AbstractIOHandlerImpl -{ -public: - void - createFile(Writable *, Parameter const &) override; - void checkFile(Writable *, Parameter &) override; - void - createPath(Writable *, Parameter const &) override; - void createDataset( - Writable *, Parameter const &) override; - void extendDataset( - Writable *, Parameter const &) override; - void openFile(Writable *, Parameter &) override; - void - closeFile(Writable *, Parameter const &) override; - void availableChunks( - Writable *, Parameter &) override; - void openPath(Writable *, 
Parameter const &) override; - void openDataset(Writable *, Parameter &) override; - void - deleteFile(Writable *, Parameter const &) override; - void - deletePath(Writable *, Parameter const &) override; - void deleteDataset( - Writable *, Parameter const &) override; - void deleteAttribute( - Writable *, Parameter const &) override; - void - writeDataset(Writable *, Parameter &) override; - void writeAttribute( - Writable *, Parameter const &) override; - void readDataset(Writable *, Parameter &) override; - void readAttribute(Writable *, Parameter &) override; - void listPaths(Writable *, Parameter &) override; - void - listDatasets(Writable *, Parameter &) override; - void listAttributes(Writable *, Parameter &) override; - void - deregister(Writable *, Parameter const &) override; - - void close(int64_t); - void close(ADIOS_FILE *); - void - flush_attribute(int64_t group, std::string const &name, Attribute const &); - -protected: - template - CommonADIOS1IOHandlerImpl(Args &&...args) - : AbstractIOHandlerImpl{std::forward(args)...} - {} - - ADIOS_READ_METHOD m_readMethod; - std::unordered_map > m_filePaths; - std::unordered_map, int64_t> m_groups; - std::unordered_map, bool> m_existsOnDisk; - std::unordered_map, int64_t> - m_openWriteFileHandles; - std::unordered_map, ADIOS_FILE *> - m_openReadFileHandles; - struct ScheduledRead - { - ADIOS_SELECTION *selection; - std::shared_ptr data; // needed to avoid early freeing - }; - std::unordered_map > - m_scheduledReads; - std::unordered_map > - m_attributeWrites; - // config options - std::string m_defaultTransform; - /** - * Call this function to get adios file id for a Writable. Will create one - * if does not exist - * @return returns an adios file id. 
- */ - int64_t GetFileHandle(Writable *); - - void initJson(json::TracingJSON); -}; // ParallelADIOS1IOHandlerImpl -} // namespace openPMD - -#endif diff --git a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp deleted file mode 100644 index e28122582b..0000000000 --- a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp +++ /dev/null @@ -1,69 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller - * - * This file is part of openPMD-api. - * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . 
- */ -#pragma once - -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/auxiliary/Export.hpp" -#include "openPMD/auxiliary/JSON_internal.hpp" -#include "openPMD/config.hpp" - -#include -#include -#include -#if openPMD_HAVE_ADIOS1 -#include -#endif - -namespace openPMD -{ -class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl; - -class OPENPMDAPI_EXPORT ParallelADIOS1IOHandler : public AbstractIOHandler -{ - friend class ParallelADIOS1IOHandlerImpl; - -public: -#if openPMD_HAVE_MPI - ParallelADIOS1IOHandler( - std::string path, Access, json::TracingJSON, MPI_Comm); -#else - ParallelADIOS1IOHandler(std::string path, Access, json::TracingJSON); -#endif - ~ParallelADIOS1IOHandler() override; - - std::string backendName() const override - { - return "MPI_ADIOS1"; - } - - std::future flush(internal::ParsedFlushParams &) override; -#if openPMD_HAVE_ADIOS1 - void enqueue(IOTask const &) override; -#endif - -private: -#if openPMD_HAVE_ADIOS1 - std::queue m_setup; -#endif - std::unique_ptr m_impl; -}; // ParallelADIOS1IOHandler - -} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp deleted file mode 100644 index 0b1bb2ca34..0000000000 --- a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp +++ /dev/null @@ -1,71 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller - * - * This file is part of openPMD-api. - * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . - */ -#pragma once - -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/auxiliary/Export.hpp" -#include "openPMD/auxiliary/JSON_internal.hpp" -#include "openPMD/config.hpp" - -#if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -#include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" -#endif - -#include -#include -#include -#if openPMD_HAVE_ADIOS1 -#include -#include -#endif - -namespace openPMD -{ -#if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl - : public CommonADIOS1IOHandlerImpl -{ -private: - using Base_t = CommonADIOS1IOHandlerImpl; - -public: - ParallelADIOS1IOHandlerImpl( - AbstractIOHandler *, json::TracingJSON, MPI_Comm); - virtual ~ParallelADIOS1IOHandlerImpl(); - - virtual void init(); - - std::future flush(); - - virtual int64_t open_write(Writable *); - virtual ADIOS_FILE *open_read(std::string const &name); - int64_t initialize_group(std::string const &name); - -protected: - MPI_Comm m_mpiComm; - MPI_Info m_mpiInfo; -}; // ParallelADIOS1IOHandlerImpl -#else -class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl -{}; // ParallelADIOS1IOHandlerImpl -#endif - -} // namespace openPMD diff --git a/include/openPMD/IO/Format.hpp b/include/openPMD/IO/Format.hpp index 697e73f788..43ec4d04a1 100644 --- a/include/openPMD/IO/Format.hpp +++ b/include/openPMD/IO/Format.hpp @@ -29,7 +29,6 @@ namespace openPMD enum class Format { HDF5, - ADIOS1, ADIOS2_BP, ADIOS2_BP4, ADIOS2_BP5, diff --git a/include/openPMD/RecordComponent.hpp b/include/openPMD/RecordComponent.hpp index a4bc694e16..3443b27d7a 100644 --- a/include/openPMD/RecordComponent.hpp +++ b/include/openPMD/RecordComponent.hpp @@ -136,7 +136,6 @@ class RecordComponent : public BaseRecordComponent * * 
Backend support for resizing datasets: * * JSON: Supported - * * ADIOS1: Unsupported * * ADIOS2: Supported as of ADIOS2 2.7.0 * * HDF5: (Currently) unsupported. * Will be probably supported as soon as chunking is supported in HDF5. diff --git a/include/openPMD/ThrowError.hpp b/include/openPMD/ThrowError.hpp index 4e48e9bfdc..f2695f7ae0 100644 --- a/include/openPMD/ThrowError.hpp +++ b/include/openPMD/ThrowError.hpp @@ -19,15 +19,6 @@ * If not, see . */ -/* - * For objects that must not include Error.hpp but still need to throw errors. - * In some exotic compiler configurations (clang-6 with libc++), - * including Error.hpp into the ADIOS1 backend leads to incompatible error type - * symbols. - * So, use only the functions defined in here in the ADIOS1 backend. - * Definitions are in Error.cpp. - */ - #pragma once #include "openPMD/auxiliary/Export.hpp" diff --git a/include/openPMD/backend/Writable.hpp b/include/openPMD/backend/Writable.hpp index bc0e35bc31..c585893fc3 100644 --- a/include/openPMD/backend/Writable.hpp +++ b/include/openPMD/backend/Writable.hpp @@ -78,10 +78,6 @@ class Writable final friend class Series; friend class Record; friend class AbstractIOHandlerImpl; - template - friend class CommonADIOS1IOHandlerImpl; - friend class ADIOS1IOHandlerImpl; - friend class ParallelADIOS1IOHandlerImpl; friend class ADIOS2IOHandlerImpl; friend class HDF5IOHandlerImpl; friend class ParallelHDF5IOHandlerImpl; diff --git a/include/openPMD/config.hpp.in b/include/openPMD/config.hpp.in index 8da1b5e6fc..8df5dae9de 100644 --- a/include/openPMD/config.hpp.in +++ b/include/openPMD/config.hpp.in @@ -35,7 +35,7 @@ #endif #ifndef openPMD_HAVE_ADIOS1 -#cmakedefine01 openPMD_HAVE_ADIOS1 +#define openPMD_HAVE_ADIOS1 0 #endif #ifndef openPMD_HAVE_ADIOS2 diff --git a/src/Format.cpp b/src/Format.cpp index 7f30e1a793..d5a8acf5f3 100644 --- a/src/Format.cpp +++ b/src/Format.cpp @@ -32,28 +32,7 @@ Format determineFormat(std::string const &filename) if (auxiliary::ends_with(filename, 
".h5")) return Format::HDF5; if (auxiliary::ends_with(filename, ".bp")) - { - auto const bp_backend = auxiliary::getEnvString( - "OPENPMD_BP_BACKEND", -#if openPMD_HAVE_ADIOS2 - "ADIOS2" -#elif openPMD_HAVE_ADIOS1 - "ADIOS1" -#else - "ADIOS2" -#endif - ); - - if (bp_backend == "ADIOS2") - return Format::ADIOS2_BP; - else if (bp_backend == "ADIOS1") - return Format::ADIOS1; - else - throw std::runtime_error( - "Environment variable OPENPMD_BP_BACKEND for .bp backend is " - "neither ADIOS1 nor ADIOS2: " + - bp_backend); - } + return Format::ADIOS2_BP; if (auxiliary::ends_with(filename, ".bp4")) return Format::ADIOS2_BP4; if (auxiliary::ends_with(filename, ".bp5")) @@ -75,7 +54,6 @@ std::string suffix(Format f) { case Format::HDF5: return ".h5"; - case Format::ADIOS1: case Format::ADIOS2_BP: return ".bp"; case Format::ADIOS2_BP4: diff --git a/src/IO/ADIOS/ADIOS1IOHandler.cpp b/src/IO/ADIOS/ADIOS1IOHandler.cpp deleted file mode 100644 index 1e13fc1c82..0000000000 --- a/src/IO/ADIOS/ADIOS1IOHandler.cpp +++ /dev/null @@ -1,467 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller, Axel Huebl - * - * This file is part of openPMD-api. - * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . 
- */ - -#include "openPMD/IO/ADIOS/ADIOS1IOHandler.hpp" -#include "openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp" - -#if openPMD_HAVE_ADIOS1 -#include "openPMD/IO/AbstractIOHandlerImpl.hpp" - -#include "openPMD/IO/IOTask.hpp" -#include -#include -#include -#include -#include -#include -#endif - -namespace openPMD -{ -#if openPMD_HAVE_ADIOS1 -#if openPMD_USE_VERIFY -#define VERIFY(CONDITION, TEXT) \ - { \ - if (!(CONDITION)) \ - throw std::runtime_error((TEXT)); \ - } -#else -#define VERIFY(CONDITION, TEXT) \ - do \ - { \ - (void)sizeof(CONDITION); \ - } while (0) -#endif - -ADIOS1IOHandlerImpl::ADIOS1IOHandlerImpl( - AbstractIOHandler *handler, json::TracingJSON json) - : Base_t(handler) -{ - initJson(std::move(json)); -} - -ADIOS1IOHandlerImpl::~ADIOS1IOHandlerImpl() -{ - for (auto &f : m_openReadFileHandles) - close(f.second); - m_openReadFileHandles.clear(); - - if (access::write(m_handler->m_backendAccess)) - { - for (auto &group : m_attributeWrites) - for (auto &att : group.second) - flush_attribute(group.first, att.first, att.second); - - for (auto &f : m_openWriteFileHandles) - close(f.second); - m_openWriteFileHandles.clear(); - } - - int status; - status = adios_read_finalize_method(m_readMethod); - if (status != err_no_error) - std::cerr << "Internal error: Failed to finalize ADIOS reading method " - "(serial)\n"; - - status = adios_finalize(0); - if (status != err_no_error) - std::cerr << "Internal error: Failed to finalize ADIOS (serial)\n"; -} - -std::future ADIOS1IOHandlerImpl::flush() -{ - using namespace auxiliary; - - auto handler = dynamic_cast(m_handler); - while (!handler->m_setup.empty()) - { - IOTask &i = handler->m_setup.front(); - try - { - switch (i.operation) - { - using O = Operation; - case O::CREATE_FILE: - createFile( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CHECK_FILE: - checkFile( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CREATE_PATH: - createPath( - i.writable, - 
deref_dynamic_cast>( - i.parameter.get())); - break; - case O::OPEN_PATH: - openPath( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::OPEN_FILE: - openFile( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::KEEP_SYNCHRONOUS: - keepSynchronous( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DEREGISTER: - deregister( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - default: - VERIFY( - false, - "[ADIOS1] Internal error: Wrong operation in ADIOS setup " - "queue"); - } - } - catch (...) - { - std::cerr << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString(i.operation) - << " failed with exception. Clearing IO queue and " - "passing on the exception." 
- << std::endl; - while (!m_handler->m_work.empty()) - { - m_handler->m_work.pop(); - } - throw; - } - handler->m_setup.pop(); - } - - while (!handler->m_work.empty()) - { - using namespace auxiliary; - - IOTask &i = handler->m_work.front(); - try - { - switch (i.operation) - { - using O = Operation; - case O::EXTEND_DATASET: - extendDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CLOSE_FILE: - closeFile( - i.writable, - *dynamic_cast *>( - i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::READ_DATASET: - readDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::READ_ATT: - readAttribute( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::ADVANCE: - advance( - i.writable, - deref_dynamic_cast>( - 
i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - default: - VERIFY( - false, - "[ADIOS1] Internal error: Wrong operation in ADIOS work " - "queue"); - } - } - catch (...) - { - std::cerr << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString(i.operation) - << " failed with exception. Clearing IO queue and " - "passing on the exception." - << std::endl; - while (!m_handler->m_work.empty()) - { - m_handler->m_work.pop(); - } - throw; - } - handler->m_work.pop(); - } - - int status; - for (auto &file : m_scheduledReads) - { - status = adios_perform_reads(file.first, 1); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to perform ADIOS reads during " - "dataset reading"); - - for (auto &sel : file.second) - adios_selection_delete(sel.selection); - } - m_scheduledReads.clear(); - - return std::future(); -} - -void ADIOS1IOHandlerImpl::init() -{ - int status; - status = adios_init_noxml(MPI_COMM_NULL); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to initialize ADIOS"); - - m_readMethod = ADIOS_READ_METHOD_BP; - status = adios_read_init_method(m_readMethod, MPI_COMM_NULL, ""); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); -} -#endif - -#if openPMD_HAVE_ADIOS1 -ADIOS1IOHandler::ADIOS1IOHandler( - std::string path, Access at, json::TracingJSON json) - : AbstractIOHandler(std::move(path), at) - , m_impl{new ADIOS1IOHandlerImpl(this, std::move(json))} -{ - m_impl->init(); -} - -ADIOS1IOHandler::~ADIOS1IOHandler() = default; - -std::future ADIOS1IOHandler::flush(internal::ParsedFlushParams &) -{ - return m_impl->flush(); -} - -void ADIOS1IOHandler::enqueue(IOTask const &i) -{ - switch (i.operation) - { - case Operation::CREATE_FILE: - case Operation::CHECK_FILE: - case Operation::CREATE_PATH: - case Operation::OPEN_PATH: - case Operation::CREATE_DATASET: 
- case Operation::OPEN_FILE: - case Operation::WRITE_ATT: - case Operation::KEEP_SYNCHRONOUS: - case Operation::DEREGISTER: - m_setup.push(i); - return; - default: - m_work.push(i); - return; - } -} - -int64_t ADIOS1IOHandlerImpl::open_write(Writable *writable) -{ - auto res = m_filePaths.find(writable); - if (res == m_filePaths.end()) - res = m_filePaths.find(writable->parent); - - std::string mode; - if (m_existsOnDisk[res->second]) - { - mode = "u"; - /* close the handle that corresponds to the file we want to append to */ - if (m_openReadFileHandles.find(res->second) != - m_openReadFileHandles.end()) - { - close(m_openReadFileHandles[res->second]); - m_openReadFileHandles.erase(res->second); - } - } - else - { - mode = "w"; - m_existsOnDisk[res->second] = true; - } - - int64_t fd = -1; - int status; - status = adios_open( - &fd, - res->second->c_str(), - res->second->c_str(), - mode.c_str(), - MPI_COMM_NULL); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to open_write ADIOS file"); - - return fd; -} - -ADIOS_FILE *ADIOS1IOHandlerImpl::open_read(std::string const &name) -{ - ADIOS_FILE *f = nullptr; - f = adios_read_open_file(name.c_str(), m_readMethod, MPI_COMM_NULL); - VERIFY( - adios_errno != err_file_not_found, - "[ADIOS1] Internal error: ADIOS file not found"); - VERIFY( - f != nullptr, - "[ADIOS1] Internal error: Failed to open_read ADIOS file"); - - return f; -} - -int64_t ADIOS1IOHandlerImpl::initialize_group(std::string const &name) -{ - int status; - int64_t group; - ADIOS_STATISTICS_FLAG noStatistics = adios_stat_no; - status = adios_declare_group(&group, name.c_str(), "", noStatistics); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to declare ADIOS group"); - status = adios_select_method(group, "POSIX", "", ""); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to select ADIOS method"); - return group; -} - -#else -ADIOS1IOHandler::ADIOS1IOHandler(std::string path, Access at, 
json::TracingJSON) - : AbstractIOHandler(std::move(path), at) -{ - throw std::runtime_error("openPMD-api built without ADIOS1 support"); -} - -ADIOS1IOHandler::~ADIOS1IOHandler() = default; - -std::future ADIOS1IOHandler::flush(internal::ParsedFlushParams &) -{ - return std::future(); -} -#endif -} // namespace openPMD diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp deleted file mode 100644 index e5e32d2dca..0000000000 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ /dev/null @@ -1,2054 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller, Axel Huebl - * - * This file is part of openPMD-api. - * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . 
- */ - -#include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" -#include "openPMD/Error.hpp" - -#if openPMD_HAVE_ADIOS1 - -#include "openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp" -#include "openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp" -#include "openPMD/auxiliary/JSON_internal.hpp" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -namespace openPMD -{ - -#if openPMD_USE_VERIFY -#define VERIFY(CONDITION, TEXT) \ - { \ - if (!(CONDITION)) \ - throw std::runtime_error((TEXT)); \ - } -#else -#define VERIFY(CONDITION, TEXT) \ - do \ - { \ - (void)sizeof(CONDITION); \ - } while (0) -#endif - -template -void CommonADIOS1IOHandlerImpl::close(int64_t fd) -{ - int status; - status = adios_close(fd); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to close ADIOS file (open_write)"); -} - -template -void CommonADIOS1IOHandlerImpl::close(ADIOS_FILE *f) -{ - int status; - status = adios_read_close(f); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to close ADIOS file (open_read)"); -} - -template -void CommonADIOS1IOHandlerImpl::flush_attribute( - int64_t group, std::string const &name, Attribute const &att) -{ - auto dtype = att.dtype; - // https://github.com/ComputationalRadiationPhysics/picongpu/pull/1756 - if (dtype == Datatype::BOOL) - dtype = Datatype::UCHAR; - - int nelems = 0; - switch (dtype) - { - using DT = Datatype; - case DT::VEC_CHAR: - nelems = att.get >().size(); - break; - case DT::VEC_SHORT: - nelems = att.get >().size(); - break; - case DT::VEC_INT: - nelems = att.get >().size(); - break; - case DT::VEC_LONG: - nelems = att.get >().size(); - break; - case DT::VEC_LONGLONG: - nelems = att.get >().size(); - break; - case DT::VEC_UCHAR: - nelems = att.get >().size(); - break; - case DT::VEC_SCHAR: - nelems = att.get >().size(); - break; - case DT::VEC_USHORT: - nelems = att.get >().size(); - break; - case DT::VEC_UINT: - nelems = att.get >().size(); - break; - 
case DT::VEC_ULONG: - nelems = att.get >().size(); - break; - case DT::VEC_ULONGLONG: - nelems = att.get >().size(); - break; - case DT::VEC_FLOAT: - nelems = att.get >().size(); - break; - case DT::VEC_DOUBLE: - nelems = att.get >().size(); - break; - case DT::VEC_LONG_DOUBLE: - nelems = att.get >().size(); - break; - case DT::VEC_STRING: - nelems = att.get >().size(); - break; - case DT::ARR_DBL_7: - nelems = 7; - break; - case DT::UNDEFINED: - throw std::runtime_error( - "[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); - default: - nelems = 1; - } - - auto values = auxiliary::allocatePtr(dtype, nelems); - switch (att.dtype) - { - using DT = Datatype; - case DT::CHAR: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::UCHAR: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::SCHAR: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::SHORT: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::INT: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::LONG: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::LONGLONG: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::USHORT: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::UINT: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::ULONG: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::ULONGLONG: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::FLOAT: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::DOUBLE: { - auto ptr = reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::LONG_DOUBLE: { - auto ptr = 
reinterpret_cast(values.get()); - *ptr = att.get(); - break; - } - case DT::CFLOAT: { - auto ptr = reinterpret_cast *>(values.get()); - *ptr = att.get >(); - break; - } - case DT::CDOUBLE: { - auto ptr = reinterpret_cast *>(values.get()); - *ptr = att.get >(); - break; - } - case DT::CLONG_DOUBLE: { - throw std::runtime_error( - "[ADIOS1] Unknown Attribute datatype (CLONG_DOUBLE)"); - break; - } - case DT::STRING: { - auto const &v = att.get(); - if (v.empty()) - { - error::throwOperationUnsupportedInBackend( - "ADIOS1", "Empty string attributes not supported."); - } - values = auxiliary::allocatePtr(Datatype::CHAR, v.length() + 1u); - strcpy((char *)values.get(), v.c_str()); - break; - } - case DT::VEC_CHAR: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_SHORT: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_INT: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONG: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONGLONG: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_UCHAR: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_SCHAR: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_USHORT: { - auto ptr = reinterpret_cast(values.get()); 
- auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_UINT: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_ULONG: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_ULONGLONG: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_FLOAT: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_DOUBLE: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONG_DOUBLE: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - ptr[i] = vec[i]; - break; - } - /* not supported by ADIOS 1.13.1: - * https://github.com/ornladios/ADIOS/issues/212 - */ - case DT::VEC_CFLOAT: - case DT::VEC_CDOUBLE: - case DT::VEC_CLONG_DOUBLE: { - throw std::runtime_error( - "[ADIOS1] Arrays of complex attributes are not supported"); - break; - } - case DT::VEC_STRING: { - auto ptr = reinterpret_cast(values.get()); - auto const &vec = att.get >(); - for (size_t i = 0; i < vec.size(); ++i) - { - if (vec[i].empty()) - { - error::throwOperationUnsupportedInBackend( - "ADIOS1", "Empty string attributes not supported."); - } - size_t size = vec[i].size() + 1; - ptr[i] = new char[size]; - strncpy(ptr[i], vec[i].c_str(), size); - } - break; - } - case DT::ARR_DBL_7: { - auto ptr = reinterpret_cast(values.get()); - auto const &arr = att.get >(); - for (size_t i = 0; i < 7; ++i) - ptr[i] = arr[i]; - 
break; - } - case DT::BOOL: { - auto ptr = reinterpret_cast(values.get()); - *ptr = static_cast(att.get()); - break; - } - case DT::UNDEFINED: - throw std::runtime_error( - "[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); - default: - throw std::runtime_error( - "[ADIOS1] Datatype not implemented in ADIOS IO"); - } - - int status; - status = adios_define_attribute_byvalue( - group, - name.c_str(), - "", - getBP1DataType(att.dtype), - nelems, - values.get()); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to define ADIOS attribute by value"); - - if (att.dtype == Datatype::VEC_STRING) - { - auto ptr = reinterpret_cast(values.get()); - for (int i = 0; i < nelems; ++i) - delete[] ptr[i]; - } -} - -template -void CommonADIOS1IOHandlerImpl::createFile( - Writable *writable, Parameter const ¶meters) -{ - if (access::readOnly(m_handler->m_backendAccess)) - throw std::runtime_error( - "[ADIOS1] Creating a file in read-only mode is not possible."); - - if (!writable->written) - { - if (!auxiliary::directory_exists(m_handler->directory)) - { - bool success = auxiliary::create_directories(m_handler->directory); - VERIFY( - success, - "[ADIOS1] Internal error: Failed to create directories during " - "ADIOS file creation"); - } - - std::string name = m_handler->directory + parameters.name; - if (!auxiliary::ends_with(name, ".bp")) - name += ".bp"; - - if (m_handler->m_backendAccess == Access::APPEND && - auxiliary::file_exists(name)) - { - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "Appending to existing file on disk (use Access::CREATE to " - "overwrite)"); - } - - writable->written = true; - writable->abstractFilePosition = - std::make_shared("/"); - - m_filePaths[writable] = std::make_shared(name); - - /* our control flow allows for more than one open file handle - * if multiple files are opened with the same group, data might be lost - */ - - /* defer actually opening the file handle until the first - * 
Operation::WRITE_DATASET occurs */ - m_existsOnDisk[m_filePaths[writable]] = false; - - GetFileHandle(writable); - } -} - -template -void CommonADIOS1IOHandlerImpl::checkFile( - Writable *, Parameter ¶meter) -{ - *parameter.fileExists = - Parameter::FileExists::DontKnow; -} - -template -void CommonADIOS1IOHandlerImpl::createPath( - Writable *writable, Parameter const ¶meters) -{ - if (access::readOnly(m_handler->m_backendAccess)) - throw std::runtime_error( - "[ADIOS1] Creating a path in a file opened as read only is not " - "possible."); - - if (!writable->written) - { - /* Sanitize path */ - std::string path = parameters.path; - if (auxiliary::starts_with(path, '/')) - path = auxiliary::replace_first(path, "/", ""); - if (!auxiliary::ends_with(path, '/')) - path += '/'; - - /* ADIOS has no concept for explicitly creating paths. - * They are implicitly created with the paths of variables/attributes. - */ - - writable->written = true; - writable->abstractFilePosition = - std::make_shared(path); - - Writable *position; - if (writable->parent) - position = writable->parent; - else - position = writable; /* root does not have a parent but might still - have to be written */ - auto res = m_filePaths.find(position); - - m_filePaths[writable] = res->second; - } -} - -static std::optional datasetTransform(json::TracingJSON config) -{ - using ret_t = std::optional; - if (!config.json().contains("dataset")) - { - return ret_t{}; - } - config = config["dataset"]; - if (!config.json().contains("transform")) - { - return ret_t{}; - } - config = config["transform"]; - auto maybeRes = json::asStringDynamic(config.json()); - if (maybeRes.has_value()) - { - return std::move(maybeRes.value()); - } - else - { - error::throwBackendConfigSchema( - {"adios1", "dataset", "transform"}, - "Key must convertible to type string."); - } -} - -template -void CommonADIOS1IOHandlerImpl::createDataset( - Writable *writable, Parameter const ¶meters) -{ - if 
(access::readOnly(m_handler->m_backendAccess)) - throw std::runtime_error( - "[ADIOS1] Creating a dataset in a file opened as read only is not " - "possible."); - - if (!writable->written) - { - /* ADIOS variable definitions require the file to be (re-)opened to take - * effect/not cause errors */ - auto res = m_filePaths.find(writable->parent); - - int64_t group = m_groups[res->second]; - - /* Sanitize name */ - std::string name = parameters.name; - if (auxiliary::starts_with(name, '/')) - name = auxiliary::replace_first(name, "/", ""); - if (auxiliary::ends_with(name, '/')) - name = auxiliary::replace_last(name, "/", ""); - - std::string path = concrete_bp1_file_position(writable) + name; - - size_t ndims = parameters.extent.size(); - - std::vector chunkSize(ndims, ""); - std::vector chunkOffset(ndims, ""); - int64_t id; - for (size_t i = 0; i < ndims; ++i) - { - chunkSize[i] = "/tmp" + path + "_chunkSize" + std::to_string(i); - id = adios_define_var( - group, - chunkSize[i].c_str(), - "", - adios_unsigned_long, - "", - "", - ""); - VERIFY( - id != 0, - "[ADIOS1] Internal error: Failed to define ADIOS variable " - "during Dataset creation"); - chunkOffset[i] = "/tmp" + path + "_chunkOffset" + std::to_string(i); - id = adios_define_var( - group, - chunkOffset[i].c_str(), - "", - adios_unsigned_long, - "", - "", - ""); - VERIFY( - id != 0, - "[ADIOS1] Internal error: Failed to define ADIOS variable " - "during Dataset creation"); - } - - std::string chunkSizeParam = auxiliary::join(chunkSize, ","); - std::string globalSize = getBP1Extent(parameters.extent); - std::string chunkOffsetParam = auxiliary::join(chunkOffset, ","); - id = adios_define_var( - group, - path.c_str(), - "", - getBP1DataType(parameters.dtype), - chunkSizeParam.c_str(), - globalSize.c_str(), - chunkOffsetParam.c_str()); - VERIFY( - id != 0, - "[ADIOS1] Internal error: Failed to define ADIOS variable during " - "Dataset creation"); - - std::string transform = ""; - { - json::TracingJSON options = 
json::parseOptions( - parameters.options, /* considerFiles = */ false); - if (options.json().contains("adios1")) - { - options = options["adios1"]; - auto maybeTransform = datasetTransform(options); - if (maybeTransform.has_value()) - { - transform = maybeTransform.value(); - } - - parameters.warnUnusedParameters( - options, - "ADIOS1", - "Warning: parts of the backend configuration for " - "ADIOS1 dataset '" + - name + "' remain unused:\n"); - } - } - // Fallback: global option - if (transform.empty()) - { - transform = m_defaultTransform; - } - - if (!transform.empty()) - { - int status; - status = adios_set_transform(id, transform.c_str()); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to set ADIOS transform during " - "Dataset creation"); - } - - writable->written = true; - writable->abstractFilePosition = - std::make_shared(name); - - m_filePaths[writable] = res->second; - } -} - -template -void CommonADIOS1IOHandlerImpl::extendDataset( - Writable *, Parameter const &) -{ - throw std::runtime_error( - "[ADIOS1] Dataset extension not implemented in ADIOS backend"); -} - -template -void CommonADIOS1IOHandlerImpl::openFile( - Writable *writable, Parameter ¶meters) -{ - if (!auxiliary::directory_exists(m_handler->directory)) - error::throwReadError( - error::AffectedObject::File, - error::Reason::Inaccessible, - "ADIOS1", - "Supplied directory is not valid: " + m_handler->directory); - - std::string name = m_handler->directory + parameters.name; - if (!auxiliary::ends_with(name, ".bp")) - name += ".bp"; - - std::shared_ptr filePath; - auto it = std::find_if( - m_filePaths.begin(), - m_filePaths.end(), - [name](std::unordered_map >:: - value_type const &entry) { return *entry.second == name; }); - if (it == m_filePaths.end()) - filePath = std::make_shared(name); - else - filePath = it->second; - - if (m_handler->m_backendAccess == Access::CREATE) - { - // called at Series::flush for iterations that has been flushed before - // this is to 
make sure to point the Series.m_writer points to this - // iteration so when call Series.flushAttribute(), the attributes can be - // flushed to the iteration level file. - m_filePaths[writable] = filePath; - writable->written = true; - writable->abstractFilePosition = - std::make_shared("/"); - return; - } - /* close the handle that corresponds to the file we want to open */ - if (m_openWriteFileHandles.find(filePath) != m_openWriteFileHandles.end()) - { - close(m_openWriteFileHandles[filePath]); - m_openWriteFileHandles.erase(filePath); - } - - if (m_groups.find(filePath) == m_groups.end()) - m_groups[filePath] = - static_cast(this)->initialize_group(name); - - if (m_openReadFileHandles.find(filePath) == m_openReadFileHandles.end()) - { - ADIOS_FILE *f = static_cast(this)->open_read(name); - m_openReadFileHandles[filePath] = f; - } - - writable->written = true; - writable->abstractFilePosition = std::make_shared("/"); - - m_filePaths[writable] = filePath; - m_existsOnDisk[filePath] = true; -} - -template -void CommonADIOS1IOHandlerImpl::closeFile( - Writable *writable, Parameter const &) -{ - auto myFile = m_filePaths.find(writable); - if (myFile == m_filePaths.end()) - { - return; - } - - // finish write operations - auto myGroup = m_groups.find(myFile->second); - if (myGroup != m_groups.end()) - { - auto attributeWrites = m_attributeWrites.find(myGroup->second); - if (access::write(this->m_handler->m_backendAccess) && - attributeWrites != m_attributeWrites.end()) - { - for (auto &att : attributeWrites->second) - { - flush_attribute(myGroup->second, att.first, att.second); - } - m_attributeWrites.erase(attributeWrites); - } - m_groups.erase(myGroup); - } - - auto handle_write = m_openWriteFileHandles.find(myFile->second); - if (handle_write != m_openWriteFileHandles.end()) - { - close(handle_write->second); - m_openWriteFileHandles.erase(handle_write); - } - - // finish read operations - auto handle_read = m_openReadFileHandles.find(myFile->second); - if 
(handle_read != m_openReadFileHandles.end()) - { - auto scheduled = m_scheduledReads.find(handle_read->second); - if (scheduled != m_scheduledReads.end()) - { - auto status = adios_perform_reads(scheduled->first, 1); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to perform ADIOS reads during " - "dataset reading"); - - for (auto &sel : scheduled->second) - adios_selection_delete(sel.selection); - m_scheduledReads.erase(scheduled); - } - close(handle_read->second); - m_openReadFileHandles.erase(handle_read); - } - m_existsOnDisk.erase(myFile->second); - m_filePaths.erase(myFile); -} - -template -void CommonADIOS1IOHandlerImpl::availableChunks( - Writable *writable, Parameter ¶ms) -{ - ADIOS_FILE *f; - f = m_openReadFileHandles.at(m_filePaths.at(writable)); - std::string name = concrete_bp1_file_position(writable); - VERIFY( - std::strcmp(f->path, m_filePaths.at(writable)->c_str()) == 0, - "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); - ADIOS_VARINFO *varinfo = adios_inq_var(f, name.c_str()); - VERIFY( - adios_errno == err_no_error, - "[ADIOS1] Internal error: Failed to inquire ADIOS variable while " - "querying available chunks."); - int err = adios_inq_var_blockinfo(f, varinfo); - VERIFY( - err == 0, - "[ADIOS1] Internal error: Failed to obtain ADIOS varinfo while " - "querying available chunks."); - int nblocks = varinfo->nblocks[0]; // we don't use steps, so index 0 is fine - int ndim = varinfo->ndim; - auto &table = *params.chunks; - table.reserve(nblocks); - for (int block = 0; block < nblocks; ++block) - { - ADIOS_VARBLOCK &varblock = varinfo->blockinfo[block]; - Offset offset(ndim); - Extent extent(ndim); - for (int i = 0; i < ndim; ++i) - { - offset[i] = varblock.start[i]; - extent[i] = varblock.count[i]; - } - table.emplace_back(offset, extent, int(varblock.process_id)); - } - adios_free_varinfo(varinfo); -} - -template -void CommonADIOS1IOHandlerImpl::openPath( - Writable *writable, Parameter const ¶meters) -{ - /* 
Sanitize path */ - std::string path = parameters.path; - if (!path.empty()) - { - if (auxiliary::starts_with(path, '/')) - path = auxiliary::replace_first(path, "/", ""); - if (!auxiliary::ends_with(path, '/')) - path += '/'; - } - - writable->written = true; - writable->abstractFilePosition = std::make_shared(path); - - auto res = writable->parent ? m_filePaths.find(writable->parent) - : m_filePaths.find(writable); - - m_filePaths[writable] = res->second; -} - -template -void CommonADIOS1IOHandlerImpl::openDataset( - Writable *writable, Parameter ¶meters) -{ - ADIOS_FILE *f; - auto res = m_filePaths.find(writable->parent); - f = m_openReadFileHandles.at(res->second); - - /* Sanitize name */ - std::string name = parameters.name; - if (auxiliary::starts_with(name, '/')) - name = auxiliary::replace_first(name, "/", ""); - - std::string datasetname = writable->abstractFilePosition - ? concrete_bp1_file_position(writable) - : concrete_bp1_file_position(writable) + name; - - ADIOS_VARINFO *vi; - vi = adios_inq_var(f, datasetname.c_str()); - std::string error_string("[ADIOS1] Internal error: "); - error_string.append("Failed to inquire about ADIOS variable '") - .append(datasetname) - .append("' during dataset opening"); - VERIFY(adios_errno == err_no_error, error_string); - VERIFY(vi != nullptr, error_string); - - Datatype dtype; - - // note the ill-named fixed-byte adios_... 
types - // https://github.com/ornladios/ADIOS/issues/187 - switch (vi->type) - { - using DT = Datatype; - case adios_byte: - dtype = DT::CHAR; - break; - case adios_short: - if (sizeof(short) == 2u) - dtype = DT::SHORT; - else if (sizeof(int) == 2u) - dtype = DT::INT; - else if (sizeof(long) == 2u) - dtype = DT::LONG; - else if (sizeof(long long) == 2u) - dtype = DT::LONGLONG; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "No native equivalent for Datatype adios_short found."); - break; - case adios_integer: - if (sizeof(short) == 4u) - dtype = DT::SHORT; - else if (sizeof(int) == 4u) - dtype = DT::INT; - else if (sizeof(long) == 4u) - dtype = DT::LONG; - else if (sizeof(long long) == 4u) - dtype = DT::LONGLONG; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "No native equivalent for Datatype adios_integer found."); - break; - case adios_long: - if (sizeof(short) == 8u) - dtype = DT::SHORT; - else if (sizeof(int) == 8u) - dtype = DT::INT; - else if (sizeof(long) == 8u) - dtype = DT::LONG; - else if (sizeof(long long) == 8u) - dtype = DT::LONGLONG; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "No native equivalent for Datatype adios_long found."); - break; - case adios_unsigned_byte: - dtype = DT::UCHAR; - break; - case adios_unsigned_short: - if (sizeof(unsigned short) == 2u) - dtype = DT::USHORT; - else if (sizeof(unsigned int) == 2u) - dtype = DT::UINT; - else if (sizeof(unsigned long) == 2u) - dtype = DT::ULONG; - else if (sizeof(unsigned long long) == 2u) - dtype = DT::ULONGLONG; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "No native equivalent for Datatype adios_unsigned_short " - "found."); - break; - case adios_unsigned_integer: - if (sizeof(unsigned short) == 4u) - dtype = DT::USHORT; - else if (sizeof(unsigned int) == 4u) - dtype = DT::UINT; - else if (sizeof(unsigned long) == 4u) - dtype = DT::ULONG; - else if (sizeof(unsigned long long) == 4u) - dtype = DT::ULONGLONG; - 
else - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "No native equivalent for Datatype adios_unsigned_integer " - "found."); - break; - case adios_unsigned_long: - if (sizeof(unsigned short) == 8u) - dtype = DT::USHORT; - else if (sizeof(unsigned int) == 8u) - dtype = DT::UINT; - else if (sizeof(unsigned long) == 8u) - dtype = DT::ULONG; - else if (sizeof(unsigned long long) == 8u) - dtype = DT::ULONGLONG; - else - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "No native equivalent for Datatype adios_unsigned_long found."); - break; - case adios_real: - dtype = DT::FLOAT; - break; - case adios_double: - dtype = DT::DOUBLE; - break; - case adios_long_double: - dtype = DT::LONG_DOUBLE; - break; - case adios_complex: - dtype = DT::CFLOAT; - break; - case adios_double_complex: - dtype = DT::CDOUBLE; - break; - - case adios_string: - case adios_string_array: - default: - error::throwOperationUnsupportedInBackend( - "ADIOS1", - "[ADIOS1] Datatype not implemented for ADIOS dataset writing"); - } - *parameters.dtype = dtype; - - Extent e; - e.resize(vi->ndim); - for (int i = 0; i < vi->ndim; ++i) - e[i] = vi->dims[i]; - *parameters.extent = e; - - writable->written = true; - if (!writable->abstractFilePosition) - { - writable->abstractFilePosition = - std::make_shared(name); - } - - m_openReadFileHandles[res->second] = f; - m_filePaths[writable] = res->second; -} - -template -void CommonADIOS1IOHandlerImpl::deleteFile( - Writable *writable, Parameter const ¶meters) -{ - if (access::readOnly(m_handler->m_backendAccess)) - throw std::runtime_error( - "[ADIOS1] Deleting a file opened as read only is not possible."); - - if (writable->written) - { - auto path = m_filePaths.at(writable); - if (m_openReadFileHandles.find(path) != m_openReadFileHandles.end()) - { - close(m_openReadFileHandles.at(path)); - m_openReadFileHandles.erase(path); - } - if (m_openWriteFileHandles.find(path) != m_openWriteFileHandles.end()) - { - 
close(m_openWriteFileHandles.at(path)); - m_openWriteFileHandles.erase(path); - } - - std::string name = m_handler->directory + parameters.name; - if (!auxiliary::ends_with(name, ".bp")) - name += ".bp"; - - if (!auxiliary::file_exists(name)) - throw std::runtime_error("[ADIOS1] File does not exist: " + name); - - auxiliary::remove_file(name); - - writable->written = false; - writable->abstractFilePosition.reset(); - - m_filePaths.erase(writable); - } -} - -template -void CommonADIOS1IOHandlerImpl::deletePath( - Writable *, Parameter const &) -{ - throw std::runtime_error( - "[ADIOS1] Path deletion not implemented in ADIOS backend"); -} - -template -void CommonADIOS1IOHandlerImpl::deleteDataset( - Writable *, Parameter const &) -{ - throw std::runtime_error( - "[ADIOS1] Dataset deletion not implemented in ADIOS backend"); -} - -template -void CommonADIOS1IOHandlerImpl::deleteAttribute( - Writable *, Parameter const &) -{ - throw std::runtime_error( - "[ADIOS1] Attribute deletion not implemented in ADIOS backend"); -} - -template -int64_t CommonADIOS1IOHandlerImpl::GetFileHandle(Writable *writable) -{ - auto res = m_filePaths.find(writable); - if (res == m_filePaths.end()) - res = m_filePaths.find(writable->parent); - int64_t fd; - - if (m_openWriteFileHandles.find(res->second) == - m_openWriteFileHandles.end()) - { - std::string name = *(res->second); - m_groups[m_filePaths[writable]] = - static_cast(this)->initialize_group(name); - - fd = static_cast(this)->open_write(writable); - m_openWriteFileHandles[res->second] = fd; - } - else - fd = m_openWriteFileHandles.at(res->second); - - return fd; -} - -template -void CommonADIOS1IOHandlerImpl::writeDataset( - Writable *writable, Parameter ¶meters) -{ - if (access::readOnly(m_handler->m_backendAccess)) - throw std::runtime_error( - "[ADIOS1] Writing into a dataset in a file opened as read-only is " - "not possible."); - - int64_t fd = GetFileHandle(writable); - - std::string name = 
concrete_bp1_file_position(writable); - - size_t ndims = parameters.extent.size(); - - std::string chunkSize; - std::string chunkOffset; - int status; - for (size_t i = 0; i < ndims; ++i) - { - chunkSize = "/tmp" + name + "_chunkSize" + std::to_string(i); - status = adios_write(fd, chunkSize.c_str(), ¶meters.extent[i]); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to write ADIOS variable during " - "Dataset writing"); - chunkOffset = "/tmp" + name + "_chunkOffset" + std::to_string(i); - status = adios_write(fd, chunkOffset.c_str(), ¶meters.offset[i]); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to write ADIOS variable during " - "Dataset writing"); - } - - status = adios_write(fd, name.c_str(), parameters.data.get()); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to write ADIOS variable during " - "Dataset writing"); -} - -template -void CommonADIOS1IOHandlerImpl::writeAttribute( - Writable *writable, Parameter const ¶meters) -{ - if (parameters.changesOverSteps) - { - // cannot do this - return; - } - if (access::readOnly(m_handler->m_backendAccess)) - throw std::runtime_error( - "[ADIOS1] Writing an attribute in a file opened as read only is " - "not possible."); - - std::string name = concrete_bp1_file_position(writable); - if (!auxiliary::ends_with(name, '/')) - name += '/'; - name += parameters.name; - - auto res = m_filePaths.find(writable); - if (res == m_filePaths.end()) - res = m_filePaths.find(writable->parent); - GetFileHandle(writable); - - int64_t group = m_groups[res->second]; - - auto &attributes = m_attributeWrites[group]; - attributes.erase(name); - attributes.emplace(name, parameters.resource); -} - -template -void CommonADIOS1IOHandlerImpl::readDataset( - Writable *writable, Parameter ¶meters) -{ - switch (parameters.dtype) - { - using DT = Datatype; - case DT::DOUBLE: - case DT::FLOAT: - case DT::CDOUBLE: - case DT::CFLOAT: - case DT::SHORT: - case DT::INT: - case 
DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::CHAR: - case DT::UCHAR: - case DT::SCHAR: - case DT::BOOL: - break; - case DT::UNDEFINED: - throw std::runtime_error( - "[ADIOS1] Unknown Attribute datatype (ADIOS1 Dataset read)"); - default: - throw std::runtime_error( - "[ADIOS1] Datatype not implemented in ADIOS1 IO"); - } - - ADIOS_FILE *f; - f = m_openReadFileHandles.at(m_filePaths.at(writable)); - VERIFY( - std::strcmp(f->path, m_filePaths.at(writable)->c_str()) == 0, - "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); - - ADIOS_SELECTION *sel; - sel = adios_selection_boundingbox( - parameters.extent.size(), - parameters.offset.data(), - parameters.extent.data()); - VERIFY( - sel != nullptr, - "[ADIOS1] Internal error: Failed to select ADIOS bounding box during " - "dataset reading"); - VERIFY( - adios_errno == err_no_error, - "[ADIOS1] Internal error: Failed to select ADIOS bounding box during " - "dataset reading"); - - std::string varname = concrete_bp1_file_position(writable); - void *data = parameters.data.get(); - - int status; - status = adios_schedule_read(f, sel, varname.c_str(), 0, 1, data); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset " - "reading"); - VERIFY( - adios_errno == err_no_error, - "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset " - "reading"); - - m_scheduledReads[f].push_back({sel, parameters.data}); -} - -template -void CommonADIOS1IOHandlerImpl::readAttribute( - Writable *writable, Parameter ¶meters) -{ - if (!writable->written) - throw std::runtime_error( - "[ADIOS1] Internal error: Writable not marked written during " - "attribute reading"); - - ADIOS_FILE *f; - f = m_openReadFileHandles.at(m_filePaths.at(writable)); - - std::string attrname = concrete_bp1_file_position(writable); - if (!auxiliary::ends_with(attrname, '/')) - attrname += "/"; - attrname += 
parameters.name; - - ADIOS_DATATYPES datatype = adios_unknown; - int size = 0; - void *data = nullptr; - - int status; - status = adios_get_attr(f, attrname.c_str(), &datatype, &size, &data); - if (status != 0) - { - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::NotFound, - "ADIOS1", - attrname); - } - if (datatype == adios_unknown) - { - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::UnexpectedContent, - "ADIOS1", - "Unknown datatype: " + attrname); - } - - // size is returned in number of allocated bytes - // note the ill-named fixed-byte adios_... types - // https://github.com/ornladios/ADIOS/issues/187 - switch (datatype) - { - case adios_byte: - break; - case adios_short: - size /= 2; - break; - case adios_integer: - size /= 4; - break; - case adios_long: - size /= 8; - break; - case adios_unsigned_byte: - break; - case adios_unsigned_short: - size /= 2; - break; - case adios_unsigned_integer: - size /= 4; - break; - case adios_unsigned_long: - size /= 8; - break; - case adios_real: - size /= 4; - break; - case adios_double: - size /= 8; - break; - case adios_long_double: - size /= sizeof(long double); - break; - case adios_complex: - size /= 8; - break; - case adios_double_complex: - size /= 16; - break; - case adios_string: - break; - case adios_string_array: - size /= sizeof(char *); - break; - - default: - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::UnexpectedContent, - "ADIOS1", - "Unsupported datatype: " + attrname); - } - - Datatype dtype; - Attribute a(0); - if (size == 1) - { - switch (datatype) - { - using DT = Datatype; - case adios_byte: - dtype = DT::CHAR; - a = Attribute(*reinterpret_cast(data)); - break; - case adios_short: - if (sizeof(short) == 2u) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(int) == 2u) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(long) == 2u) - { - 
dtype = DT::LONG; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(long long) == 2u) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast(data)); - } - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type adios_short: " + - attrname); - break; - case adios_integer: - if (sizeof(short) == 4u) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(int) == 4u) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(long) == 4u) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(long long) == 4u) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast(data)); - } - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type adios_integer: " + - attrname); - break; - case adios_long: - if (sizeof(short) == 8u) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(int) == 8u) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(long) == 8u) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(long long) == 8u) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast(data)); - } - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type adios_long: " + - attrname); - break; - case adios_unsigned_byte: - dtype = DT::UCHAR; - a = Attribute(*reinterpret_cast(data)); - break; - case adios_unsigned_short: - if (sizeof(unsigned short) == 2u) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned int) == 2u) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned long) == 2u) - { - dtype = DT::ULONG; - a = 
Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned long long) == 2u) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast(data)); - } - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type " - "adios_unsigned_short: " + - attrname); - break; - case adios_unsigned_integer: - if (sizeof(unsigned short) == 4u) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned int) == 4u) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned long) == 4u) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned long long) == 4u) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast(data)); - } - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type " - "adios_unsigned_integer: " + - attrname); - break; - case adios_unsigned_long: - if (sizeof(unsigned short) == 8u) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned int) == 8u) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned long) == 8u) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast(data)); - } - else if (sizeof(unsigned long long) == 8u) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast(data)); - } - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type " - "adios_unsigned_long: " + - attrname); - break; - case adios_real: - dtype = DT::FLOAT; - a = Attribute(*reinterpret_cast(data)); - break; - case adios_double: - dtype = DT::DOUBLE; - a = Attribute(*reinterpret_cast(data)); - break; - case adios_long_double: - dtype = DT::LONG_DOUBLE; - a = Attribute(*reinterpret_cast(data)); - break; - case 
adios_complex: - dtype = DT::CFLOAT; - a = Attribute(*reinterpret_cast *>(data)); - break; - case adios_double_complex: - dtype = DT::CDOUBLE; - a = Attribute(*reinterpret_cast *>(data)); - break; - case adios_string: { - dtype = DT::STRING; - auto c = reinterpret_cast(data); - a = Attribute( - auxiliary::strip(std::string(c, std::strlen(c)), {'\0'})); - break; - } - case adios_string_array: { - dtype = DT::VEC_STRING; - auto c = reinterpret_cast(data); - std::vector vs; - vs.resize(size); - for (int i = 0; i < size; ++i) - { - vs[i] = auxiliary::strip( - std::string(c[i], std::strlen(c[i])), {'\0'}); - /** @todo pointer should be freed, but this causes memory - * corruption */ - // free(c[i]); - } - a = Attribute(vs); - break; - } - default: - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + - "' in scalar branch: " + attrname); - } - } - else - { - switch (datatype) - { - using DT = Datatype; - case adios_byte: { - dtype = DT::VEC_CHAR; - auto c = reinterpret_cast(data); - std::vector vc; - vc.resize(size); - for (int i = 0; i < size; ++i) - vc[i] = c[i]; - a = Attribute(vc); - break; - } - case adios_short: { - if (sizeof(short) == 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_SHORT); - else if (sizeof(int) == 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), DT::VEC_INT); - else if (sizeof(long) == 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_LONG); - else if (sizeof(long long) == 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_LONGLONG); - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type adios_short: " + - attrname); - break; - } - case adios_integer: { - if 
(sizeof(short) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_SHORT); - else if (sizeof(int) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), DT::VEC_INT); - else if (sizeof(long) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_LONG); - else if (sizeof(long long) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_LONGLONG); - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type adios_integer: " + - attrname); - break; - } - case adios_long: { - if (sizeof(short) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_SHORT); - else if (sizeof(int) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), DT::VEC_INT); - else if (sizeof(long) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_LONG); - else if (sizeof(long long) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_LONGLONG); - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type adios_long: " + - attrname); - break; - } - case adios_unsigned_byte: { - dtype = DT::VEC_UCHAR; - auto uc = reinterpret_cast(data); - std::vector vuc; - vuc.resize(size); - for (int i = 0; i < size; ++i) - vuc[i] = uc[i]; - a = Attribute(vuc); - break; - } - case adios_unsigned_short: { - if (sizeof(unsigned short) == 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_USHORT); - else if (sizeof(unsigned int) == 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_UINT); - else if (sizeof(unsigned long) 
== 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_ULONG); - else if (sizeof(unsigned long long) == 2u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_ULONGLONG); - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type " - "adios_unsigned_short: " + - attrname); - break; - } - case adios_unsigned_integer: { - if (sizeof(unsigned short) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_USHORT); - else if (sizeof(unsigned int) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_UINT); - else if (sizeof(unsigned long) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_ULONG); - else if (sizeof(unsigned long long) == 4u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_ULONGLONG); - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for type " - "adios_unsigned_integer: " + - attrname); - break; - } - case adios_unsigned_long: { - if (sizeof(unsigned short) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_USHORT); - else if (sizeof(unsigned int) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_UINT); - else if (sizeof(unsigned long) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_ULONG); - else if (sizeof(unsigned long long) == 8u) - std::tie(a, dtype) = std::make_tuple( - readVectorAttributeInternal(data, size), - DT::VEC_ULONGLONG); - else - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "No native equivalent found for 
type " - "adios_unsigned_long: " + - attrname); - break; - } - case adios_real: { - dtype = DT::VEC_FLOAT; - auto f4 = reinterpret_cast(data); - std::vector vf; - vf.resize(size); - for (int i = 0; i < size; ++i) - vf[i] = f4[i]; - a = Attribute(vf); - break; - } - case adios_double: { - dtype = DT::VEC_DOUBLE; - auto d8 = reinterpret_cast(data); - std::vector vd; - vd.resize(size); - for (int i = 0; i < size; ++i) - vd[i] = d8[i]; - a = Attribute(vd); - break; - } - case adios_long_double: { - dtype = DT::VEC_LONG_DOUBLE; - auto ld = reinterpret_cast(data); - std::vector vld; - vld.resize(size); - for (int i = 0; i < size; ++i) - vld[i] = ld[i]; - a = Attribute(vld); - break; - } - /* not supported by ADIOS 1.13.1: VEC_CFLOAT, VEC_CDOUBLE, - * VEC_CLONG_DOUBLE https://github.com/ornladios/ADIOS/issues/212 - */ - case adios_string: { - dtype = DT::STRING; - a = Attribute(auxiliary::strip( - std::string(reinterpret_cast(data), size), {'\0'})); - break; - } - case adios_string_array: { - dtype = DT::VEC_STRING; - auto c = reinterpret_cast(data); - std::vector vs; - vs.resize(size); - for (int i = 0; i < size; ++i) - { - vs[i] = auxiliary::strip( - std::string(c[i], std::strlen(c[i])), {'\0'}); - /** @todo pointer should be freed, but this causes memory - * corruption */ - // free(c[i]); - } - a = Attribute(vs); - break; - } - - default: - error::throwReadError( - error::AffectedObject::Attribute, - error::Reason::Other, - "ADIOS1", - "Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + - "' in vector branch: " + attrname); - } - } - - free(data); - - *parameters.dtype = dtype; - *parameters.resource = a.getResource(); -} - -template -void CommonADIOS1IOHandlerImpl::listPaths( - Writable *writable, Parameter ¶meters) -{ - if (!writable->written) - throw std::runtime_error( - "[ADIOS1] Internal error: Writable not marked written during path " - "listing"); - - ADIOS_FILE *f; - f = m_openReadFileHandles.at(m_filePaths.at(writable)); - - std::string 
name = concrete_bp1_file_position(writable); - - std::unordered_set paths; - std::unordered_set variables; - for (int i = 0; i < f->nvars; ++i) - { - char *str = f->var_namelist[i]; - std::string s(str, std::strlen(str)); - if (auxiliary::starts_with(s, name)) - { - /* remove the writable's path from the name */ - s = auxiliary::replace_first(s, name, ""); - variables.emplace(s); - if (std::any_of( - s.begin(), s.end(), [](char c) { return c == '/'; })) - { - /* there are more path levels after the current writable */ - s = s.substr(0, s.find_first_of('/')); - paths.emplace(s); - } - } - } - for (int i = 0; i < f->nattrs; ++i) - { - char *str = f->attr_namelist[i]; - std::string s(str, std::strlen(str)); - if (auxiliary::starts_with(s, name)) - { - /* remove the writable's path from the name */ - s = auxiliary::replace_first(s, name, ""); - if (std::any_of( - s.begin(), s.end(), [](char c) { return c == '/'; })) - { - /* remove the attribute name */ - s = s.substr(0, s.find_last_of('/')); - if (!std::any_of( - variables.begin(), - variables.end(), - [&s](std::string const &var) { - return auxiliary::starts_with(var, s); - })) - { - /* this is either a group or a constant scalar */ - s = s.substr(0, s.find_first_of('/')); - paths.emplace(s); - } - } - } - } - - *parameters.paths = std::vector(paths.begin(), paths.end()); -} - -template -void CommonADIOS1IOHandlerImpl::listDatasets( - Writable *writable, Parameter ¶meters) -{ - if (!writable->written) - throw std::runtime_error( - "[ADIOS1] Internal error: Writable not marked written during " - "dataset listing"); - - ADIOS_FILE *f; - f = m_openReadFileHandles.at(m_filePaths.at(writable)); - - std::string name = concrete_bp1_file_position(writable); - - std::unordered_set paths; - for (int i = 0; i < f->nvars; ++i) - { - char *str = f->var_namelist[i]; - std::string s(str, std::strlen(str)); - if (auxiliary::starts_with(s, name)) - { - /* remove the writable's path from the name */ - s = auxiliary::replace_first(s, 
name, ""); - if (std::none_of( - s.begin(), s.end(), [](char c) { return c == '/'; })) - { - /* this is a dataset of the writable */ - paths.emplace(s); - } - } - } - - *parameters.datasets = std::vector(paths.begin(), paths.end()); -} - -template -void CommonADIOS1IOHandlerImpl::listAttributes( - Writable *writable, Parameter ¶meters) -{ - if (!writable->written) - throw std::runtime_error( - "[ADIOS1] Internal error: Writable not marked written during " - "attribute listing"); - - ADIOS_FILE *f; - f = m_openReadFileHandles.at(m_filePaths.at(writable)); - - std::string name = concrete_bp1_file_position(writable); - - if (!auxiliary::ends_with(name, '/')) - { - /* writable is a dataset and corresponds to an ADIOS variable */ - ADIOS_VARINFO *info; - info = adios_inq_var(f, name.c_str()); - VERIFY( - adios_errno == err_no_error, - "[ADIOS1] Internal error: Failed to inquire ADIOS variable during " - "attribute listing"); - VERIFY( - info != nullptr, - "[ADIOS1] Internal error: Failed to inquire ADIOS variable during " - "attribute listing"); - - name += '/'; - parameters.attributes->reserve(info->nattrs); - for (int i = 0; i < info->nattrs; ++i) - { - char *c = f->attr_namelist[info->attr_ids[i]]; - parameters.attributes->push_back(auxiliary::replace_first( - std::string(c, std::strlen(c)), name, "")); - } - - adios_free_varinfo(info); - } - else - { - /* there is no ADIOS variable associated with the writable */ - std::unordered_set attributes; - for (int i = 0; i < f->nattrs; ++i) - { - char *str = f->attr_namelist[i]; - std::string s(str, std::strlen(str)); - if (auxiliary::starts_with(s, name)) - { - /* remove the writable's path from the name */ - s = auxiliary::replace_first(s, name, ""); - if (std::none_of( - s.begin(), s.end(), [](char c) { return c == '/'; })) - { - /* this is an attribute of the writable */ - attributes.insert(s); - } - } - } - *parameters.attributes = - std::vector(attributes.begin(), attributes.end()); - } -} - -template -void 
CommonADIOS1IOHandlerImpl::deregister( - Writable *writable, Parameter const &) -{ - m_filePaths.erase(writable); -} - -template -void CommonADIOS1IOHandlerImpl::initJson(json::TracingJSON config) -{ - if (!config.json().contains("adios1")) - { - return; - } - auto maybeTransform = datasetTransform(config["adios1"]); - if (maybeTransform.has_value()) - { - m_defaultTransform = std::move(maybeTransform.value()); - } - auto shadow = config.invertShadow(); - if (shadow.size() > 0) - { - switch (config.originallySpecifiedAs) - { - case json::SupportedLanguages::JSON: - std::cerr << "Warning: parts of the JSON configuration for ADIOS1 " - "remain unused:\n" - << shadow << std::endl; - break; - case json::SupportedLanguages::TOML: { - auto asToml = json::jsonToToml(shadow); - std::cerr << "Warning: parts of the JSON configuration for ADIOS1 " - "remain unused:\n" - << asToml << std::endl; - break; - } - } - } -} - -/* - * Sic! - * The ADIOS1 IO Handler is built as two CMake targets: serial and parallel. - * One receives the definition openPMD_HAVE_MPI=0, the other receives - * openPMD_HAVE_MPI=1. - * So, if openPMD_HAVE_MPI is true, then we do not need to instantiate both - * the serial and the parallel handler down here, since the serial handler will - * be instantiated in another target. - */ -#if openPMD_HAVE_MPI -template class CommonADIOS1IOHandlerImpl; -#else -template class CommonADIOS1IOHandlerImpl; -#endif // openPMD_HAVE_MPI - -} // namespace openPMD -#endif // openPMD_HAVE_ADIOS1 diff --git a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp deleted file mode 100644 index 396a9106d8..0000000000 --- a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp +++ /dev/null @@ -1,511 +0,0 @@ -/* Copyright 2017-2021 Fabian Koller, Axel Huebl - * - * This file is part of openPMD-api. 
- * - * openPMD-api is free software: you can redistribute it and/or modify - * it under the terms of of either the GNU General Public License or - * the GNU Lesser General Public License as published by - * the Free Software Foundation, either version 3 of the License, or - * (at your option) any later version. - * - * openPMD-api is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License and the GNU Lesser General Public License - * for more details. - * - * You should have received a copy of the GNU General Public License - * and the GNU Lesser General Public License along with openPMD-api. - * If not, see . - */ -#include "openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp" -#include "openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp" - -#if openPMD_HAVE_MPI && openPMD_HAVE_ADIOS1 -#include "openPMD/IO/IOTask.hpp" -#include -#include -#include -#include -#include -#include -#endif - -namespace openPMD -{ -#if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -#if openPMD_USE_VERIFY -#define VERIFY(CONDITION, TEXT) \ - { \ - if (!(CONDITION)) \ - throw std::runtime_error((TEXT)); \ - } -#else -#define VERIFY(CONDITION, TEXT) \ - do \ - { \ - (void)sizeof(CONDITION); \ - } while (0) -#endif - -ParallelADIOS1IOHandlerImpl::ParallelADIOS1IOHandlerImpl( - AbstractIOHandler *handler, json::TracingJSON json, MPI_Comm comm) - : Base_t{handler}, m_mpiInfo{MPI_INFO_NULL} -{ - int status = MPI_SUCCESS; - status = MPI_Comm_dup(comm, &m_mpiComm); - VERIFY( - status == MPI_SUCCESS, - "[ADIOS1] Internal error: Failed to duplicate MPI communicator"); - initJson(std::move(json)); -} - -ParallelADIOS1IOHandlerImpl::~ParallelADIOS1IOHandlerImpl() -{ - for (auto &f : m_openReadFileHandles) - close(f.second); - m_openReadFileHandles.clear(); - - if (this->m_handler->m_backendAccess != Access::READ_ONLY) - { - for (auto &group : m_attributeWrites) 
- for (auto &att : group.second) - flush_attribute(group.first, att.first, att.second); - - // unordered map caused the value of the same container - // stored with different orders in different processors. - // which caused trouble with close(), which is collective - // so I just sort by file name to force all processors close - // all the fids in the same order - std::map allFiles; - for (auto &f : m_openWriteFileHandles) - allFiles[*(f.first)] = f.second; - - for (auto const &p : allFiles) - { - auto const fid = p.second; - close(fid); - } - - m_openWriteFileHandles.clear(); - } - - int status; - MPI_Barrier(m_mpiComm); - status = adios_read_finalize_method(m_readMethod); - if (status != err_no_error) - std::cerr << "Internal error: Failed to finalize ADIOS reading method " - "(parallel)\n"; - - MPI_Barrier(m_mpiComm); - int rank = 0; - MPI_Comm_rank(m_mpiComm, &rank); - status = adios_finalize(rank); - if (status != err_no_error) - std::cerr << "Internal error: Failed to finalize ADIOS (parallel)\n"; - - MPI_Comm_free(&m_mpiComm); -} - -std::future ParallelADIOS1IOHandlerImpl::flush() -{ - using namespace auxiliary; - - auto handler = dynamic_cast(m_handler); - while (!handler->m_setup.empty()) - { - IOTask &i = handler->m_setup.front(); - try - { - switch (i.operation) - { - using O = Operation; - case O::CREATE_FILE: - createFile( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CHECK_FILE: - checkFile( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CREATE_PATH: - createPath( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::OPEN_PATH: - openPath( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::OPEN_FILE: - openFile( - 
i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::KEEP_SYNCHRONOUS: - keepSynchronous( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DEREGISTER: - deregister( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - default: - VERIFY( - false, - "[ADIOS1] Internal error: Wrong operation in ADIOS setup " - "queue"); - } - } - catch (...) - { - std::cerr << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString(i.operation) - << " failed with exception. Clearing IO queue and " - "passing on the exception." - << std::endl; - while (!m_handler->m_work.empty()) - { - m_handler->m_work.pop(); - } - throw; - } - handler->m_setup.pop(); - } - - while (!handler->m_work.empty()) - { - IOTask &i = handler->m_work.front(); - try - { - switch (i.operation) - { - using O = Operation; - case O::EXTEND_DATASET: - extendDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::CLOSE_FILE: - closeFile( - i.writable, - *dynamic_cast *>( - i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::READ_DATASET: - readDataset( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView( - 
i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::READ_ATT: - readAttribute( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::ADVANCE: - advance( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks( - i.writable, - deref_dynamic_cast>( - i.parameter.get())); - break; - default: - VERIFY( - false, - "[ADIOS1] Internal error: Wrong operation in ADIOS work " - "queue"); - } - } - catch (...) - { - std::cerr << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString(i.operation) - << " failed with exception. Clearing IO queue and " - "passing on the exception." 
- << std::endl; - while (!m_handler->m_work.empty()) - { - m_handler->m_work.pop(); - } - throw; - } - handler->m_work.pop(); - } - - int status; - for (auto &file : m_scheduledReads) - { - status = adios_perform_reads(file.first, 1); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to perform ADIOS reads during " - "dataset reading"); - - for (auto &sel : file.second) - adios_selection_delete(sel.selection); - } - m_scheduledReads.clear(); - - return std::future(); -} - -void ParallelADIOS1IOHandlerImpl::init() -{ - int status; - status = adios_init_noxml(m_mpiComm); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to initialize ADIOS"); - - /** @todo ADIOS_READ_METHOD_BP_AGGREGATE */ - m_readMethod = ADIOS_READ_METHOD_BP; - status = adios_read_init_method(m_readMethod, m_mpiComm, ""); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); -} - -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( - std::string path, Access at, json::TracingJSON json, MPI_Comm comm) - : AbstractIOHandler(std::move(path), at, comm) - , m_impl{new ParallelADIOS1IOHandlerImpl(this, std::move(json), comm)} -{ - m_impl->init(); -} - -ParallelADIOS1IOHandler::~ParallelADIOS1IOHandler() = default; - -std::future ParallelADIOS1IOHandler::flush(internal::ParsedFlushParams &) -{ - return m_impl->flush(); -} - -void ParallelADIOS1IOHandler::enqueue(IOTask const &i) -{ - switch (i.operation) - { - case Operation::CREATE_FILE: - case Operation::CHECK_FILE: - case Operation::CREATE_PATH: - case Operation::OPEN_PATH: - case Operation::CREATE_DATASET: - case Operation::OPEN_FILE: - case Operation::WRITE_ATT: - case Operation::KEEP_SYNCHRONOUS: - case Operation::DEREGISTER: - m_setup.push(i); - return; - default: - m_work.push(i); - return; - } -} - -int64_t ParallelADIOS1IOHandlerImpl::open_write(Writable *writable) -{ - auto res = m_filePaths.find(writable); - if (res == m_filePaths.end()) - res = 
m_filePaths.find(writable->parent); - - std::string mode; - if (m_existsOnDisk[res->second]) - { - mode = "u"; - /* close the handle that corresponds to the file we want to append to */ - if (m_openReadFileHandles.find(res->second) != - m_openReadFileHandles.end()) - { - close(m_openReadFileHandles[res->second]); - m_openReadFileHandles.erase(res->second); - } - } - else - { - mode = "w"; - m_existsOnDisk[res->second] = true; - } - - int64_t fd; - int status; - status = adios_open( - &fd, - res->second->c_str(), - res->second->c_str(), - mode.c_str(), - m_mpiComm); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to open_write ADIOS file"); - - return fd; -} - -ADIOS_FILE *ParallelADIOS1IOHandlerImpl::open_read(std::string const &name) -{ - ADIOS_FILE *f; - f = adios_read_open_file(name.c_str(), m_readMethod, m_mpiComm); - VERIFY( - adios_errno != err_file_not_found, - "[ADIOS1] Internal error: ADIOS file not found"); - VERIFY( - f != nullptr, - "[ADIOS1] Internal error: Failed to open_read ADIOS file"); - - return f; -} - -int64_t ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) -{ - std::stringstream params; - params << "num_aggregators=" - << getEnvNum("OPENPMD_ADIOS_NUM_AGGREGATORS", "1") - << ";num_ost=" << getEnvNum("OPENPMD_ADIOS_NUM_OST", "0") - << ";have_metadata_file=" - << getEnvNum("OPENPMD_ADIOS_HAVE_METADATA_FILE", "1") - << ";verbose=2"; - std::string params_str = params.str(); // important: copy out of temporary! 
- - int status; - int64_t group; - ADIOS_STATISTICS_FLAG noStatistics = adios_stat_no; - status = adios_declare_group(&group, name.c_str(), "", noStatistics); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to declare ADIOS group"); - status = - adios_select_method(group, "MPI_AGGREGATE", params_str.c_str(), ""); - VERIFY( - status == err_no_error, - "[ADIOS1] Internal error: Failed to select ADIOS method"); - return group; -} - -#else // openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -#if openPMD_HAVE_MPI -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( - std::string path, Access at, json::TracingJSON, MPI_Comm comm) - : AbstractIOHandler(std::move(path), at, comm) -{ - throw std::runtime_error("openPMD-api built without ADIOS1 support"); -} -#else -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( - std::string path, Access at, json::TracingJSON) - : AbstractIOHandler(std::move(path), at) -{ - throw std::runtime_error( - "openPMD-api built without parallel ADIOS1 support"); -} -#endif // openPMD_HAVE_MPI - -ParallelADIOS1IOHandler::~ParallelADIOS1IOHandler() = default; - -std::future ParallelADIOS1IOHandler::flush(internal::ParsedFlushParams &) -{ - return std::future(); -} - -#if openPMD_HAVE_ADIOS1 -void ParallelADIOS1IOHandler::enqueue(IOTask const &) -{} -#endif // openPMD_HAVE_ADIOS1 -#endif // openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -} // namespace openPMD diff --git a/src/IO/AbstractIOHandlerHelper.cpp b/src/IO/AbstractIOHandlerHelper.cpp index 27efe722c1..c58528f568 100644 --- a/src/IO/AbstractIOHandlerHelper.cpp +++ b/src/IO/AbstractIOHandlerHelper.cpp @@ -23,9 +23,7 @@ #include "openPMD/config.hpp" #include "openPMD/Error.hpp" -#include "openPMD/IO/ADIOS/ADIOS1IOHandler.hpp" #include "openPMD/IO/ADIOS/ADIOS2IOHandler.hpp" -#include "openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp" #include "openPMD/IO/DummyIOHandler.hpp" #include "openPMD/IO/HDF5/HDF5IOHandler.hpp" #include "openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp" @@ -62,22 +60,6 @@ 
namespace } throw "Unreachable"; } - - constexpr char const *adios1Deprecation = R"( -[Deprecation warning] - Development on the ADIOS1 IO library has ceased. - Support for ADIOS1 in the openPMD-api has been deprecated - and will be removed in a future version. - - Please consider switching to ADIOS2. - We recommend checking your ADIOS1 datasets for compatibility with ADIOS2. - Conversion of data from one backend to another may optionally be achieved - by using the `openpmd-pipe` tool.) - - Suppress this warning via `export OPENPMD_ADIOS_SUPPRESS_DEPRECATED_WARNING=1`.)"; - - constexpr char const *suppressAdios1DeprecationWarning = - "OPENPMD_ADIOS_SUPPRESS_DEPRECATED_WARNING"; } // namespace #if openPMD_HAVE_MPI @@ -97,18 +79,6 @@ std::unique_ptr createIOHandler( case Format::HDF5: return constructIOHandler( "HDF5", path, access, comm, std::move(options)); - case Format::ADIOS1: - if (auxiliary::getEnvNum(suppressAdios1DeprecationWarning, 0) == 0) - { - int rank; - MPI_Comm_rank(comm, &rank); - if (rank == 0) - { - std::cerr << adios1Deprecation << std::endl; - } - } - return constructIOHandler( - "ADIOS1", path, access, std::move(options), comm); case Format::ADIOS2_BP: return constructIOHandler( "ADIOS2", @@ -175,13 +145,6 @@ std::unique_ptr createIOHandler( case Format::HDF5: return constructIOHandler( "HDF5", path, access, std::move(options)); - case Format::ADIOS1: - if (auxiliary::getEnvNum(suppressAdios1DeprecationWarning, 0) == 0) - { - std::cerr << adios1Deprecation << std::endl; - } - return constructIOHandler( - "ADIOS1", path, access, std::move(options)); case Format::ADIOS2_BP: return constructIOHandler( "ADIOS2", diff --git a/src/Iteration.cpp b/src/Iteration.cpp index 26ab93940e..3e6560e4c9 100644 --- a/src/Iteration.cpp +++ b/src/Iteration.cpp @@ -219,20 +219,6 @@ void Iteration::flushFileBased( } else { - // operations for create mode - if ((IOHandler()->m_frontendAccess == Access::CREATE) && - ((IOHandler()->backendName() == "MPI_ADIOS1") || - 
(IOHandler()->backendName() == "ADIOS1"))) - { - Parameter fOpen; - fOpen.name = filename; - fOpen.encoding = IterationEncoding::fileBased; - IOHandler()->enqueue(IOTask(&s.writable(), fOpen)); - flush(flushParams); - - return; - } - // operations for read/read-write mode /* open file */ s.openIteration(i, *this); diff --git a/src/Series.cpp b/src/Series.cpp index 779ba97906..39c46f30ba 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -2155,7 +2155,6 @@ void Series::parseJsonOptions(TracingJSON &options, ParsedInput &input) { std::map const backendDescriptors{ {"hdf5", Format::HDF5}, - {"adios1", Format::ADIOS1}, {"adios2", Format::ADIOS2_BP}, {"json", Format::JSON}}; std::string backend; diff --git a/src/auxiliary/JSON.cpp b/src/auxiliary/JSON.cpp index f57cf7455a..c04e672ae6 100644 --- a/src/auxiliary/JSON.cpp +++ b/src/auxiliary/JSON.cpp @@ -501,7 +501,7 @@ std::optional asLowerCaseStringDynamic(nlohmann::json const &value) std::vector backendKeys() { - return {"adios1", "adios2", "json", "hdf5"}; + return {"adios2", "json", "hdf5"}; } void warnGlobalUnusedOptions(TracingJSON const &config) diff --git a/src/config.cpp b/src/config.cpp index b9e27be752..a44925287a 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -34,7 +34,7 @@ std::map openPMD::getVariants() {"mpi", bool(openPMD_HAVE_MPI)}, {"json", true}, {"hdf5", bool(openPMD_HAVE_HDF5)}, - {"adios1", bool(openPMD_HAVE_ADIOS1)}, + {"adios1", false}, {"adios2", bool(openPMD_HAVE_ADIOS2)}}; } @@ -42,7 +42,7 @@ std::vector openPMD::getFileExtensions() { std::vector fext; fext.emplace_back("json"); -#if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 +#if openPMD_HAVE_ADIOS2 fext.emplace_back("bp"); #endif #if openPMD_HAVE_ADIOS2 diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 496cb0a36f..f4ad2ce110 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -1095,25 +1095,6 @@ TEST_CASE("backend_via_json", "[core]") REQUIRE(auxiliary::directory_exists( 
"../samples/optionsViaJsonOverwritesAutomaticDetectionBp4.sst")); -#if openPMD_HAVE_ADIOS1 - setenv("OPENPMD_BP_BACKEND", "ADIOS1", 1); - { - /* - * ADIOS2 backend should be selected even if OPENPMD_BP_BACKEND is set - * as ADIOS1 - * JSON config overwrites environment variables - */ - Series series( - "../samples/optionsPreferJsonOverEnvVar.bp", - Access::CREATE, - R"({"backend": "ADIOS2"})"); - REQUIRE(series.backend() == "ADIOS2"); - } - // unset again - unsetenv("OPENPMD_BP_BACKEND"); - REQUIRE(auxiliary::directory_exists( - "../samples/optionsPreferJsonOverEnvVar.bp")); -#endif #endif std::string encodingFileBased = R"({"backend": "json", "iteration_encoding": "file_based"})"; @@ -1317,18 +1298,6 @@ TEST_CASE("DoConvert_single_value_to_vector", "[core]") TEST_CASE("unavailable_backend", "[core]") { -#if !openPMD_HAVE_ADIOS1 - { - auto fail = []() { - Series( - "unavailable.bp", Access::CREATE, R"({"backend": "ADIOS1"})"); - }; - REQUIRE_THROWS_WITH( - fail(), - "Wrong API usage: openPMD-api built without support for backend " - "'ADIOS1'."); - } -#endif #if !openPMD_HAVE_ADIOS2 { auto fail = []() { @@ -1341,7 +1310,7 @@ TEST_CASE("unavailable_backend", "[core]") "'ADIOS2'."); } #endif -#if !openPMD_HAVE_ADIOS1 && !openPMD_HAVE_ADIOS2 +#if !openPMD_HAVE_ADIOS2 { auto fail = []() { Series("unavailable.bp", Access::CREATE); }; REQUIRE_THROWS_WITH( diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index 5ace4a2cd4..b6621ea8f1 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -34,7 +34,7 @@ std::vector getBackends() // first component: backend file ending // second component: whether to test 128 bit values std::vector res; -#if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 +#if openPMD_HAVE_ADIOS2 res.emplace_back("bp"); #endif #if openPMD_HAVE_HDF5 @@ -71,14 +71,6 @@ TEST_CASE("parallel_multi_series_test", "[parallel]") auto myBackends = getBackends(); - // this test demonstrates an ADIOS1 (upstream) bug, comment this section to - // 
trigger it - auto const rmEnd = std::remove_if( - myBackends.begin(), myBackends.end(), [](std::string const &beit) { - return beit == "bp" && determineFormat("test.bp") == Format::ADIOS1; - }); - myBackends.erase(rmEnd, myBackends.end()); - // have multiple serial series alive at the same time for (auto const sn : {1, 2, 3}) { @@ -385,8 +377,7 @@ TEST_CASE("no_parallel_hdf5", "[parallel][hdf5]") #endif -// this one works for both ADIOS1 and ADIOS2 -#if (openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2) && openPMD_HAVE_MPI +#if openPMD_HAVE_ADIOS2 && openPMD_HAVE_MPI void available_chunks_test(std::string file_ending) { int r_mpi_rank{-1}, r_mpi_size{-1}; @@ -481,11 +472,6 @@ void extendDataset(std::string const &ext, std::string const &jsonConfig) std::iota(data2.begin(), data2.end(), 25); { Series write(filename, Access::CREATE, MPI_COMM_WORLD, jsonConfig); - if (ext == "bp" && write.backend() != "ADIOS2") - { - // dataset resizing unsupported in ADIOS1 - return; - } Dataset ds1{Datatype::INT, {mpi_size, 25}}; Dataset ds2{{mpi_size, 50}}; @@ -525,7 +511,7 @@ TEST_CASE("extend_dataset", "[parallel]") } #endif -#if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI +#if openPMD_HAVE_ADIOS2 && openPMD_HAVE_MPI TEST_CASE("adios_write_test", "[parallel][adios]") { Series o = @@ -538,7 +524,7 @@ TEST_CASE("adios_write_test", "[parallel][adios]") auto mpi_size = static_cast(size); auto mpi_rank = static_cast(rank); - o.setAuthor("Parallel ADIOS1"); + o.setAuthor("Parallel ADIOS2"); ParticleSpecies &e = o.iterations[1].particles["e"]; std::vector position_global(mpi_size); @@ -587,7 +573,7 @@ TEST_CASE("adios_write_test_skip_declare", "[parallel][adios]") write_test_zero_extent(true, "bp", false, false); } -TEST_CASE("hzdr_adios_sample_content_test", "[parallel][adios1]") +TEST_CASE("hzdr_adios_sample_content_test", "[parallel][adios2][bp3]") { int mpi_rank{-1}; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); @@ -720,7 +706,6 @@ void close_iteration_test(std::string file_ending) std::vector 
data{2, 4, 6, 8}; // { // we do *not* need these parentheses Series write(name, Access::CREATE, MPI_COMM_WORLD); - bool isAdios1 = write.backend() == "MPI_ADIOS1"; { Iteration it0 = write.iterations[0]; auto E_x = it0.meshes["E"]["x"]; @@ -731,14 +716,6 @@ void close_iteration_test(std::string file_ending) write.flush(); // } - if (isAdios1) - { - // run a simplified test for Adios1 since Adios1 has issues opening - // twice in the same process - REQUIRE(auxiliary::file_exists( - "../samples/close_iterations_parallel_0.bp")); - } - else { Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); Iteration it0 = read.iterations[0]; @@ -764,14 +741,6 @@ void close_iteration_test(std::string file_ending) REQUIRE_THROWS(write.flush()); } - if (isAdios1) - { - // run a simplified test for Adios1 since Adios1 has issues opening - // twice in the same process - REQUIRE(auxiliary::file_exists( - "../samples/close_iterations_parallel_1.bp")); - } - else { Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); Iteration it1 = read.iterations[1]; @@ -1075,11 +1044,6 @@ void adios2_streaming(bool variableBasedLayout) int rank{-1}; MPI_Comm_size(MPI_COMM_WORLD, &size); MPI_Comm_rank(MPI_COMM_WORLD, &rank); - if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") - { - // run this test for ADIOS2 only - return; - } if (size < 2 || rank > 1) { @@ -1179,11 +1143,6 @@ TEST_CASE("adios2_streaming", "[pseudoserial][adios2]") TEST_CASE("parallel_adios2_json_config", "[parallel][adios2]") { - if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") - { - // run this test for ADIOS2 only - return; - } int size{-1}; int rank{-1}; MPI_Comm_size(MPI_COMM_WORLD, &size); @@ -1327,11 +1286,6 @@ void adios2_ssc() int global_rank{-1}; MPI_Comm_size(MPI_COMM_WORLD, &global_size); MPI_Comm_rank(MPI_COMM_WORLD, &global_rank); - if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") - { - // run this test for ADIOS2 only - return; - } if 
(global_size < 2) { @@ -1489,16 +1443,6 @@ void append_mode( { write.setIterationEncoding(IterationEncoding::variableBased); } - if (write.backend() == "MPI_ADIOS1") - { - REQUIRE_THROWS_WITH( - write.flush(), - Catch::Equals( - "Operation unsupported in ADIOS1: Appending to existing " - "file on disk (use Access::CREATE to overwrite)")); - // destructor will be noisy now - return; - } writeSomeIterations( write.writeIterations(), std::vector{3, 2}); @@ -1518,16 +1462,6 @@ void append_mode( { write.setIterationEncoding(IterationEncoding::variableBased); } - if (write.backend() == "MPI_ADIOS1") - { - REQUIRE_THROWS_WITH( - write.flush(), - Catch::Equals( - "Operation unsupported in ADIOS1: Appending to existing " - "file on disk (use Access::CREATE to overwrite)")); - // destructor will be noisy now - return; - } writeSomeIterations( write.writeIterations(), std::vector{4, 3, 10}); @@ -1540,13 +1474,6 @@ void append_mode( { write.setIterationEncoding(IterationEncoding::variableBased); } - if (write.backend() == "MPI_ADIOS1") - { - REQUIRE_THROWS_AS( - write.flush(), error::OperationUnsupportedInBackend); - // destructor will be noisy now - return; - } writeSomeIterations( write.writeIterations(), std::vector{7, 1, 11}); @@ -1681,17 +1608,6 @@ void append_mode( { write.setIterationEncoding(IterationEncoding::variableBased); } - if (write.backend() == "ADIOS1") - { - REQUIRE_THROWS_WITH( - write.flush(), - Catch::Equals( - "Operation unsupported in ADIOS1: Appending to " - "existing " - "file on disk (use Access::CREATE to overwrite)")); - // destructor will be noisy now - return; - } writeSomeIterations( write.writeIterations(), std::vector{4, 5}); @@ -1808,21 +1724,6 @@ TEST_CASE("append_mode", "[serial]") TEST_CASE("unavailable_backend", "[core][parallel]") { -#if !openPMD_HAVE_ADIOS1 - { - auto fail = []() { - Series( - "unavailable.bp", - Access::CREATE, - MPI_COMM_WORLD, - R"({"backend": "ADIOS1"})"); - }; - REQUIRE_THROWS_WITH( - fail(), - "Wrong API usage: 
openPMD-api built without support for backend " - "'ADIOS1'."); - } -#endif #if !openPMD_HAVE_ADIOS2 { auto fail = []() { @@ -1838,7 +1739,7 @@ TEST_CASE("unavailable_backend", "[core][parallel]") "'ADIOS2'."); } #endif -#if !openPMD_HAVE_ADIOS1 && !openPMD_HAVE_ADIOS2 +#if !openPMD_HAVE_ADIOS2 { auto fail = []() { Series("unavailable.bp", Access::CREATE, MPI_COMM_WORLD); diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 3d48b01789..0607a19a4a 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -57,10 +57,7 @@ std::vector testedBackends() { auto variants = getVariants(); std::map extensions{ - {"json", "json"}, - {"adios1", "adios1.bp"}, - {"adios2", "bp"}, - {"hdf5", "h5"}}; + {"json", "json"}, {"adios2", "bp"}, {"hdf5", "h5"}}; std::vector res; for (auto const &pair : variants) { @@ -175,10 +172,6 @@ TEST_CASE("adios2_char_portability", "[serial][adios2]") } { - if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "ADIOS2") != "ADIOS2") - { - return; - } Series read( "../samples/adios2_char_portability.bp", Access::READ_ONLY, config); auto signedVectorAttribute = read.getAttribute("signedVector"); @@ -301,16 +294,6 @@ TEST_CASE("multi_series_test", "[serial]") auto myfileExtensions = testedFileExtensions(); - // this test demonstrates an ADIOS1 (upstream) bug, comment this section to - // trigger it - auto const rmEnd = std::remove_if( - myfileExtensions.begin(), - myfileExtensions.end(), - [](std::string const &beit) { - return beit == "bp" && determineFormat("test.bp") == Format::ADIOS1; - }); - myfileExtensions.erase(rmEnd, myfileExtensions.end()); - // have multiple serial series alive at the same time for (auto const sn : {1, 2, 3}) { @@ -642,9 +625,6 @@ void close_iteration_interleaved_test( TEST_CASE("close_iteration_interleaved_test", "[serial]") { - bool const bp_prefer_adios1 = - (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1"); - for (auto const &t : testedFileExtensions()) { 
close_iteration_interleaved_test(t, IterationEncoding::fileBased); @@ -653,8 +633,6 @@ TEST_CASE("close_iteration_interleaved_test", "[serial]") // run this test for ADIOS2 & JSON only if (t == "h5") continue; - if (t == "bp" && bp_prefer_adios1) - continue; close_iteration_interleaved_test(t, IterationEncoding::variableBased); } } @@ -4400,8 +4378,6 @@ TEST_CASE("adios2_engines_and_file_endings") std::string const &requiredEngine, std::string const &filesystemExt, std::string const &jsonCfg = "{}") mutable { - // Env. var. OPENPMD_BP_BACKEND does not matter for this test as - // we always override it in the JSON config auto basename = "../samples/file_endings/groupbased" + std::to_string(filenameCounter++); auto name = basename + ext; @@ -4480,10 +4456,7 @@ TEST_CASE("adios2_engines_and_file_endings") { Series write(name, Access::CREATE, jsonCfg); } - bool isThisADIOS1 = - auxiliary::getEnvString("OPENPMD_BP_BACKEND", "") == "ADIOS1" && - ext == ".bp"; - if (directory && !isThisADIOS1) + if (directory) { REQUIRE(auxiliary::directory_exists(filesystemname)); } @@ -4495,10 +4468,8 @@ TEST_CASE("adios2_engines_and_file_endings") Series read( name, Access::READ_ONLY, - isThisADIOS1 - ? "backend = \"adios1\"" - : "backend = \"adios2\"\nadios2.engine.type = \"" + - requiredEngine + "\""); + "backend = \"adios2\"\nadios2.engine.type = \"" + + requiredEngine + "\""); } }; @@ -4630,10 +4601,7 @@ TEST_CASE("adios2_engines_and_file_endings") Series write(name, Access::CREATE, jsonCfg); write.writeIterations()[0]; } - bool isThisADIOS1 = - auxiliary::getEnvString("OPENPMD_BP_BACKEND", "") == "ADIOS1" && - ext == ".bp"; - if (directory && !isThisADIOS1) + if (directory) { REQUIRE(auxiliary::directory_exists(filesystemname)); } @@ -4655,10 +4623,8 @@ TEST_CASE("adios2_engines_and_file_endings") Series read( name, Access::READ_ONLY, - isThisADIOS1 - ? 
"backend = \"adios1\"" - : "backend = \"adios2\"\nadios2.engine.type = \"" + - requiredEngine + "\""); + "backend = \"adios2\"\nadios2.engine.type = \"" + + requiredEngine + "\""); } }; @@ -4695,11 +4661,6 @@ TEST_CASE("adios2_engines_and_file_endings") TEST_CASE("serial_adios2_backend_config", "[serial][adios2]") { - if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") - { - // run this test for ADIOS2 only - return; - } std::string writeConfigBP3 = R"END( unused = "global parameter" diff --git a/test/python/unittest/API/APITest.py b/test/python/unittest/API/APITest.py index db1948ef3a..0aa71cf7a8 100644 --- a/test/python/unittest/API/APITest.py +++ b/test/python/unittest/API/APITest.py @@ -220,17 +220,12 @@ def attributeRoundTrip(self, file_ending): np.array([4.5, 6.7], dtype=np.double)) series.set_attribute("nparr_longdouble", np.array([8.9, 7.6], dtype=np.longdouble)) - # note: looks like ADIOS 1.13.1 cannot write arrays of complex - # as attributes (writes 1st value for single and crashes - # in write for complex double) - # https://github.com/ornladios/ADIOS/issues/212 - if series.backend != "ADIOS1": - series.set_attribute("nparr_csingle", - np.array([1.2 - 0.3j, 2.3 + 4.2j], - dtype=np.complex64)) - series.set_attribute("nparr_cdouble", - np.array([4.5 + 1.1j, 6.7 - 2.2j], - dtype=np.complex128)) + series.set_attribute("nparr_csingle", + np.array([1.2 - 0.3j, 2.3 + 4.2j], + dtype=np.complex64)) + series.set_attribute("nparr_cdouble", + np.array([4.5 + 1.1j, 6.7 - 2.2j], + dtype=np.complex128)) if file_ending not in ["bp", "bp4", "bp5"]: series.set_attribute("nparr_clongdouble", np.array([8.9 + 7.8j, 7.6 + 9.2j], @@ -335,15 +330,13 @@ def attributeRoundTrip(self, file_ending): series.get_attribute("nparr_double"), [4.5, 6.7]) np.testing.assert_almost_equal( series.get_attribute("nparr_longdouble"), [8.9, 7.6]) - # see https://github.com/ornladios/ADIOS/issues/212 - if series.backend != "ADIOS1": - np.testing.assert_almost_equal( - 
series.get_attribute("nparr_csingle"), - np.array([1.2 - 0.3j, 2.3 + 4.2j], - dtype=np.complex64)) - np.testing.assert_almost_equal( - series.get_attribute("nparr_cdouble"), - [4.5 + 1.1j, 6.7 - 2.2j]) + np.testing.assert_almost_equal( + series.get_attribute("nparr_csingle"), + np.array([1.2 - 0.3j, 2.3 + 4.2j], + dtype=np.complex64)) + np.testing.assert_almost_equal( + series.get_attribute("nparr_cdouble"), + [4.5 + 1.1j, 6.7 - 2.2j]) # not in ADIOS 1.13.1 nor ADIOS 2.7.0 if file_ending not in ["bp", "bp4", "bp5"]: np.testing.assert_almost_equal( @@ -811,7 +804,6 @@ def testEmptyRecords(self): backend_filesupport = { 'json': 'json', 'hdf5': 'h5', - 'adios1': 'bp', 'adios2': 'bp' } for b in io.variants: @@ -1709,25 +1701,22 @@ def makeCloseIterationRoundTrip(self, file_ending): E_x = it0.meshes["E"]["x"] E_x.reset_dataset(DS(np.dtype("int"), extent)) E_x.store_chunk(data, [0], extent) - is_adios1 = series.backend == 'ADIOS1' it0.close(flush=True) - # not supported in ADIOS1: can only open one ADIOS1 series at a time - if not is_adios1: - read = io.Series( - "../samples/unittest_closeIteration_%T." + file_ending, - io.Access_Type.read_only - ) - it0 = read.iterations[0] - E_x = it0.meshes["E"]["x"] - chunk = E_x.load_chunk([0], extent) - it0.close() # flush = True <- default argument + read = io.Series( + "../samples/unittest_closeIteration_%T." 
+ file_ending, + io.Access_Type.read_only + ) + it0 = read.iterations[0] + E_x = it0.meshes["E"]["x"] + chunk = E_x.load_chunk([0], extent) + it0.close() # flush = True <- default argument - for i in range(len(data)): - self.assertEqual(data[i], chunk[i]) - self.assertTrue(read) - read.close() - self.assertFalse(read) + for i in range(len(data)): + self.assertEqual(data[i], chunk[i]) + self.assertTrue(read) + read.close() + self.assertFalse(read) it1 = series.iterations[1] E_x = it1.meshes["E"]["x"] @@ -1736,22 +1725,21 @@ def makeCloseIterationRoundTrip(self, file_ending): it1.close(flush=False) series.flush() - if not is_adios1: - read = io.Series( - "../samples/unittest_closeIteration_%T." + file_ending, - io.Access_Type.read_only - ) - it1 = read.iterations[1] - E_x = it1.meshes["E"]["x"] - chunk = E_x.load_chunk([0], extent) - it1.close(flush=False) - read.flush() + read = io.Series( + "../samples/unittest_closeIteration_%T." + file_ending, + io.Access_Type.read_only + ) + it1 = read.iterations[1] + E_x = it1.meshes["E"]["x"] + chunk = E_x.load_chunk([0], extent) + it1.close(flush=False) + read.flush() - for i in range(len(data)): - self.assertEqual(data[i], chunk[i]) - self.assertTrue(read) - read.close() - self.assertFalse(read) + for i in range(len(data)): + self.assertEqual(data[i], chunk[i]) + self.assertTrue(read) + read.close() + self.assertFalse(read) def testCloseIteration(self): for ext in tested_file_extensions: @@ -1829,7 +1817,6 @@ def testIterator(self): backend_filesupport = { 'json': 'json', 'hdf5': 'h5', - 'adios1': 'bp', 'adios2': 'bp' } for b in io.variants: @@ -2003,9 +1990,6 @@ def testJsonConfigADIOS2(self): "../samples/unittest_jsonConfiguredBP3.bp", io.Access_Type.create, global_config) - if series.backend != 'ADIOS2': - # might happen, if env. var. 
OPENPMD_BP_BACKEND is used - return DS = io.Dataset data = np.array(range(1000), dtype=np.dtype("double")) From 413b1c54004bb6c653e787e9954b4a4c0c4e6272 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Thu, 13 Apr 2023 17:59:41 +0200 Subject: [PATCH 72/82] RecordComponent: Properly handle uninitialized datasets (#1316) * Use a std::optional for BaseRecordComponentData::m_dataset * Fix the bugs found by this check Fix the examples, the unfinished_iteratoin_test and the Coretests * Don't flush in JSONIOHandlerImpl destructor It's not needed, and it avoids weird situations while recovering from an error. * Accept undefined dataset as long as no chunks are written yet Just ignore and skip the component in that case --- examples/7_extended_write_serial.cpp | 5 +- examples/7_extended_write_serial.py | 4 +- examples/9_particle_write_serial.py | 4 +- include/openPMD/RecordComponent.tpp | 8 +- .../openPMD/backend/BaseRecordComponent.hpp | 4 +- src/IO/JSON/JSONIOHandlerImpl.cpp | 18 +---- src/RecordComponent.cpp | 73 ++++++++++++++----- src/backend/BaseRecordComponent.cpp | 28 ++++++- src/backend/PatchRecordComponent.cpp | 31 +++++++- test/CoreTest.cpp | 28 ++++--- test/SerialIOTest.cpp | 8 ++ 11 files changed, 153 insertions(+), 58 deletions(-) diff --git a/examples/7_extended_write_serial.cpp b/examples/7_extended_write_serial.cpp index bfb64e1fff..580894a34f 100644 --- a/examples/7_extended_write_serial.cpp +++ b/examples/7_extended_write_serial.cpp @@ -83,8 +83,9 @@ int main() {{io::UnitDimension::M, 1}}); electrons["displacement"]["x"].setUnitSI(1e-6); electrons.erase("displacement"); - electrons["weighting"][io::RecordComponent::SCALAR].makeConstant( - 1.e-5); + electrons["weighting"][io::RecordComponent::SCALAR] + .resetDataset({io::Datatype::FLOAT, {1}}) + .makeConstant(1.e-5); } io::Mesh mesh = cur_it.meshes["lowRez_2D_field"]; diff --git a/examples/7_extended_write_serial.py b/examples/7_extended_write_serial.py index 84ca5002db..e16b4b993a 
100755 --- a/examples/7_extended_write_serial.py +++ b/examples/7_extended_write_serial.py @@ -90,7 +90,9 @@ electrons["displacement"].unit_dimension = {Unit_Dimension.M: 1} electrons["displacement"]["x"].unit_SI = 1.e-6 del electrons["displacement"] - electrons["weighting"][SCALAR].make_constant(1.e-5) + electrons["weighting"][SCALAR] \ + .reset_dataset(Dataset(np.dtype("float32"), extent=[1])) \ + .make_constant(1.e-5) mesh = cur_it.meshes["lowRez_2D_field"] mesh.axis_labels = ["x", "y"] diff --git a/examples/9_particle_write_serial.py b/examples/9_particle_write_serial.py index aebd266528..0109a48a57 100644 --- a/examples/9_particle_write_serial.py +++ b/examples/9_particle_write_serial.py @@ -44,7 +44,9 @@ # don't like it anymore? remove it with: # del electrons["displacement"] - electrons["weighting"][SCALAR].make_constant(1.e-5) + electrons["weighting"][SCALAR] \ + .reset_dataset(Dataset(np.dtype("float32"), extent=[1])) \ + .make_constant(1.e-5) particlePos_x = np.random.rand(234).astype(np.float32) particlePos_y = np.random.rand(234).astype(np.float32) diff --git a/include/openPMD/RecordComponent.tpp b/include/openPMD/RecordComponent.tpp index 4256fc6fa8..00c67dcf2a 100644 --- a/include/openPMD/RecordComponent.tpp +++ b/include/openPMD/RecordComponent.tpp @@ -332,7 +332,13 @@ RecordComponent::storeChunk(Offset o, Extent e, F &&createBuffer) dCreate.name = rc.m_name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); - dCreate.options = rc.m_dataset.options; + if (!rc.m_dataset.has_value()) + { + throw error::WrongAPIUsage( + "[RecordComponent] Must specify dataset type and extent before " + "using storeChunk() (see RecordComponent::resetDataset())."); + } + dCreate.options = rc.m_dataset.value().options; IOHandler()->enqueue(IOTask(this, dCreate)); } Parameter getBufferView; diff --git a/include/openPMD/backend/BaseRecordComponent.hpp b/include/openPMD/backend/BaseRecordComponent.hpp index a2ea09e23e..fd8279eb05 100644 --- 
a/include/openPMD/backend/BaseRecordComponent.hpp +++ b/include/openPMD/backend/BaseRecordComponent.hpp @@ -24,6 +24,8 @@ #include "openPMD/Error.hpp" #include "openPMD/backend/Attributable.hpp" +#include + // expose private and protected members for invasive testing #ifndef OPENPMD_protected #define OPENPMD_protected protected: @@ -39,7 +41,7 @@ namespace internal /** * The type and extent of the dataset defined by this component. */ - Dataset m_dataset{Datatype::UNDEFINED, {}}; + std::optional m_dataset; /** * True if this is defined as a constant record component as specified * in the openPMD standard. diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index a9e86f0cde..e17e4929eb 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -58,23 +58,7 @@ JSONIOHandlerImpl::JSONIOHandlerImpl(AbstractIOHandler *handler) : AbstractIOHandlerImpl(handler) {} -JSONIOHandlerImpl::~JSONIOHandlerImpl() -{ - // we must not throw in a destructor - try - { - flush(); - } - catch (std::exception const &ex) - { - std::cerr << "[~JSONIOHandlerImpl] An error occurred: " << ex.what() - << std::endl; - } - catch (...) - { - std::cerr << "[~JSONIOHandlerImpl] An error occurred." 
<< std::endl; - } -} +JSONIOHandlerImpl::~JSONIOHandlerImpl() = default; std::future JSONIOHandlerImpl::flush() { diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index 164b38d127..a54373be59 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -42,7 +42,6 @@ namespace internal RecordComponent impl{ std::shared_ptr{this, [](auto const *) {}}}; impl.setUnitSI(1); - impl.resetDataset(Dataset(Datatype::CHAR, {1})); } } // namespace internal @@ -71,11 +70,17 @@ RecordComponent &RecordComponent::resetDataset(Dataset d) auto &rc = get(); if (written()) { + if (!rc.m_dataset.has_value()) + { + throw error::Internal( + "Internal control flow error: Written record component must " + "have defined datatype and extent."); + } if (d.dtype == Datatype::UNDEFINED) { - d.dtype = rc.m_dataset.dtype; + d.dtype = rc.m_dataset.value().dtype; } - else if (d.dtype != rc.m_dataset.dtype) + else if (d.dtype != rc.m_dataset.value().dtype) { throw std::runtime_error( "Cannot change the datatype of a dataset."); @@ -99,7 +104,7 @@ RecordComponent &RecordComponent::resetDataset(Dataset d) rc.m_isEmpty = false; if (written()) { - rc.m_dataset.extend(std::move(d.extent)); + rc.m_dataset.value().extend(std::move(d.extent)); } else { @@ -112,12 +117,28 @@ RecordComponent &RecordComponent::resetDataset(Dataset d) uint8_t RecordComponent::getDimensionality() const { - return get().m_dataset.rank; + auto &rc = get(); + if (rc.m_dataset.has_value()) + { + return rc.m_dataset.value().rank; + } + else + { + return 1; + } } Extent RecordComponent::getExtent() const { - return get().m_dataset.extent; + auto &rc = get(); + if (rc.m_dataset.has_value()) + { + return rc.m_dataset.value().extent; + } + else + { + return {1}; + } } namespace detail @@ -149,6 +170,12 @@ RecordComponent &RecordComponent::makeEmpty(Dataset d) auto &rc = get(); if (written()) { + if (!rc.m_dataset.has_value()) + { + throw error::Internal( + "Internal control flow error: Written record component 
must " + "have defined datatype and extent."); + } if (!constant()) { throw std::runtime_error( @@ -158,14 +185,14 @@ RecordComponent &RecordComponent::makeEmpty(Dataset d) } if (d.dtype == Datatype::UNDEFINED) { - d.dtype = rc.m_dataset.dtype; + d.dtype = rc.m_dataset.value().dtype; } - else if (d.dtype != rc.m_dataset.dtype) + else if (d.dtype != rc.m_dataset.value().dtype) { throw std::runtime_error( "Cannot change the datatype of a dataset."); } - rc.m_dataset.extend(std::move(d.extent)); + rc.m_dataset.value().extend(std::move(d.extent)); rc.m_hasBeenExtended = true; } else @@ -173,7 +200,7 @@ RecordComponent &RecordComponent::makeEmpty(Dataset d) rc.m_dataset = std::move(d); } - if (rc.m_dataset.extent.size() == 0) + if (rc.m_dataset.value().extent.size() == 0) throw std::runtime_error("Dataset extent must be at least 1D."); rc.m_isEmpty = true; @@ -181,7 +208,7 @@ RecordComponent &RecordComponent::makeEmpty(Dataset d) if (!written()) { switchType >( - rc.m_dataset.dtype, *this); + rc.m_dataset.value().dtype, *this); } return *this; } @@ -213,11 +240,23 @@ void RecordComponent::flush( /* * This catches when a user forgets to use resetDataset. */ - if (rc.m_dataset.dtype == Datatype::UNDEFINED) + if (!rc.m_dataset.has_value()) { - throw error::WrongAPIUsage( - "[RecordComponent] Must set specific datatype (Use " - "resetDataset call)."); + // The check for !written() is technically not needed, just + // defensive programming against internal bugs that go on us. + if (!written() && rc.m_chunks.empty()) + { + // No data written yet, just accessed the object so far without + // doing anything + // Just do nothing and skip this record component. 
+ return; + } + else + { + throw error::WrongAPIUsage( + "[RecordComponent] Must specify dataset type and extent " + "before flushing (see RecordComponent::resetDataset())."); + } } if (!written()) { @@ -243,7 +282,7 @@ void RecordComponent::flush( dCreate.name = name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); - dCreate.options = rc.m_dataset.options; + dCreate.options = rc.m_dataset.value().options; IOHandler()->enqueue(IOTask(this, dCreate)); } } @@ -262,7 +301,7 @@ void RecordComponent::flush( else { Parameter pExtend; - pExtend.extent = rc.m_dataset.extent; + pExtend.extent = rc.m_dataset.value().extent; IOHandler()->enqueue(IOTask(this, std::move(pExtend))); rc.m_hasBeenExtended = false; } diff --git a/src/backend/BaseRecordComponent.cpp b/src/backend/BaseRecordComponent.cpp index 4460b7ede0..79890b95ab 100644 --- a/src/backend/BaseRecordComponent.cpp +++ b/src/backend/BaseRecordComponent.cpp @@ -35,13 +35,29 @@ BaseRecordComponent &BaseRecordComponent::resetDatatype(Datatype d) "A Records Datatype can not (yet) be changed after it has been " "written."); - get().m_dataset.dtype = d; + auto &rc = get(); + if (rc.m_dataset.has_value()) + { + rc.m_dataset.value().dtype = d; + } + else + { + rc.m_dataset = Dataset{d, {1}}; + } return *this; } Datatype BaseRecordComponent::getDatatype() const { - return get().m_dataset.dtype; + auto &rc = get(); + if (rc.m_dataset.has_value()) + { + return rc.m_dataset.value().dtype; + } + else + { + return Datatype::UNDEFINED; + } } bool BaseRecordComponent::constant() const @@ -54,8 +70,12 @@ ChunkTable BaseRecordComponent::availableChunks() auto &rc = get(); if (rc.m_isConstant) { - Offset offset(rc.m_dataset.extent.size(), 0); - return ChunkTable{{std::move(offset), rc.m_dataset.extent}}; + if (!rc.m_dataset.has_value()) + { + return ChunkTable{}; + } + Offset offset(rc.m_dataset.value().extent.size(), 0); + return ChunkTable{{std::move(offset), rc.m_dataset.value().extent}}; } containingIteration().open(); 
Parameter param; diff --git a/src/backend/PatchRecordComponent.cpp b/src/backend/PatchRecordComponent.cpp index e1477ef7bd..c418eb5fed 100644 --- a/src/backend/PatchRecordComponent.cpp +++ b/src/backend/PatchRecordComponent.cpp @@ -67,7 +67,15 @@ uint8_t PatchRecordComponent::getDimensionality() const Extent PatchRecordComponent::getExtent() const { - return get().m_dataset.extent; + auto &rc = get(); + if (rc.m_dataset.has_value()) + { + return rc.m_dataset.value().extent; + } + else + { + return {1}; + } } PatchRecordComponent::PatchRecordComponent() : BaseRecordComponent{nullptr} @@ -94,13 +102,32 @@ void PatchRecordComponent::flush( } else { + if (!rc.m_dataset.has_value()) + { + // The check for !written() is technically not needed, just + // defensive programming against internal bugs that go on us. + if (!written() && rc.m_chunks.empty()) + { + // No data written yet, just accessed the object so far without + // doing anything + // Just do nothing and skip this record component. + return; + } + else + { + throw error::WrongAPIUsage( + "[PatchRecordComponent] Must specify dataset type and " + "extent before flushing (see " + "RecordComponent::resetDataset())."); + } + } if (!written()) { Parameter dCreate; dCreate.name = name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); - dCreate.options = rc.m_dataset.options; + dCreate.options = rc.m_dataset.value().options; IOHandler()->enqueue(IOTask(this, dCreate)); } diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index f4ad2ce110..8c34faf29c 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -25,6 +25,8 @@ using namespace openPMD; +Dataset globalDataset(Datatype::CHAR, {1}); + TEST_CASE("versions_test", "[core]") { auto const apiVersion = getVersion(); @@ -439,11 +441,11 @@ TEST_CASE("record_constructor_test", "[core]") ps["position"][RecordComponent::SCALAR].resetDataset(dset); ps["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); - REQUIRE(r["x"].unitSI() == 1); + 
REQUIRE(r["x"].resetDataset(dset).unitSI() == 1); REQUIRE(r["x"].numAttributes() == 1); /* unitSI */ - REQUIRE(r["y"].unitSI() == 1); + REQUIRE(r["y"].resetDataset(dset).unitSI() == 1); REQUIRE(r["y"].numAttributes() == 1); /* unitSI */ - REQUIRE(r["z"].unitSI() == 1); + REQUIRE(r["z"].resetDataset(dset).unitSI() == 1); REQUIRE(r["z"].numAttributes() == 1); /* unitSI */ std::array zeros{{0., 0., 0., 0., 0., 0., 0.}}; REQUIRE(r.unitDimension() == zeros); @@ -488,13 +490,15 @@ TEST_CASE("recordComponent_modification_test", "[core]") r["x"].setUnitSI(2.55999e-7); r["y"].setUnitSI(4.42999e-8); - REQUIRE(r["x"].unitSI() == static_cast(2.55999e-7)); + REQUIRE( + r["x"].resetDataset(dset).unitSI() == static_cast(2.55999e-7)); REQUIRE(r["x"].numAttributes() == 1); /* unitSI */ - REQUIRE(r["y"].unitSI() == static_cast(4.42999e-8)); + REQUIRE( + r["y"].resetDataset(dset).unitSI() == static_cast(4.42999e-8)); REQUIRE(r["y"].numAttributes() == 1); /* unitSI */ r["z"].setUnitSI(1); - REQUIRE(r["z"].unitSI() == static_cast(1)); + REQUIRE(r["z"].resetDataset(dset).unitSI() == static_cast(1)); REQUIRE(r["z"].numAttributes() == 1); /* unitSI */ } @@ -505,13 +509,13 @@ TEST_CASE("mesh_constructor_test", "[core]") Mesh &m = o.iterations[42].meshes["E"]; std::vector pos{0}; - REQUIRE(m["x"].unitSI() == 1); + REQUIRE(m["x"].resetDataset(globalDataset).unitSI() == 1); REQUIRE(m["x"].numAttributes() == 2); /* unitSI, position */ REQUIRE(m["x"].position() == pos); - REQUIRE(m["y"].unitSI() == 1); + REQUIRE(m["y"].resetDataset(globalDataset).unitSI() == 1); REQUIRE(m["y"].numAttributes() == 2); /* unitSI, position */ REQUIRE(m["y"].position() == pos); - REQUIRE(m["z"].unitSI() == 1); + REQUIRE(m["z"].resetDataset(globalDataset).unitSI() == 1); REQUIRE(m["z"].numAttributes() == 2); /* unitSI, position */ REQUIRE(m["z"].position() == pos); REQUIRE(m.geometry() == Mesh::Geometry::cartesian); @@ -534,9 +538,9 @@ TEST_CASE("mesh_modification_test", "[core]") Series o = 
Series("./MyOutput_%T.json", Access::CREATE); Mesh &m = o.iterations[42].meshes["E"]; - m["x"]; - m["y"]; - m["z"]; + m["x"].resetDataset(globalDataset); + m["y"].resetDataset(globalDataset); + m["z"].resetDataset(globalDataset); m.setGeometry(Mesh::Geometry::spherical); REQUIRE(m.geometry() == Mesh::Geometry::spherical); diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 0607a19a4a..ce37e4f352 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -6430,11 +6430,19 @@ void unfinished_iteration_test( */ it5.setAttribute("__openPMD_internal_fail", "asking for trouble"); auto it10 = write.writeIterations()[10]; + Dataset ds(Datatype::INT, {10}); auto E_x = it10.meshes["E"]["x"]; auto e_density = it10.meshes["e_density"][RecordComponent::SCALAR]; auto electron_x = it10.particles["e"]["position"]["x"]; auto electron_mass = it10.particles["e"]["mass"][RecordComponent::SCALAR]; + + RecordComponent *resetThese[] = { + &E_x, &e_density, &electron_x, &electron_mass}; + for (RecordComponent *rc : resetThese) + { + rc->resetDataset(ds); + } } auto tryReading = [&config, file, encoding]( Access access, From 3acf5659f16e9f66c0f6b917253c6e4382c8245f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Thu, 13 Apr 2023 18:00:47 +0200 Subject: [PATCH 73/82] Python bindings: Release GIL during IO wait operations (#1381) * Release GIL: Easy cases * ReadIterations iterator * Iteration::open, Iteration::close * Some fixes * Ensure that first iteration is seen in Python API --- include/openPMD/Iteration.hpp | 20 +++++++ include/openPMD/ReadIterations.hpp | 20 ------- include/openPMD/WriteIterations.hpp | 5 ++ src/WriteIterations.cpp | 34 +++++++++-- src/binding/python/Iteration.cpp | 16 +++++- src/binding/python/Series.cpp | 88 +++++++++++++++++++++++++++-- 6 files changed, 151 insertions(+), 32 deletions(-) diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index 3c7fffc545..fd1f14cd00 100644 --- 
a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -420,4 +420,24 @@ inline T Iteration::dt() const { return this->readFloatingpoint("dt"); } + +/** + * @brief Subclass of Iteration that knows its own index withing the containing + * Series. + */ +class IndexedIteration : public Iteration +{ + friend class SeriesIterator; + friend class WriteIterations; + +public: + using index_t = Iteration::IterationIndex_t; + index_t const iterationIndex; + +private: + template + IndexedIteration(Iteration_t &&it, index_t index) + : Iteration(std::forward(it)), iterationIndex(index) + {} +}; } // namespace openPMD diff --git a/include/openPMD/ReadIterations.hpp b/include/openPMD/ReadIterations.hpp index c6a1e4fc36..c381cdd62b 100644 --- a/include/openPMD/ReadIterations.hpp +++ b/include/openPMD/ReadIterations.hpp @@ -31,26 +31,6 @@ namespace openPMD { -/** - * @brief Subclass of Iteration that knows its own index withing the containing - * Series. - */ -class IndexedIteration : public Iteration -{ - friend class SeriesIterator; - -public: - using iterations_t = decltype(internal::SeriesData::iterations); - using index_t = iterations_t::key_type; - index_t const iterationIndex; - -private: - template - IndexedIteration(Iteration_t &&it, index_t index) - : Iteration(std::forward(it)), iterationIndex(index) - {} -}; - class SeriesIterator { using iteration_index_t = IndexedIteration::index_t; diff --git a/include/openPMD/WriteIterations.hpp b/include/openPMD/WriteIterations.hpp index 3099af7025..7c457e7cfe 100644 --- a/include/openPMD/WriteIterations.hpp +++ b/include/openPMD/WriteIterations.hpp @@ -87,5 +87,10 @@ class WriteIterations public: mapped_type &operator[](key_type const &key); mapped_type &operator[](key_type &&key); + + /** + * Return the iteration that is currently being written to, if it exists. 
+ */ + std::optional currentIteration(); }; } // namespace openPMD diff --git a/src/WriteIterations.cpp b/src/WriteIterations.cpp index 2bc34f0416..f5e976a6f4 100644 --- a/src/WriteIterations.cpp +++ b/src/WriteIterations.cpp @@ -69,13 +69,17 @@ WriteIterations::mapped_type &WriteIterations::operator[](key_type &&key) "[WriteIterations] Trying to access after closing Series."); } auto &s = shared->value(); - if (s.currentlyOpen.has_value()) + auto lastIteration = currentIteration(); + if (lastIteration.has_value()) { - auto lastIterationIndex = s.currentlyOpen.value(); - auto &lastIteration = s.iterations.at(lastIterationIndex); - if (lastIterationIndex != key && !lastIteration.closed()) + auto lastIteration_v = lastIteration.value(); + if (lastIteration_v.iterationIndex == key) { - lastIteration.close(); + return s.iterations.at(std::move(key)); + } + else + { + lastIteration_v.close(); // continue below } } s.currentlyOpen = key; @@ -87,4 +91,24 @@ WriteIterations::mapped_type &WriteIterations::operator[](key_type &&key) } return res; } + +std::optional WriteIterations::currentIteration() +{ + if (!shared || !shared->has_value()) + { + return std::nullopt; + } + auto &s = shared->value(); + if (!s.currentlyOpen.has_value()) + { + return std::nullopt; + } + Iteration ¤tIteration = s.iterations.at(s.currentlyOpen.value()); + if (currentIteration.closed()) + { + return std::nullopt; + } + return std::make_optional( + IndexedIteration(currentIteration, s.currentlyOpen.value())); +} } // namespace openPMD diff --git a/src/binding/python/Iteration.cpp b/src/binding/python/Iteration.cpp index 0ac290f7ff..59a9322039 100644 --- a/src/binding/python/Iteration.cpp +++ b/src/binding/python/Iteration.cpp @@ -63,8 +63,20 @@ void init_Iteration(py::module &m) "dt", &Iteration::dt, &Iteration::setDt) .def_property( "time_unit_SI", &Iteration::timeUnitSI, &Iteration::setTimeUnitSI) - .def("open", &Iteration::open) - .def("close", &Iteration::close, py::arg("flush") = true) + 
.def( + "open", + [](Iteration &it) { + py::gil_scoped_release release; + return it.open(); + }) + .def( + "close", + /* + * Cannot release the GIL here since Python buffers might be + * accessed in deferred tasks + */ + &Iteration::close, + py::arg("flush") = true) // TODO remove in future versions (deprecated) .def("set_time", &Iteration::setTime) diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index cdff83fd43..8874c21e43 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -53,24 +53,90 @@ struct openPMD_PyMPICommObject using openPMD_PyMPIIntracommObject = openPMD_PyMPICommObject; #endif +struct SeriesIteratorPythonAdaptor : SeriesIterator +{ + SeriesIteratorPythonAdaptor(SeriesIterator it) + : SeriesIterator(std::move(it)) + {} + + /* + * Python iterators are weird and call `__next__()` already for getting the + * first element. + * In that case, no `operator++()` must be called... + */ + bool first_iteration = true; +}; + void init_Series(py::module &m) { py::class_(m, "WriteIterations") .def( "__getitem__", [](WriteIterations writeIterations, Series::IterationIndex_t key) { + auto lastIteration = writeIterations.currentIteration(); + if (lastIteration.has_value() && + lastIteration.value().iterationIndex != key) + { + // this must happen under the GIL + lastIteration.value().close(); + } + py::gil_scoped_release release; return writeIterations[key]; }, // copy + keepalive - py::return_value_policy::copy); + py::return_value_policy::copy) + .def( + "current_iteration", + &WriteIterations::currentIteration, + "Return the iteration that is currently being written to, if it " + "exists."); py::class_(m, "IndexedIteration") .def_readonly("iteration_index", &IndexedIteration::iterationIndex); + + py::class_(m, "SeriesIterator") + .def( + "__next__", + [](SeriesIteratorPythonAdaptor &iterator) { + if (iterator == SeriesIterator::end()) + { + throw py::stop_iteration(); + } + /* + * Closing the iteration must 
happen under the GIL lock since + * Python buffers might be accessed + */ + if (!iterator.first_iteration) + { + if (!(*iterator).closed()) + { + (*iterator).close(); + } + py::gil_scoped_release release; + ++iterator; + } + iterator.first_iteration = false; + if (iterator == SeriesIterator::end()) + { + throw py::stop_iteration(); + } + else + { + return *iterator; + } + } + + ); + py::class_(m, "ReadIterations") .def( "__iter__", [](ReadIterations &readIterations) { - return py::make_iterator( - readIterations.begin(), readIterations.end()); + // Simple iterator implementation: + // But we need to release the GIL inside + // SeriesIterator::operator++, so manually it is + // return py::make_iterator( + // readIterations.begin(), readIterations.end()); + return SeriesIteratorPythonAdaptor(readIterations.begin()); }, // keep handle alive while iterator exists py::keep_alive<0, 1>()); @@ -78,7 +144,12 @@ void init_Series(py::module &m) py::class_(m, "Series") .def( - py::init(), + py::init([](std::string const &filepath, + Access at, + std::string const &options) { + py::gil_scoped_release release; + return new Series(filepath, at, options); + }), py::arg("filepath"), py::arg("access"), py::arg("options") = "{}") @@ -145,6 +216,7 @@ void init_Series(py::module &m) "(Mismatched MPI at compile vs. runtime?)"); } + py::gil_scoped_release release; return new Series(filepath, at, *mpiCommPtr, options); }), py::arg("filepath"), @@ -232,7 +304,13 @@ this method. 
py::return_value_policy::reference, // garbage collection: return value must be freed before Series py::keep_alive<1, 0>()) - .def("read_iterations", &Series::readIterations, py::keep_alive<0, 1>()) + .def( + "read_iterations", + [](Series &s) { + py::gil_scoped_release release; + return s.readIterations(); + }, + py::keep_alive<0, 1>()) .def( "write_iterations", &Series::writeIterations, From e2a79ae6c24432b09a75752b0cc2b674f7c1eca7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 25 Apr 2023 19:32:26 +0200 Subject: [PATCH 74/82] Fix gcc9 warning (#1429) the implicitly-defined constructor does not initialize 'openPMD::Datatype openPMD::detail::BufferedUniquePtrPut::dtype' --- include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index 6804d60ed7..53a46282a3 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -920,7 +920,7 @@ namespace detail Offset offset; Extent extent; UniquePtrWithLambda data; - Datatype dtype; + Datatype dtype = Datatype::UNDEFINED; void run(BufferedActions &); }; From 335071a386473a642a1b608fc86797f1e9caaf8f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 25 Apr 2023 19:38:31 +0200 Subject: [PATCH 75/82] More careful documentation of streaming API (#1430) --- examples/10_streaming_read.cpp | 5 ++ examples/10_streaming_read.py | 13 +++-- examples/10_streaming_write.cpp | 5 ++ examples/10_streaming_write.py | 14 ++--- examples/12_span_write.cpp | 5 ++ examples/12_span_write.py | 5 ++ examples/13_write_dynamic_configuration.cpp | 5 ++ examples/13_write_dynamic_configuration.py | 14 ++--- examples/1_structure.cpp | 8 ++- examples/3_write_serial.cpp | 5 ++ examples/3_write_serial.py | 5 ++ examples/3a_write_thetaMode_serial.cpp | 5 ++ examples/3a_write_thetaMode_serial.py | 5 ++ 
examples/3b_write_resizable_particles.cpp | 5 ++ examples/3b_write_resizable_particles.py | 2 +- examples/5_write_parallel.cpp | 10 +++- examples/5_write_parallel.py | 6 ++ examples/8a_benchmark_write_parallel.cpp | 10 ++++ examples/8b_benchmark_read_parallel.cpp | 5 ++ include/openPMD/Series.hpp | 16 +++++- include/openPMD/WriteIterations.hpp | 13 +++++ src/binding/python/Series.cpp | 61 +++++++++++++++++++-- 22 files changed, 191 insertions(+), 31 deletions(-) diff --git a/examples/10_streaming_read.cpp b/examples/10_streaming_read.cpp index eae79dd28a..2128297934 100644 --- a/examples/10_streaming_read.cpp +++ b/examples/10_streaming_read.cpp @@ -21,6 +21,11 @@ int main() Series series = Series("electrons.sst", Access::READ_LINEAR); + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. for (IndexedIteration iteration : series.readIterations()) { std::cout << "Current iteration: " << iteration.iterationIndex diff --git a/examples/10_streaming_read.py b/examples/10_streaming_read.py index 5d0f688b94..4946c36e04 100755 --- a/examples/10_streaming_read.py +++ b/examples/10_streaming_read.py @@ -21,12 +21,13 @@ json.dumps(config)) # Read all available iterations and print electron position data. - # Use `series.read_iterations()` instead of `series.iterations` - # for streaming support (while still retaining file-reading support). - # Direct access to `series.iterations` is only necessary for random-access - # of iterations. By using `series.read_iterations()`, the openPMD-api will - # step through the iterations one by one, and going back to an iteration is - # not possible once it has been closed. + # Direct access to iterations is possible via `series.iterations`. 
+ # For streaming support, `series.read_iterations()` needs to be used + # instead of `series.iterations`. + # `Series.write_iterations()` and `Series.read_iterations()` are + # intentionally restricted APIs that ensure a workflow which also works + # in streaming setups, e.g. an iteration cannot be opened again once + # it has been closed. for iteration in series.read_iterations(): print("Current iteration {}".format(iteration.iteration_index)) electronPositions = iteration.particles["e"]["position"] diff --git a/examples/10_streaming_write.cpp b/examples/10_streaming_write.cpp index 57bbcb6287..463b4c52e2 100644 --- a/examples/10_streaming_write.cpp +++ b/examples/10_streaming_write.cpp @@ -29,6 +29,11 @@ int main() std::shared_ptr local_data( new position_t[length], [](position_t const *ptr) { delete[] ptr; }); + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. WriteIterations iterations = series.writeIterations(); for (size_t i = 0; i < 100; ++i) { diff --git a/examples/10_streaming_write.py b/examples/10_streaming_write.py index 956b683b05..bf92bcf14c 100755 --- a/examples/10_streaming_write.py +++ b/examples/10_streaming_write.py @@ -27,13 +27,13 @@ # now, write a number of iterations (or: snapshots, time steps) for i in range(10): - # Use `series.write_iterations()` instead of `series.iterations` - # for streaming support (while still retaining file-writing support). - # Direct access to `series.iterations` is only necessary for - # random-access of iterations. By using `series.write_iterations()`, - # the openPMD-api will adhere to streaming semantics while writing. 
- # In particular, this means that only one iteration can be written at a - # time and an iteration can no longer be modified after closing it. + # Direct access to iterations is possible via `series.iterations`. + # For streaming support, `series.write_iterations()` needs to be used + # instead of `series.iterations`. + # `Series.write_iterations()` and `Series.read_iterations()` are + # intentionally restricted APIs that ensure a workflow which also works + # in streaming setups, e.g. an iteration cannot be opened again once + # it has been closed. iteration = series.write_iterations()[i] ####################### diff --git a/examples/12_span_write.cpp b/examples/12_span_write.cpp index d53181cea0..089ceddff3 100644 --- a/examples/12_span_write.cpp +++ b/examples/12_span_write.cpp @@ -19,6 +19,11 @@ void span_write(std::string const &filename) std::vector fallbackBuffer; + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. WriteIterations iterations = series.writeIterations(); for (size_t i = 0; i < 10; ++i) { diff --git a/examples/12_span_write.py b/examples/12_span_write.py index bfe0f69784..c985192383 100644 --- a/examples/12_span_write.py +++ b/examples/12_span_write.py @@ -10,6 +10,11 @@ def span_write(filename): extent = [length] dataset = io.Dataset(datatype, extent) + # `Series.write_iterations()` and `Series.read_iterations()` are + # intentionally restricted APIs that ensure a workflow which also works + # in streaming setups, e.g. an iteration cannot be opened again once + # it has been closed. + # `Series.iterations` can be directly accessed in random-access workflows. 
iterations = series.write_iterations() for i in range(12): iteration = iterations[i] diff --git a/examples/13_write_dynamic_configuration.cpp b/examples/13_write_dynamic_configuration.cpp index a398eccf27..b6e7f3694c 100644 --- a/examples/13_write_dynamic_configuration.cpp +++ b/examples/13_write_dynamic_configuration.cpp @@ -75,6 +75,11 @@ chunks = "auto" std::shared_ptr local_data( new position_t[length], [](position_t const *ptr) { delete[] ptr; }); + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. WriteIterations iterations = series.writeIterations(); for (size_t i = 0; i < 100; ++i) { diff --git a/examples/13_write_dynamic_configuration.py b/examples/13_write_dynamic_configuration.py index 8670961592..bb7e81ce4a 100644 --- a/examples/13_write_dynamic_configuration.py +++ b/examples/13_write_dynamic_configuration.py @@ -63,13 +63,13 @@ def main(): # now, write a number of iterations (or: snapshots, time steps) for i in range(10): - # Use `series.write_iterations()` instead of `series.iterations` - # for streaming support (while still retaining file-writing support). - # Direct access to `series.iterations` is only necessary for - # random-access of iterations. By using `series.write_iterations()`, - # the openPMD-api will adhere to streaming semantics while writing. - # In particular, this means that only one iteration can be written at a - # time and an iteration can no longer be modified after closing it. + # Direct access to iterations is possible via `series.iterations`. + # For streaming support, `series.write_iterations()` needs to be used + # instead of `series.iterations`. 
+ # `Series.write_iterations()` and `Series.read_iterations()` are + # intentionally restricted APIs that ensure a workflow which also works + # in streaming setups, e.g. an iteration cannot be opened again once + # it has been closed. iteration = series.write_iterations()[i] ####################### diff --git a/examples/1_structure.cpp b/examples/1_structure.cpp index fe4381884f..6e595c56ba 100644 --- a/examples/1_structure.cpp +++ b/examples/1_structure.cpp @@ -38,7 +38,13 @@ int main() /* Access to individual positions inside happens hierarchically, according * to the openPMD standard. Creation of new elements happens on access * inside the tree-like structure. Required attributes are initialized to - * reasonable defaults for every object. */ + * reasonable defaults for every object. + * `Series::writeIterations()` and `Series::readIterations()` are + * intentionally restricted APIs that ensure a workflow which also works + * in streaming setups, e.g. an iteration cannot be opened again once + * it has been closed. + * `Series::iterations` can be directly accessed in random-access workflows. + */ ParticleSpecies electrons = series.writeIterations()[1].particles["electrons"]; diff --git a/examples/3_write_serial.cpp b/examples/3_write_serial.cpp index a66db6c080..aeb62aef6c 100644 --- a/examples/3_write_serial.cpp +++ b/examples/3_write_serial.cpp @@ -44,6 +44,11 @@ int main(int argc, char *argv[]) Series series = Series("../samples/3_write_serial.h5", Access::CREATE); cout << "Created an empty " << series.iterationEncoding() << " Series\n"; + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. 
MeshRecordComponent rho = series.writeIterations()[1].meshes["rho"][MeshRecordComponent::SCALAR]; cout << "Created a scalar mesh Record with all required openPMD " diff --git a/examples/3_write_serial.py b/examples/3_write_serial.py index 8e136f9512..b1bdd2d063 100644 --- a/examples/3_write_serial.py +++ b/examples/3_write_serial.py @@ -28,6 +28,11 @@ print("Created an empty {0} Series".format(series.iteration_encoding)) print(len(series.iterations)) + # `Series.write_iterations()` and `Series.read_iterations()` are + # intentionally restricted APIs that ensure a workflow which also works + # in streaming setups, e.g. an iteration cannot be opened again once + # it has been closed. + # `Series.iterations` can be directly accessed in random-access workflows. rho = series.write_iterations()[1]. \ meshes["rho"][io.Mesh_Record_Component.SCALAR] diff --git a/examples/3a_write_thetaMode_serial.cpp b/examples/3a_write_thetaMode_serial.cpp index 9367e43f70..1e5086303f 100644 --- a/examples/3a_write_thetaMode_serial.cpp +++ b/examples/3a_write_thetaMode_serial.cpp @@ -51,6 +51,11 @@ int main() geos << "m=" << num_modes << ";imag=+"; std::string const geometryParameters = geos.str(); + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. 
Mesh E = series.writeIterations()[0].meshes["E"]; E.setGeometry(Mesh::Geometry::thetaMode); E.setGeometryParameters(geometryParameters); diff --git a/examples/3a_write_thetaMode_serial.py b/examples/3a_write_thetaMode_serial.py index ec81435558..c9aee2c9d7 100644 --- a/examples/3a_write_thetaMode_serial.py +++ b/examples/3a_write_thetaMode_serial.py @@ -30,6 +30,11 @@ geometry_parameters = "m={0};imag=+".format(num_modes) + # `Series.write_iterations()` and `Series.read_iterations()` are + # intentionally restricted APIs that ensure a workflow which also works + # in streaming setups, e.g. an iteration cannot be opened again once + # it has been closed. + # `Series.iterations` can be directly accessed in random-access workflows. E = series.write_iterations()[0].meshes["E"] E.geometry = io.Geometry.thetaMode E.geometry_parameters = geometry_parameters diff --git a/examples/3b_write_resizable_particles.cpp b/examples/3b_write_resizable_particles.cpp index d4be87a0fc..91face7f2b 100644 --- a/examples/3b_write_resizable_particles.cpp +++ b/examples/3b_write_resizable_particles.cpp @@ -32,6 +32,11 @@ int main() Series series = Series("../samples/3b_write_resizable_particles.h5", Access::CREATE); + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. ParticleSpecies electrons = series.writeIterations()[0].particles["electrons"]; diff --git a/examples/3b_write_resizable_particles.py b/examples/3b_write_resizable_particles.py index 440fac7de6..f188559816 100644 --- a/examples/3b_write_resizable_particles.py +++ b/examples/3b_write_resizable_particles.py @@ -64,7 +64,7 @@ # The iteration's content will be flushed automatically. # An iteration once closed cannot (yet) be reopened. 
# after this call, the provided data buffers can be used again or deleted - series.write_iterations()[0].close() + series.iterations[0].close() # rinse and repeat as needed :) diff --git a/examples/5_write_parallel.cpp b/examples/5_write_parallel.cpp index bfe737d9be..2b70c775cb 100644 --- a/examples/5_write_parallel.cpp +++ b/examples/5_write_parallel.cpp @@ -55,8 +55,14 @@ int main(int argc, char *argv[]) << " MPI ranks\n"; // In parallel contexts, it's important to explicitly open iterations. - // This is done automatically when using `Series::writeIterations()`, - // or in read mode `Series::readIterations()`. + // You can either explicitly access Series::iterations and use + // Iteration::open() afterwards, or use `Series::writeIterations()`, + // or in read mode `Series::readIterations()` where iterations are opened + // automatically. + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. series.iterations[1].open(); MeshRecordComponent mymesh = series.iterations[1].meshes["mymesh"][MeshRecordComponent::SCALAR]; diff --git a/examples/5_write_parallel.py b/examples/5_write_parallel.py index c956b6eed1..27aa01f94c 100644 --- a/examples/5_write_parallel.py +++ b/examples/5_write_parallel.py @@ -40,6 +40,12 @@ # In parallel contexts, it's important to explicitly open iterations. # This is done automatically when using `Series.write_iterations()`, # or in read mode `Series.read_iterations()`. + # + # `Series.write_iterations()` and `Series.read_iterations()` are + # intentionally restricted APIs that ensure a workflow which also works + # in streaming setups, e.g. an iteration cannot be opened again once + # it has been closed. + # `Series.iterations` can be directly accessed in random-access workflows. series.iterations[1].open() mymesh = series.iterations[1]. 
\ meshes["mymesh"][io.Mesh_Record_Component.SCALAR] diff --git a/examples/8a_benchmark_write_parallel.cpp b/examples/8a_benchmark_write_parallel.cpp index 82c32ce73a..c509c879ff 100644 --- a/examples/8a_benchmark_write_parallel.cpp +++ b/examples/8a_benchmark_write_parallel.cpp @@ -748,6 +748,11 @@ void AbstractPattern::store(Series &series, int step) std::string scalar = openPMD::MeshRecordComponent::SCALAR; storeMesh(series, step, field_rho, scalar); + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. ParticleSpecies &currSpecies = series.writeIterations()[step].particles["ion"]; storeParticles(currSpecies, step); @@ -770,6 +775,11 @@ void AbstractPattern::storeMesh( const std::string &fieldName, const std::string &compName) { + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also works + // in streaming setups, e.g. an iteration cannot be opened again once + // it has been closed. + // `Series::iterations` can be directly accessed in random-access workflows. MeshRecordComponent compA = series.writeIterations()[step].meshes[fieldName][compName]; Datatype datatype = determineDatatype(); diff --git a/examples/8b_benchmark_read_parallel.cpp b/examples/8b_benchmark_read_parallel.cpp index 3809707a72..98cd81add2 100644 --- a/examples/8b_benchmark_read_parallel.cpp +++ b/examples/8b_benchmark_read_parallel.cpp @@ -274,6 +274,11 @@ class TestInput << std::endl; } + // `Series::writeIterations()` and `Series::readIterations()` are + // intentionally restricted APIs that ensure a workflow which also + // works in streaming setups, e.g. an iteration cannot be opened + // again once it has been closed. 
`Series::iterations` can be + // directly accessed in random-access workflows. { int counter = 1; for (auto i : series.readIterations()) diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp index 7b85986992..a4e8506a81 100644 --- a/include/openPMD/Series.hpp +++ b/include/openPMD/Series.hpp @@ -491,6 +491,11 @@ class Series : public Attributable * Creates and returns an instance of the ReadIterations class which can * be used for iterating over the openPMD iterations in a C++11-style for * loop. + * `Series::readIterations()` is an intentionally restricted API that + * ensures a workflow which also works in streaming setups, e.g. an + * iteration cannot be opened again once it has been closed. + * For a less restrictive API in non-streaming situations, + * `Series::iterations` can be accessed directly. * Look for the ReadIterations class for further documentation. * * @return ReadIterations @@ -500,11 +505,16 @@ class Series : public Attributable /** * @brief Entry point to the writing end of the streaming API. * - * Creates and returns an instance of the WriteIterations class which is a - * restricted container of iterations which takes care of - * streaming semantics. + * Creates and returns an instance of the WriteIterations class which is an + * intentionally restricted container of iterations that takes care of + * streaming semantics, e.g. ensuring that an iteration cannot be reopened + * once closed. + * For a less restrictive API in non-streaming situations, + * `Series::iterations` can be accessed directly. * The created object is stored as member of the Series object, hence this * method may be called as many times as a user wishes. + * There is only one shared iterator state per Series, even when calling + * this method twice. * Look for the WriteIterations class for further documentation. 
* * @return WriteIterations diff --git a/include/openPMD/WriteIterations.hpp b/include/openPMD/WriteIterations.hpp index 7c457e7cfe..fcf4a8fbfa 100644 --- a/include/openPMD/WriteIterations.hpp +++ b/include/openPMD/WriteIterations.hpp @@ -50,6 +50,19 @@ namespace internal class SeriesData; } +/** + * @brief Writing side of the streaming API. + * + * Create instance via Series::writeIterations(). + * Restricted Container of Iterations, designed to allow reading any kind + * of Series, streaming and non-streaming alike. + * Calling Iteration::close() manually before opening the next iteration is + * encouraged and will implicitly flush all deferred IO actions. + * Otherwise, Iteration::close() will be implicitly called upon + * opening the next iteration or upon destruction. + * Since this is designed for streaming mode, reopening an iteration is + * not possible once it has been closed. + */ class WriteIterations { friend class Series; diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index 8874c21e43..ae4f699fa2 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -69,7 +69,19 @@ struct SeriesIteratorPythonAdaptor : SeriesIterator void init_Series(py::module &m) { - py::class_(m, "WriteIterations") + py::class_(m, "WriteIterations", R"END( +Writing side of the streaming API. + +Create instance via Series.writeIterations(). +Restricted Container of Iterations, designed to allow reading any kind +of Series, streaming and non-streaming alike. +Calling Iteration.close() manually before opening the next iteration is +encouraged and will implicitly flush all deferred IO actions. +Otherwise, Iteration.close() will be implicitly called upon +opening the next iteration or upon destruction. +Since this is designed for streaming mode, reopening an iteration is +not possible once it has been closed. 
+ )END") .def( "__getitem__", [](WriteIterations writeIterations, Series::IterationIndex_t key) { @@ -127,7 +139,20 @@ void init_Series(py::module &m) ); - py::class_(m, "ReadIterations") + py::class_(m, "ReadIterations", R"END( +Reading side of the streaming API. + +Create instance via Series.readIterations(). +For use in a foreach loop over iterations. +Designed to allow reading any kind of Series, streaming and non-streaming alike. +Calling Iteration.close() manually before opening the next iteration is +encouraged and will implicitly flush all deferred IO actions. +Otherwise, Iteration.close() will be implicitly called upon +SeriesIterator.__next__(), i.e. upon going to the next iteration in +the foreach loop. +Since this is designed for streaming mode, reopening an iteration is +not possible once it has been closed. + )END") .def( "__iter__", [](ReadIterations &readIterations) { @@ -310,11 +335,39 @@ this method. py::gil_scoped_release release; return s.readIterations(); }, - py::keep_alive<0, 1>()) + py::keep_alive<0, 1>(), + R"END( +Entry point to the reading end of the streaming API. + +Creates and returns an instance of the ReadIterations class which can +be used for iterating over the openPMD iterations in a C++11-style for +loop. +`Series.read_iterations()` is an intentionally restricted API that +ensures a workflow which also works in streaming setups, e.g. an +iteration cannot be opened again once it has been closed. +For a less restrictive API in non-streaming situations, +`Series.iterations` can be accessed directly. +Look for the ReadIterations class for further documentation. + )END") .def( "write_iterations", &Series::writeIterations, - py::keep_alive<0, 1>()); + py::keep_alive<0, 1>(), + R"END( +Entry point to the writing end of the streaming API. + +Creates and returns an instance of the WriteIterations class which is an +intentionally restricted container of iterations that takes care of +streaming semantics, e.g. 
ensuring that an iteration cannot be reopened +once closed. +For a less restrictive API in non-streaming situations, +`Series.iterations` can be accessed directly. +The created object is stored as member of the Series object, hence this +method may be called as many times as a user wishes. +There is only one shared iterator state per Series, even when calling +this method twice. +Look for the WriteIterations class for further documentation. + )END"); m.def( "merge_json", From 8fd938c11916a0e78cb9f1d67f6878ffe9035c05 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 25 Apr 2023 18:45:41 +0000 Subject: [PATCH 76/82] [pre-commit.ci] pre-commit autoupdate (#1425) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/mirrors-clang-format: v16.0.0 → v16.0.2](https://github.com/pre-commit/mirrors-clang-format/compare/v16.0.0...v16.0.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index b489431da0..38e8c0306f 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: # clang-format v13 # to run manually, use .github/workflows/clang-format/clang-format.sh - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v16.0.0 + rev: v16.0.2 hooks: - id: clang-format # By default, the clang-format hook configures: From 28fd5c81dfdf26d54d8e5f68cc101f8d339fdd4b Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 3 May 2023 18:00:15 -0700 Subject: [PATCH 77/82] Doc: Fix Bib Authors (#1434) Make sure the bib authors match the quoted openPMD-standard authors. 
--- docs/source/citation.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/citation.rst b/docs/source/citation.rst index 858f67bf1b..2291d8a3a9 100644 --- a/docs/source/citation.rst +++ b/docs/source/citation.rst @@ -30,6 +30,7 @@ The equivalent BibTeX code is: Sbalzarini, Ivo and Kuschel, Stephan and Sagan, David and + Mayes, Christopher and P{\'e}rez, Fr{\'e}d{\'e}ric and Koller, Fabian and Bussmann, Michael}, From 55b8b96c5eb6fcfdb276e33b3cb83ad466197450 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 9 May 2023 10:48:03 -0700 Subject: [PATCH 78/82] Update .readthedocs.yml (#1438) Update to newer Ubuntu, shipping a newer OpenSSL --- .readthedocs.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.readthedocs.yml b/.readthedocs.yml index 54a296c29d..2fc118e82d 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -10,5 +10,8 @@ formats: - epub build: + os: ubuntu-22.04 + tools: + python: "3.11" apt_packages: - librsvg2-bin From 77c73e208b55a9ce43293f0cdb7b2a667c6d2f5c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Franz=20P=C3=B6schel?= Date: Tue, 9 May 2023 19:48:12 +0200 Subject: [PATCH 79/82] Fix deprecated storeChunk APIs in first read/write examples (#1435) --- docs/source/usage/firstread.rst | 5 ++--- docs/source/usage/firstwrite.rst | 15 ++++++--------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/docs/source/usage/firstread.rst b/docs/source/usage/firstread.rst index f27e3c595b..673f69acc4 100644 --- a/docs/source/usage/firstread.rst +++ b/docs/source/usage/firstread.rst @@ -347,12 +347,11 @@ C++17 .. code-block:: cpp - // destruct series object, - // e.g. when out-of-scope + series.close() Python ^^^^^^ .. code-block:: python3 - del series + series.close() diff --git a/docs/source/usage/firstwrite.rst b/docs/source/usage/firstwrite.rst index d339d00ad1..3361214926 100644 --- a/docs/source/usage/firstwrite.rst +++ b/docs/source/usage/firstwrite.rst @@ -297,11 +297,9 @@ C++17 .. 
code-block:: cpp B_x.storeChunk( - io::shareRaw(x_data), - {0, 0}, {150, 300}); + x_data, {0, 0}, {150, 300}); B_z.storeChunk( - io::shareRaw(z_data), - {0, 0}, {150, 300}); + z_data, {0, 0}, {150, 300}); B_y.makeConstant(y_data); @@ -310,10 +308,10 @@ Python .. code-block:: python3 - B_x.store_chunk(x_data) + B_x[:, :] = x_data - B_z.store_chunk(z_data) + B_z[:, :] = z_data @@ -354,12 +352,11 @@ C++17 .. code-block:: cpp - // destruct series object, - // e.g. when out-of-scope + series.close() Python ^^^^^^ .. code-block:: python3 - del series + series.close() From 0beb905e8267af9f7a794dd4751ff55d72cea54b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 9 May 2023 13:43:55 -0700 Subject: [PATCH 80/82] [pre-commit.ci] pre-commit autoupdate (#1437) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/mirrors-clang-format: v16.0.2 → v16.0.3](https://github.com/pre-commit/mirrors-clang-format/compare/v16.0.2...v16.0.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 38e8c0306f..0df8ec9c72 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -66,7 +66,7 @@ repos: # clang-format v13 # to run manually, use .github/workflows/clang-format/clang-format.sh - repo: https://github.com/pre-commit/mirrors-clang-format - rev: v16.0.2 + rev: v16.0.3 hooks: - id: clang-format # By default, the clang-format hook configures: From a8b39b918164be118cd031f711c22d1d13587ad6 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 18 May 2023 13:45:16 -0700 Subject: [PATCH 81/82] CI: macOS-11 Update (#1446) The older macOS image is now removed. The latest points already to macOS-12. macoS-13 runners are marked experimental. 
https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources --- .github/workflows/macos.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 2295770ce2..287bbd8ca4 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -43,9 +43,8 @@ jobs: cmake --build build --parallel 3 ctest --test-dir build --verbose - appleclang12_py: - runs-on: macos-10.15 - # next: macOS-11 + appleclang13_py: + runs-on: macos-11 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v3 @@ -67,7 +66,8 @@ jobs: -DopenPMD_USE_MPI=OFF \ -DopenPMD_USE_HDF5=OFF \ -DopenPMD_USE_ADIOS2=OFF \ - -DopenPMD_USE_INVASIVE_TESTS=ON + -DopenPMD_USE_INVASIVE_TESTS=ON \ + -DPython_EXECUTABLE=$(which python3) cmake --build build --parallel 3 ctest --test-dir build --verbose From 4423be4284220d005818ce862f482594ac22cd31 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 18 May 2023 13:45:40 -0700 Subject: [PATCH 82/82] Docs: Linking to C++ Projects (#1445) Move a section only written in the README to our developer section on readthedocs. --- README.md | 8 ++-- docs/source/dev/linking.rst | 88 +++++++++++++++++++++++++++++++++++++ docs/source/index.rst | 1 + 3 files changed, 93 insertions(+), 4 deletions(-) create mode 100644 docs/source/dev/linking.rst diff --git a/README.md b/README.md index 7d596ad94e..1cf6bdba78 100644 --- a/README.md +++ b/README.md @@ -294,7 +294,7 @@ The install will contain header files and libraries in the path set with `-DCMAK ### CMake -If your project is using CMake for its build, one can conveniently use our provided `openPMDConfig.cmake` package which is installed alongside the library. +If your project is using CMake for its build, one can conveniently use our provided `openPMDConfig.cmake` package, which is installed alongside the library. 
First set the following environment hint if openPMD-api was *not* installed in a system path: @@ -306,7 +306,7 @@ export CMAKE_PREFIX_PATH=$HOME/somepath:$CMAKE_PREFIX_PATH Use the following lines in your project's `CMakeLists.txt`: ```cmake # supports: COMPONENTS MPI NOMPI HDF5 ADIOS2 -find_package(openPMD 0.9.0 CONFIG) +find_package(openPMD 0.15.0 CONFIG) if(openPMD_FOUND) target_link_libraries(YourTarget PRIVATE openPMD::openPMD) @@ -334,13 +334,13 @@ set(openPMD_INSTALL OFF) # or instead use: set(openPMD_USE_PYTHON OFF) FetchContent_Declare(openPMD GIT_REPOSITORY "https://github.com/openPMD/openPMD-api.git" - GIT_TAG "dev") + GIT_TAG "0.15.0") FetchContent_MakeAvailable(openPMD) ``` ### Manually -If your (Linux/OSX) project is build by calling the compiler directly or uses a manually written `Makefile`, consider using our `openPMD.pc` helper file for `pkg-config` which are installed alongside the library. +If your (Linux/OSX) project is build by calling the compiler directly or uses a manually written `Makefile`, consider using our `openPMD.pc` helper file for `pkg-config`, which are installed alongside the library. First set the following environment hint if openPMD-api was *not* installed in a system path: diff --git a/docs/source/dev/linking.rst b/docs/source/dev/linking.rst new file mode 100644 index 0000000000..64858cb25f --- /dev/null +++ b/docs/source/dev/linking.rst @@ -0,0 +1,88 @@ +.. _development-linking: + +Linking to C++ +============== + +The install will contain header files and libraries in the path set with the ``-DCMAKE_INSTALL_PREFIX`` option :ref:`from the previous section `. + + +CMake +----- + +If your project is using CMake for its build, one can conveniently use our provided ``openPMDConfig.cmake`` package, which is installed alongside the library. + +First set the following environment hint if openPMD-api was *not* installed in a system path: + +.. 
code-block:: bash + + # optional: only needed if installed outside of system paths + export CMAKE_PREFIX_PATH=$HOME/somepath:$CMAKE_PREFIX_PATH + +Use the following lines in your project's ``CMakeLists.txt``: + +.. code-block:: cmake + + # supports: COMPONENTS MPI NOMPI HDF5 ADIOS2 + find_package(openPMD 0.15.0 CONFIG) + + if(openPMD_FOUND) + target_link_libraries(YourTarget PRIVATE openPMD::openPMD) + endif() + +*Alternatively*, add the openPMD-api repository source directly to your project and use it via: + +.. code-block:: cmake + + add_subdirectory("path/to/source/of/openPMD-api") + + target_link_libraries(YourTarget PRIVATE openPMD::openPMD) + +For development workflows, you can even automatically download and build openPMD-api from within a depending CMake project. +Just replace the ``add_subdirectory`` call with: + +.. code-block:: cmake + + include(FetchContent) + set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) + set(openPMD_BUILD_CLI_TOOLS OFF) + set(openPMD_BUILD_EXAMPLES OFF) + set(openPMD_BUILD_TESTING OFF) + set(openPMD_BUILD_SHARED_LIBS OFF) # precedence over BUILD_SHARED_LIBS if needed + set(openPMD_INSTALL OFF) # or instead use: + # set(openPMD_INSTALL ${BUILD_SHARED_LIBS}) # only install if used as a shared library + set(openPMD_USE_PYTHON OFF) + FetchContent_Declare(openPMD + GIT_REPOSITORY "https://github.com/openPMD/openPMD-api.git" + GIT_TAG "0.15.0") + FetchContent_MakeAvailable(openPMD) + + +Manually +-------- + +If your (Linux/OSX) project is build by calling the compiler directly or uses a manually written ``Makefile``, consider using our ``openPMD.pc`` helper file for ``pkg-config``, which are installed alongside the library. + +First set the following environment hint if openPMD-api was *not* installed in a system path: + +.. 
code-block:: bash + + # optional: only needed if installed outside of system paths + export PKG_CONFIG_PATH=$HOME/somepath/lib/pkgconfig:$PKG_CONFIG_PATH + +Additional linker and compiler flags for your project are available via: + +.. code-block:: bash + + # switch to check if openPMD-api was build as static library + # (via BUILD_SHARED_LIBS=OFF) or as shared library (default) + if [ "$(pkg-config --variable=static openPMD)" == "true" ] + then + pkg-config --libs --static openPMD + # -L/usr/local/lib -L/usr/lib/x86_64-linux-gnu/openmpi/lib -lopenPMD -pthread /usr/lib/libmpi.so -pthread /usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi_cxx.so /usr/lib/libmpi.so /usr/lib/x86_64-linux-gnu/hdf5/openmpi/libhdf5.so /usr/lib/x86_64-linux-gnu/libsz.so /usr/lib/x86_64-linux-gnu/libz.so /usr/lib/x86_64-linux-gnu/libdl.so /usr/lib/x86_64-linux-gnu/libm.so -pthread /usr/lib/libmpi.so -pthread /usr/lib/x86_64-linux-gnu/openmpi/lib/libmpi_cxx.so /usr/lib/libmpi.so + else + pkg-config --libs openPMD + # -L${HOME}/somepath/lib -lopenPMD + fi + + pkg-config --cflags openPMD + # -I${HOME}/somepath/include diff --git a/docs/source/index.rst b/docs/source/index.rst index 1d6ad14703..6c59a7997d 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -141,6 +141,7 @@ Development dev/backend dev/dependencies dev/buildoptions + dev/linking dev/sphinx Maintenance