diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000000..8111e316b7
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,66 @@
+Language: Cpp
+
+BasedOnStyle: LLVM
+AccessModifierOffset: -4
+AlignAfterOpenBracket: AlwaysBreak
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+# AlignEscapedNewlinesLeft: false
+AlignEscapedNewlines: Right
+AlignOperands: false
+AlignTrailingComments: false
+AllowAllArgumentsOnNextLine: true
+AllowAllConstructorInitializersOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: true
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLambdasOnASingleLine: true
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: true
+BinPackArguments: false
+BinPackParameters: false
+
+BreakBeforeBraces: Custom
+BraceWrapping:
+    AfterClass: true
+    AfterControlStatement: true
+    AfterEnum: true
+    AfterExternBlock: true
+    AfterFunction: true
+    AfterNamespace: true
+    AfterStruct: true
+    AfterUnion: true
+    BeforeCatch: true
+    BeforeElse: true
+    # BeforeLambdaBody: true
+    SplitEmptyFunction: false
+    SplitEmptyNamespace: false
+    SplitEmptyRecord: false
+
+BreakConstructorInitializers: BeforeComma
+BreakInheritanceList: BeforeComma
+ColumnLimit: 80
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+FixNamespaceComments: true
+IndentWidth: 4
+NamespaceIndentation: Inner
+PointerAlignment: Right
+SortUsingDeclarations: false
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeParens: ControlStatements
+SpaceInEmptyParentheses: false
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Auto
+StatementMacros: ["OPENPMD_private", "OPENPMD_protected"]
diff --git a/.github/workflows/clang-format/clang-format.sh b/.github/workflows/clang-format/clang-format.sh
new file mode 100755
index 0000000000..863b68ba67
--- /dev/null
+++ b/.github/workflows/clang-format/clang-format.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+if (( $# > 0 )); then
+    # received arguments, format those files
+    clang-format-12 -i "$@"
+else
+    # received no arguments, find files on our own
+    find include/ src/ test/ examples/ \
+        -regextype egrep \
+        -type f -regex '.*\.(hpp|cpp|hpp\.in)$' \
+        | xargs clang-format-12 -i
+fi
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index ea9405f56e..9c31d5d2dc 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -62,6 +62,15 @@ repos:

 # C++ formatting
 # clang-format
+- repo: local
+  hooks:
+  - id: clang-format
+    name: clang-format
+    description: Clang-format our code base
+    files: '^(include|src|test|examples)/.*\.(hpp|cpp|hpp\.in)$'
+    language: conda
+    entry: bash .github/workflows/clang-format/clang-format.sh
+    additional_dependencies: [-c, conda-forge, clang-format-12]

 # Autoremoves unused Python imports
 - repo: https://github.com/hadialqattan/pycln
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 0000000000..5cdc6f6f02
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,7 @@
+# I am a conda environment, used for our pre-commit hooks
+name: openPMD-api-dev
+channels:
+  - conda-forge
+dependencies:
+  - clang-format-12=12.0.1
+  - bash=5
diff --git a/examples/10_streaming_read.cpp b/examples/10_streaming_read.cpp
index e960447ad5..e271dd4393 100644
--- a/examples/10_streaming_read.cpp
+++ b/examples/10_streaming_read.cpp
@@ -8,49 +8,48 @@ using std::cout;
 using namespace openPMD;

-int
-main()
+int main()
 {
 #if openPMD_HAVE_ADIOS2
     using position_t = double;

     auto backends = openPMD::getFileExtensions();
-    if( std::find( backends.begin(), backends.end(), "sst" ) == backends.end() )
+    if (std::find(backends.begin(), backends.end(), "sst") == backends.end())
     {
         std::cout << "SST engine not available in ADIOS2." << std::endl;
         return 0;
     }

-    Series series = Series( "electrons.sst", Access::READ_ONLY );
+    Series series = Series("electrons.sst", Access::READ_ONLY);

-    for( IndexedIteration iteration : series.readIterations() )
+    for (IndexedIteration iteration : series.readIterations())
     {
         std::cout << "Current iteration: " << iteration.iterationIndex
                   << std::endl;
-        Record electronPositions = iteration.particles[ "e" ][ "position" ];
-        std::array< std::shared_ptr< position_t >, 3 > loadedChunks;
-        std::array< Extent, 3 > extents;
-        std::array< std::string, 3 > const dimensions{ { "x", "y", "z" } };
+        Record electronPositions = iteration.particles["e"]["position"];
+        std::array<std::shared_ptr<position_t>, 3> loadedChunks;
+        std::array<Extent, 3> extents;
+        std::array<std::string, 3> const dimensions{{"x", "y", "z"}};

-        for( size_t i = 0; i < 3; ++i )
+        for (size_t i = 0; i < 3; ++i)
         {
-            std::string dim = dimensions[ i ];
-            RecordComponent rc = electronPositions[ dim ];
-            loadedChunks[ i ] = rc.loadChunk< position_t >(
-                Offset( rc.getDimensionality(), 0 ), rc.getExtent() );
-            extents[ i ] = rc.getExtent();
+            std::string dim = dimensions[i];
+            RecordComponent rc = electronPositions[dim];
+            loadedChunks[i] = rc.loadChunk<position_t>(
+                Offset(rc.getDimensionality(), 0), rc.getExtent());
+            extents[i] = rc.getExtent();
         }

         iteration.close();

-        for( size_t i = 0; i < 3; ++i )
+        for (size_t i = 0; i < 3; ++i)
         {
-            std::string dim = dimensions[ i ];
-            Extent const & extent = extents[ i ];
+            std::string dim = dimensions[i];
+            Extent const &extent = extents[i];
             std::cout << "\ndim: " << dim << "\n" << std::endl;
-            auto chunk = loadedChunks[ i ];
-            for( size_t j = 0; j < extent[ 0 ]; ++j )
+            auto chunk = loadedChunks[i];
+            for (size_t j = 0; j < extent[0]; ++j)
             {
-                std::cout << chunk.get()[ j ] << ", ";
+                std::cout << chunk.get()[j] << ", ";
             }
             std::cout << "\n----------\n" << std::endl;
         }
diff --git a/examples/10_streaming_write.cpp b/examples/10_streaming_write.cpp
index c7905a51b5..1c12e034f1 100644
--- a/examples/10_streaming_write.cpp
+++ b/examples/10_streaming_write.cpp
@@ -8,41 +8,39 @@ using std::cout;
 using namespace openPMD;

-int
-main()
+int main()
 {
 #if openPMD_HAVE_ADIOS2
     using position_t = double;

     auto backends = openPMD::getFileExtensions();
-    if( std::find( backends.begin(), backends.end(), "sst" ) == backends.end() )
+    if (std::find(backends.begin(), backends.end(), "sst") == backends.end())
     {
         std::cout << "SST engine not available in ADIOS2."
<< std::endl; return 0; } // open file for writing - Series series = Series( "electrons.sst", Access::CREATE ); + Series series = Series("electrons.sst", Access::CREATE); - Datatype datatype = determineDatatype< position_t >(); + Datatype datatype = determineDatatype(); constexpr unsigned long length = 10ul; - Extent global_extent = { length }; - Dataset dataset = Dataset( datatype, global_extent ); - std::shared_ptr< position_t > local_data( - new position_t[ length ], - []( position_t const * ptr ) { delete[] ptr; } ); + Extent global_extent = {length}; + Dataset dataset = Dataset(datatype, global_extent); + std::shared_ptr local_data( + new position_t[length], [](position_t const *ptr) { delete[] ptr; }); WriteIterations iterations = series.writeIterations(); - for( size_t i = 0; i < 100; ++i ) + for (size_t i = 0; i < 100; ++i) { - Iteration iteration = iterations[ i ]; - Record electronPositions = iteration.particles[ "e" ][ "position" ]; + Iteration iteration = iterations[i]; + Record electronPositions = iteration.particles["e"]["position"]; - std::iota( local_data.get(), local_data.get() + length, i * length ); - for( auto const & dim : { "x", "y", "z" } ) + std::iota(local_data.get(), local_data.get() + length, i * length); + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); - pos.storeChunk( local_data, Offset{ 0 }, global_extent ); + RecordComponent pos = electronPositions[dim]; + pos.resetDataset(dataset); + pos.storeChunk(local_data, Offset{0}, global_extent); } iteration.close(); } diff --git a/examples/12_span_write.cpp b/examples/12_span_write.cpp index a1162edbb3..6afcb18fe4 100644 --- a/examples/12_span_write.cpp +++ b/examples/12_span_write.cpp @@ -5,31 +5,31 @@ #include // std::iota #include -void span_write( std::string const & filename ) +void span_write(std::string const &filename) { using namespace openPMD; using position_t = double; // open file for writing - Series series = Series( filename, Access::CREATE ); + Series series = Series(filename, Access::CREATE); - Datatype datatype = determineDatatype< position_t >(); + Datatype datatype = determineDatatype(); constexpr unsigned long length = 10ul; - Extent extent = { length }; - Dataset dataset = Dataset( datatype, extent ); + Extent extent = {length}; + Dataset dataset = Dataset(datatype, extent); - std::vector< position_t > fallbackBuffer; + std::vector fallbackBuffer; WriteIterations iterations = series.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - Iteration iteration = iterations[ i ]; - Record electronPositions = iteration.particles[ "e" ][ "position" ]; + Iteration iteration = iterations[i]; + Record electronPositions = iteration.particles["e"]["position"]; size_t j = 0; - for( auto const & dim : { "x", "y", "z" } ) + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); + RecordComponent pos = electronPositions[dim]; + pos.resetDataset(dataset); /* * This demonstrates the storeChunk() strategy (to be) used in * PIConGPU: @@ -45,16 +45,15 @@ void span_write( std::string const & filename ) * flushed in each iteration to make the buffer reusable. 
*/ bool fallbackBufferIsUsed = false; - auto dynamicMemoryView = pos.storeChunk< position_t >( - Offset{ 0 }, + auto dynamicMemoryView = pos.storeChunk( + Offset{0}, extent, - [ &fallbackBuffer, &fallbackBufferIsUsed ]( size_t size ) - { + [&fallbackBuffer, &fallbackBufferIsUsed](size_t size) { fallbackBufferIsUsed = true; - fallbackBuffer.resize( size ); - return std::shared_ptr< position_t >( - fallbackBuffer.data(), []( auto const * ) {} ); - } ); + fallbackBuffer.resize(size); + return std::shared_ptr( + fallbackBuffer.data(), [](auto const *) {}); + }); /* * ADIOS2 might reallocate its internal buffers when writing @@ -63,21 +62,21 @@ void span_write( std::string const & filename ) * directly before writing. */ auto span = dynamicMemoryView.currentBuffer(); - if( ( i + j ) % 2 == 0 ) + if ((i + j) % 2 == 0) { std::iota( span.begin(), span.end(), - position_t( 3 * i * length + j * length ) ); + position_t(3 * i * length + j * length)); } else { std::iota( span.rbegin(), span.rend(), - position_t( 3 * i * length + j * length ) ); + position_t(3 * i * length + j * length)); } - if( fallbackBufferIsUsed ) + if (fallbackBufferIsUsed) { iteration.seriesFlush(); } @@ -89,12 +88,12 @@ void span_write( std::string const & filename ) int main() { - for( auto const & ext : openPMD::getFileExtensions() ) + for (auto const &ext : openPMD::getFileExtensions()) { - if( ext == "sst" || ext == "ssc" ) + if (ext == "sst" || ext == "ssc") { continue; } - span_write( "../samples/span_write." + ext ); + span_write("../samples/span_write." + ext); } } diff --git a/examples/13_write_dynamic_configuration.cpp b/examples/13_write_dynamic_configuration.cpp index d67404afd6..06ef1e8e77 100644 --- a/examples/13_write_dynamic_configuration.cpp +++ b/examples/13_write_dynamic_configuration.cpp @@ -8,10 +8,9 @@ using std::cout; using namespace openPMD; - int main() { - if( !getVariants()["adios2"] ) + if (!getVariants()["adios2"]) { // Example configuration below selects the ADIOS2 backend return 0; @@ -67,28 +66,27 @@ chunks = "auto" // open file for writing Series series = - Series( "../samples/dynamicConfig.bp", Access::CREATE, defaults ); + Series("../samples/dynamicConfig.bp", Access::CREATE, defaults); - Datatype datatype = determineDatatype< position_t >(); + Datatype datatype = determineDatatype(); constexpr unsigned long length = 10ul; - Extent global_extent = { length }; - Dataset dataset = Dataset( datatype, global_extent ); - std::shared_ptr< position_t > local_data( - new position_t[ length ], - []( position_t const * ptr ) { delete[] ptr; } ); + Extent global_extent = {length}; + Dataset dataset = Dataset(datatype, global_extent); + std::shared_ptr local_data( + new position_t[length], [](position_t const *ptr) { delete[] ptr; }); WriteIterations iterations = series.writeIterations(); - for( size_t i = 0; i < 100; ++i ) + for (size_t i = 0; i < 100; ++i) { - Iteration iteration = iterations[ i ]; - Record electronPositions = iteration.particles[ "e" ][ "position" ]; + Iteration iteration = iterations[i]; + Record electronPositions = iteration.particles["e"]["position"]; - std::iota( local_data.get(), local_data.get() + length, i * length ); - for( auto const & dim : { "x", "y", "z" } ) + std::iota(local_data.get(), local_data.get() + length, i * length); + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); - pos.storeChunk( local_data, Offset{ 0 }, global_extent ); + RecordComponent pos = electronPositions[dim]; + 
pos.resetDataset(dataset); + pos.storeChunk(local_data, Offset{0}, global_extent); } /* @@ -118,14 +116,14 @@ chunks = "auto" } } })END"; - Dataset differentlyCompressedDataset{ Datatype::INT, { 10 } }; + Dataset differentlyCompressedDataset{Datatype::INT, {10}}; differentlyCompressedDataset.options = differentCompressionSettings; - auto someMesh = iteration.meshes[ "differentCompressionSettings" ] - [ RecordComponent::SCALAR ]; - someMesh.resetDataset( differentlyCompressedDataset ); - std::vector< int > dataVec( 10, i ); - someMesh.storeChunk( dataVec, { 0 }, { 10 } ); + auto someMesh = iteration.meshes["differentCompressionSettings"] + [RecordComponent::SCALAR]; + someMesh.resetDataset(differentlyCompressedDataset); + std::vector dataVec(10, i); + someMesh.storeChunk(dataVec, {0}, {10}); iteration.close(); } diff --git a/examples/1_structure.cpp b/examples/1_structure.cpp index 4cbeeb36c2..dc5056a6c4 100644 --- a/examples/1_structure.cpp +++ b/examples/1_structure.cpp @@ -20,28 +20,33 @@ */ #include - using namespace openPMD; int main() { - /* The root of any openPMD output spans across all data for all iterations is a 'Series'. + /* The root of any openPMD output spans across all data for all iterations + * is a 'Series'. * Data is either in a single file or spread across multiple files. */ Series series = Series("../samples/1_structure.h5", Access::CREATE); - /* Every element that structures your file (groups and datasets for example) can be annotated with attributes. */ - series.setComment("This string will show up at the root ('/') of the output with key 'comment'."); + /* Every element that structures your file (groups and datasets for example) + * can be annotated with attributes. */ + series.setComment( + "This string will show up at the root ('/') of the output with key " + "'comment'."); - /* Access to individual positions inside happens hierarchically, according to the openPMD standard. - * Creation of new elements happens on access inside the tree-like structure. - * Required attributes are initialized to reasonable defaults for every object. */ + /* Access to individual positions inside happens hierarchically, according + * to the openPMD standard. Creation of new elements happens on access + * inside the tree-like structure. Required attributes are initialized to + * reasonable defaults for every object. */ ParticleSpecies electrons = series.iterations[1].particles["electrons"]; - /* Data to be moved from memory to persistent storage is structured into Records, - * each holding an unbounded number of RecordComponents. - * If a Record only contains a single (scalar) component, it is treated slightly differently. + /* Data to be moved from memory to persistent storage is structured into + * Records, each holding an unbounded number of RecordComponents. If a + * Record only contains a single (scalar) component, it is treated slightly + * differently. 
* https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#scalar-vector-and-tensor-records*/ - Record mass = electrons["mass"]; + Record mass = electrons["mass"]; RecordComponent mass_scalar = mass[RecordComponent::SCALAR]; Dataset dataset = Dataset(Datatype::DOUBLE, Extent{1}); diff --git a/examples/2_read_serial.cpp b/examples/2_read_serial.cpp index 1bea5629a3..5cbea17ed2 100644 --- a/examples/2_read_serial.cpp +++ b/examples/2_read_serial.cpp @@ -20,52 +20,56 @@ */ #include +#include #include #include -#include - using std::cout; using namespace openPMD; int main() { - Series series = Series( - "../samples/git-sample/data%T.h5", - Access::READ_ONLY - ); - cout << "Read a Series with openPMD standard version " - << series.openPMD() << '\n'; + Series series = + Series("../samples/git-sample/data%T.h5", Access::READ_ONLY); + cout << "Read a Series with openPMD standard version " << series.openPMD() + << '\n'; - cout << "The Series contains " << series.iterations.size() << " iterations:"; - for( auto const& i : series.iterations ) + cout << "The Series contains " << series.iterations.size() + << " iterations:"; + for (auto const &i : series.iterations) cout << "\n\t" << i.first; cout << '\n'; Iteration i = series.iterations[100]; cout << "Iteration 100 contains " << i.meshes.size() << " meshes:"; - for( auto const& m : i.meshes ) + for (auto const &m : i.meshes) cout << "\n\t" << m.first; cout << '\n'; - cout << "Iteration 100 contains " << i.particles.size() << " particle species:"; - for( auto const& ps : i.particles ) { + cout << "Iteration 100 contains " << i.particles.size() + << " particle species:"; + for (auto const &ps : i.particles) + { cout << "\n\t" << ps.first; - for( auto const& r : ps.second ) { + for (auto const &r : ps.second) + { cout << "\n\t" << r.first; cout << '\n'; } } openPMD::ParticleSpecies electrons = i.particles["electrons"]; - std::shared_ptr charge = electrons["charge"][openPMD::RecordComponent::SCALAR].loadChunk(); + std::shared_ptr charge = + electrons["charge"][openPMD::RecordComponent::SCALAR] + .loadChunk(); series.flush(); - cout << "And the first electron particle has a charge = " << charge.get()[0]; + cout << "And the first electron particle has a charge = " + << charge.get()[0]; cout << '\n'; MeshRecordComponent E_x = i.meshes["E"]["x"]; Extent extent = E_x.getExtent(); cout << "Field E/x has shape ("; - for( auto const& dim : extent ) + for (auto const &dim : extent) cout << dim << ','; cout << ") and has datatype " << E_x.getDatatype() << '\n'; @@ -77,19 +81,19 @@ int main() series.flush(); cout << "Chunk has been read from disk\n" << "Read chunk contains:\n"; - for( size_t row = 0; row < chunk_extent[0]; ++row ) + for (size_t row = 0; row < chunk_extent[0]; ++row) { - for( size_t col = 0; col < chunk_extent[1]; ++col ) - cout << "\t" - << '(' << row + chunk_offset[0] << '|' << col + chunk_offset[1] << '|' << 1 << ")\t" - << chunk_data.get()[row*chunk_extent[1]+col]; + for (size_t col = 0; col < chunk_extent[1]; ++col) + cout << "\t" << '(' << row + chunk_offset[0] << '|' + << col + chunk_offset[1] << '|' << 1 << ")\t" + << chunk_data.get()[row * chunk_extent[1] + col]; cout << '\n'; } auto all_data = E_x.loadChunk(); series.flush(); cout << "Full E/x starts with:\n\t{"; - for( size_t col = 0; col < extent[1] && col < 5; ++col ) + for (size_t col = 0; col < extent[1] && col < 5; ++col) cout << all_data.get()[col] << ", "; cout << "...}\n"; diff --git a/examples/2a_read_thetaMode_serial.cpp b/examples/2a_read_thetaMode_serial.cpp index 
3b52984b53..473a6e7d0f 100644 --- a/examples/2a_read_thetaMode_serial.cpp +++ b/examples/2a_read_thetaMode_serial.cpp @@ -20,31 +20,30 @@ */ #include +#include #include #include -#include - using std::cout; using namespace openPMD; int main() { - Series series = Series( - "../samples/git-sample/thetaMode/data%T.h5", - Access::READ_ONLY - ); + Series series = + Series("../samples/git-sample/thetaMode/data%T.h5", Access::READ_ONLY); Iteration i = series.iterations[500]; MeshRecordComponent E_z_modes = i.meshes["E"]["z"]; - Extent extent = E_z_modes.getExtent(); // (modal components, r, z) + Extent extent = E_z_modes.getExtent(); // (modal components, r, z) // read E_z in all modes auto E_z_raw = E_z_modes.loadChunk(); // read E_z in mode_0 (one scalar field) - auto E_z_m0 = E_z_modes.loadChunk(Offset{0, 0, 0}, Extent{1, extent[1], extent[2]}); + auto E_z_m0 = E_z_modes.loadChunk( + Offset{0, 0, 0}, Extent{1, extent[1], extent[2]}); // read E_z in mode_1 (two fields; skip mode_0 with one scalar field) - auto E_z_m1 = E_z_modes.loadChunk(Offset{1, 0, 0}, Extent{2, extent[1], extent[2]}); + auto E_z_m1 = E_z_modes.loadChunk( + Offset{1, 0, 0}, Extent{2, extent[1], extent[2]}); series.flush(); // all this is still mode-decomposed data, not too useful for users @@ -54,8 +53,8 @@ int main() // user change frequency: time ~= component >> theta >> selected modes // thetaMode::ToCylindrical toCylindrical("all"); // thetaMode::ToCylindricalSlice toCylindricalSlice(1.5708, "all") - // reconstruction to 2D slice in cylindrical coordinates (r, z) for a fixed theta - // E_z_90deg = toCylindricalSlice(E_z_modes).loadChunk(); + // reconstruction to 2D slice in cylindrical coordinates (r, z) for a fixed + // theta E_z_90deg = toCylindricalSlice(E_z_modes).loadChunk(); // E_r_90deg = toCylindricalSlice(i.meshes["E"]["r"]).loadChunk(); // E_t_90deg = toCylindricalSlice(i.meshes["E"]["t"]).loadChunk(); // reconstruction to 3D cylindrical coordinates (r, t, z) @@ -64,9 +63,10 @@ int main() // reconstruction to 3D and 2D cartesian: E_x, E_y, E_z // thetaMode::ToCylindrical toCartesian({'x': 1.e-6, 'y': 1.e-6}, "all"); - // ... toCartesianSliceYZ({'x': 1.e-6, 'y': 1.e-6}, 'x', 0., "all"); // and absolute slice position - // E_z_xyz = toCartesian(E_z_modes).loadChunk(); # (x, y, z) - // E_z_yz = toCartesianSliceYZ(E_z_modes).loadChunk(); # (y, z) + // ... toCartesianSliceYZ({'x': 1.e-6, 'y': 1.e-6}, 'x', 0., + // "all"); // and absolute slice position E_z_xyz = + // toCartesian(E_z_modes).loadChunk(); # (x, y, z) E_z_yz = + // toCartesianSliceYZ(E_z_modes).loadChunk(); # (y, z) // series.flush(); /* The files in 'series' are still open until the object is destroyed, on diff --git a/examples/3_write_serial.cpp b/examples/3_write_serial.cpp index cdd32a9e41..71628bc671 100644 --- a/examples/3_write_serial.cpp +++ b/examples/3_write_serial.cpp @@ -20,11 +20,10 @@ */ #include +#include #include #include #include -#include - using std::cout; using namespace openPMD; @@ -35,33 +34,30 @@ int main(int argc, char *argv[]) size_t size = (argc == 2 ? 
atoi(argv[1]) : 3); // matrix dataset to write with values 0...size*size-1 - std::vector global_data(size*size); + std::vector global_data(size * size); std::iota(global_data.begin(), global_data.end(), 0.); cout << "Set up a 2D square array (" << size << 'x' << size << ") that will be written\n"; // open file for writing - Series series = Series( - "../samples/3_write_serial.h5", - Access::CREATE - ); + Series series = Series("../samples/3_write_serial.h5", Access::CREATE); cout << "Created an empty " << series.iterationEncoding() << " Series\n"; MeshRecordComponent rho = - series - .iterations[1] - .meshes["rho"][MeshRecordComponent::SCALAR]; - cout << "Created a scalar mesh Record with all required openPMD attributes\n"; + series.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR]; + cout << "Created a scalar mesh Record with all required openPMD " + "attributes\n"; Datatype datatype = determineDatatype(shareRaw(global_data)); Extent extent = {size, size}; Dataset dataset = Dataset(datatype, extent); - cout << "Created a Dataset of size " << dataset.extent[0] << 'x' << dataset.extent[1] - << " and Datatype " << dataset.dtype << '\n'; + cout << "Created a Dataset of size " << dataset.extent[0] << 'x' + << dataset.extent[1] << " and Datatype " << dataset.dtype << '\n'; rho.resetDataset(dataset); - cout << "Set the dataset properties for the scalar field rho in iteration 1\n"; + cout << "Set the dataset properties for the scalar field rho in iteration " + "1\n"; series.flush(); cout << "File structure and required attributes have been written\n"; diff --git a/examples/3a_write_thetaMode_serial.cpp b/examples/3a_write_thetaMode_serial.cpp index 87c9315dfb..df7134a9f7 100644 --- a/examples/3a_write_thetaMode_serial.cpp +++ b/examples/3a_write_thetaMode_serial.cpp @@ -26,26 +26,24 @@ #include #include - using namespace openPMD; int main() { // open file for writing - Series series = Series( - "../samples/3_write_thetaMode_serial.h5", - Access::CREATE - ); + Series series = + Series("../samples/3_write_thetaMode_serial.h5", Access::CREATE); // configure and setup geometry unsigned int const num_modes = 5u; - unsigned int const num_fields = 1u + (num_modes-1u) * 2u; // the first mode is purely real + unsigned int const num_fields = + 1u + (num_modes - 1u) * 2u; // the first mode is purely real unsigned int const N_r = 60; unsigned int const N_z = 200; // write values 0...size-1 - std::vector< double > E_r_data(num_fields*N_r*N_z); - std::vector< float > E_t_data(num_fields*N_r*N_z); + std::vector E_r_data(num_fields * N_r * N_z); + std::vector E_t_data(num_fields * N_r * N_z); std::iota(E_r_data.begin(), E_r_data.end(), 0.0); std::iota(E_t_data.begin(), E_t_data.end(), 0.f); @@ -54,42 +52,36 @@ int main() std::string const geometryParameters = geos.str(); Mesh E = series.iterations[0].meshes["E"]; - E.setGeometry( Mesh::Geometry::thetaMode ); - E.setGeometryParameters( geometryParameters ); - E.setDataOrder( Mesh::DataOrder::C ); - E.setGridSpacing( std::vector{1.0, 1.0} ); - E.setGridGlobalOffset( std::vector{0.0, 0.0} ); - E.setGridUnitSI( 1.0 ); - E.setAxisLabels( std::vector< std::string >{"r", "z"} ); - std::map< UnitDimension, double > const unitDimensions{ - {UnitDimension::I, 1.0}, - {UnitDimension::J, 2.0} - }; - E.setUnitDimension( unitDimensions ); + E.setGeometry(Mesh::Geometry::thetaMode); + E.setGeometryParameters(geometryParameters); + E.setDataOrder(Mesh::DataOrder::C); + E.setGridSpacing(std::vector{1.0, 1.0}); + E.setGridGlobalOffset(std::vector{0.0, 0.0}); + 
E.setGridUnitSI(1.0); + E.setAxisLabels(std::vector{"r", "z"}); + std::map const unitDimensions{ + {UnitDimension::I, 1.0}, {UnitDimension::J, 2.0}}; + E.setUnitDimension(unitDimensions); // write components: E_z, E_r, E_t auto E_z = E["z"]; - E_z.setUnitSI( 10. ); - E_z.setPosition(std::vector< double >{0.0, 0.5}); + E_z.setUnitSI(10.); + E_z.setPosition(std::vector{0.0, 0.5}); // (modes, r, z) see setGeometryParameters - E_z.resetDataset( Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}) ); - E_z.makeConstant( static_cast< float >(42.54) ); + E_z.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})); + E_z.makeConstant(static_cast(42.54)); // write all modes at once (otherwise iterate over modes and first index auto E_r = E["r"]; - E_r.setUnitSI( 10. ); - E_r.setPosition(std::vector< double >{0.5, 0.0}); - E_r.resetDataset( - Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z}) - ); + E_r.setUnitSI(10.); + E_r.setPosition(std::vector{0.5, 0.0}); + E_r.resetDataset(Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z})); E_r.storeChunk(E_r_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); auto E_t = E["t"]; - E_t.setUnitSI( 10. ); - E_t.setPosition(std::vector< double >{0.0, 0.0}); - E_t.resetDataset( - Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}) - ); + E_t.setUnitSI(10.); + E_t.setPosition(std::vector{0.0, 0.0}); + E_t.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})); E_t.storeChunk(E_t_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); series.flush(); diff --git a/examples/3b_write_resizable_particles.cpp b/examples/3b_write_resizable_particles.cpp index c228bae654..d15dba92c6 100644 --- a/examples/3b_write_resizable_particles.cpp +++ b/examples/3b_write_resizable_particles.cpp @@ -20,67 +20,63 @@ */ #include +#include #include #include -#include - using namespace openPMD; int main() { // open file for writing - Series series = Series( - "../samples/3b_write_resizable_particles.h5", - Access::CREATE - ); + Series series = + Series("../samples/3b_write_resizable_particles.h5", Access::CREATE); - ParticleSpecies electrons = - series.iterations[0].particles["electrons"]; + ParticleSpecies electrons = series.iterations[0].particles["electrons"]; // our initial data to write - std::vector< double > x{ 0., 1., 2., 3., 4. }; - std::vector< double > y{ -2., -3., -4., -5., -6. 
}; + std::vector x{0., 1., 2., 3., 4.}; + std::vector y{-2., -3., -4., -5., -6.}; // both x and y the same type, otherwise we use two distinct datasets - Datatype dtype = determineDatatype( shareRaw( x ) ); - Extent size = { x.size() }; - auto dataset = Dataset( dtype, size, "{ \"resizable\": true }" ); + Datatype dtype = determineDatatype(shareRaw(x)); + Extent size = {x.size()}; + auto dataset = Dataset(dtype, size, "{ \"resizable\": true }"); RecordComponent rc_x = electrons["position"]["x"]; RecordComponent rc_y = electrons["position"]["y"]; - rc_x.resetDataset( dataset ); - rc_y.resetDataset( dataset ); + rc_x.resetDataset(dataset); + rc_y.resetDataset(dataset); - Offset offset = { 0 }; - rc_x.storeChunk( x, offset, { x.size() } ); - rc_y.storeChunk( y, offset, { y.size() } ); + Offset offset = {0}; + rc_x.storeChunk(x, offset, {x.size()}); + rc_y.storeChunk(y, offset, {y.size()}); // openPMD allows additional position offsets: set to zero here RecordComponent rc_xo = electrons["positionOffset"]["x"]; RecordComponent rc_yo = electrons["positionOffset"]["y"]; - rc_xo.resetDataset( dataset ); - rc_yo.resetDataset( dataset ); - rc_xo.makeConstant( 0.0 ); - rc_yo.makeConstant( 0.0 ); + rc_xo.resetDataset(dataset); + rc_yo.resetDataset(dataset); + rc_xo.makeConstant(0.0); + rc_yo.makeConstant(0.0); // after this call, the provided data buffers can be used again or deleted series.flush(); // extend and append more particles - x = { 5., 6., 7. }; - y = {-7., -8., -9. }; + x = {5., 6., 7.}; + y = {-7., -8., -9.}; offset.at(0) += dataset.extent.at(0); - dataset = Dataset( { dataset.extent.at(0) + x.size() } ); + dataset = Dataset({dataset.extent.at(0) + x.size()}); - rc_x.resetDataset( dataset ); - rc_y.resetDataset( dataset ); + rc_x.resetDataset(dataset); + rc_y.resetDataset(dataset); - rc_x.storeChunk( x, offset, { x.size() } ); - rc_y.storeChunk( y, offset, { x.size() } ); + rc_x.storeChunk(x, offset, {x.size()}); + rc_y.storeChunk(y, offset, {x.size()}); - rc_xo.resetDataset( dataset ); - rc_yo.resetDataset( dataset ); + rc_xo.resetDataset(dataset); + rc_yo.resetDataset(dataset); // after this call, the provided data buffers can be used again or deleted series.flush(); diff --git a/examples/4_read_parallel.cpp b/examples/4_read_parallel.cpp index a88af11b0e..530b00fe5d 100644 --- a/examples/4_read_parallel.cpp +++ b/examples/4_read_parallel.cpp @@ -22,10 +22,9 @@ #include +#include #include #include -#include - using std::cout; using namespace openPMD; @@ -40,7 +39,6 @@ int main(int argc, char *argv[]) MPI_Comm_size(MPI_COMM_WORLD, &mpi_size); MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* note: this scope is intentional to destruct the openPMD::Series object * prior to MPI_Finalize(); */ @@ -48,41 +46,39 @@ int main(int argc, char *argv[]) Series series = Series( "../samples/git-sample/data%T.h5", Access::READ_ONLY, - MPI_COMM_WORLD - ); - if( 0 == mpi_rank ) - cout << "Read a series in parallel with " << mpi_size << " MPI ranks\n"; + MPI_COMM_WORLD); + if (0 == mpi_rank) + cout << "Read a series in parallel with " << mpi_size + << " MPI ranks\n"; MeshRecordComponent E_x = series.iterations[100].meshes["E"]["x"]; Offset chunk_offset = { - static_cast< long unsigned int >(mpi_rank) + 1, - 1, - 1 - }; + static_cast(mpi_rank) + 1, 1, 1}; Extent chunk_extent = {2, 2, 1}; auto chunk_data = E_x.loadChunk(chunk_offset, chunk_extent); - if( 0 == mpi_rank ) - cout << "Queued the loading of a single chunk per MPI rank from disk, " + if (0 == mpi_rank) + cout << "Queued the loading of a single 
chunk per MPI rank from " + "disk, " "ready to execute\n"; series.flush(); - if( 0 == mpi_rank ) + if (0 == mpi_rank) cout << "Chunks have been read from disk\n"; - for( int i = 0; i < mpi_size; ++i ) + for (int i = 0; i < mpi_size; ++i) { - if( i == mpi_rank ) + if (i == mpi_rank) { cout << "Rank " << mpi_rank << " - Read chunk contains:\n"; - for( size_t row = 0; row < chunk_extent[0]; ++row ) + for (size_t row = 0; row < chunk_extent[0]; ++row) { - for( size_t col = 0; col < chunk_extent[1]; ++col ) - cout << "\t" - << '(' << row + chunk_offset[0] << '|' << col + chunk_offset[1] << '|' << 1 << ")\t" - << chunk_data.get()[row*chunk_extent[1]+col]; + for (size_t col = 0; col < chunk_extent[1]; ++col) + cout << "\t" << '(' << row + chunk_offset[0] << '|' + << col + chunk_offset[1] << '|' << 1 << ")\t" + << chunk_data.get()[row * chunk_extent[1] + col]; cout << std::endl; } } diff --git a/examples/5_write_parallel.cpp b/examples/5_write_parallel.cpp index cdbbbe3ec0..b8875504a5 100644 --- a/examples/5_write_parallel.cpp +++ b/examples/5_write_parallel.cpp @@ -24,8 +24,7 @@ #include #include -#include // std::vector - +#include // std::vector using std::cout; using namespace openPMD; @@ -47,51 +46,47 @@ int main(int argc, char *argv[]) // global data set to write: [MPI_Size * 10, 300] // each rank writes a 10x300 slice with its MPI rank as values auto const value = float(mpi_size); - std::vector local_data( - 10 * 300, value); - if( 0 == mpi_rank ) - cout << "Set up a 2D array with 10x300 elements per MPI rank (" << mpi_size - << "x) that will be written to disk\n"; + std::vector local_data(10 * 300, value); + if (0 == mpi_rank) + cout << "Set up a 2D array with 10x300 elements per MPI rank (" + << mpi_size << "x) that will be written to disk\n"; // open file for writing Series series = Series( - "../samples/5_parallel_write.h5", - Access::CREATE, - MPI_COMM_WORLD - ); - if( 0 == mpi_rank ) - cout << "Created an empty series in parallel with " - << mpi_size << " MPI ranks\n"; + "../samples/5_parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); + if (0 == mpi_rank) + cout << "Created an empty series in parallel with " << mpi_size + << " MPI ranks\n"; MeshRecordComponent mymesh = - series - .iterations[1] - .meshes["mymesh"][MeshRecordComponent::SCALAR]; + series.iterations[1].meshes["mymesh"][MeshRecordComponent::SCALAR]; // example 1D domain decomposition in first index Datatype datatype = determineDatatype(); Extent global_extent = {10ul * mpi_size, 300}; Dataset dataset = Dataset(datatype, global_extent); - if( 0 == mpi_rank ) - cout << "Prepared a Dataset of size " << dataset.extent[0] - << "x" << dataset.extent[1] - << " and Datatype " << dataset.dtype << '\n'; + if (0 == mpi_rank) + cout << "Prepared a Dataset of size " << dataset.extent[0] << "x" + << dataset.extent[1] << " and Datatype " << dataset.dtype + << '\n'; mymesh.resetDataset(dataset); - if( 0 == mpi_rank ) - cout << "Set the global Dataset properties for the scalar field mymesh in iteration 1\n"; + if (0 == mpi_rank) + cout << "Set the global Dataset properties for the scalar field " + "mymesh in iteration 1\n"; // example shows a 1D domain decomposition in first index Offset chunk_offset = {10ul * mpi_rank, 0}; Extent chunk_extent = {10, 300}; mymesh.storeChunk(local_data, chunk_offset, chunk_extent); - if( 0 == mpi_rank ) - cout << "Registered a single chunk per MPI rank containing its contribution, " + if (0 == mpi_rank) + cout << "Registered a single chunk per MPI rank containing its " + "contribution, " "ready to write 
content to disk\n"; series.flush(); - if( 0 == mpi_rank ) + if (0 == mpi_rank) cout << "Dataset content has been fully written to disk\n"; } diff --git a/examples/6_dump_filebased_series.cpp b/examples/6_dump_filebased_series.cpp index 96fbce5b6f..99b2b0939d 100644 --- a/examples/6_dump_filebased_series.cpp +++ b/examples/6_dump_filebased_series.cpp @@ -3,7 +3,6 @@ #include #include - using namespace openPMD; int main() @@ -11,10 +10,10 @@ int main() Series o = Series("../samples/git-sample/data%T.h5", Access::READ_ONLY); std::cout << "Read iterations "; - for( auto const& val : o.iterations ) + for (auto const &val : o.iterations) std::cout << '\t' << val.first; std::cout << "Read attributes in the root:\n"; - for( auto const& val : o.attributes() ) + for (auto const &val : o.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; @@ -28,122 +27,141 @@ int main() << '\n'; std::cout << "Read attributes in basePath:\n"; - for( auto const& a : o.iterations.attributes() ) + for (auto const &a : o.iterations.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; std::cout << "Read iterations in basePath:\n"; - for( auto const& i : o.iterations ) + for (auto const &i : o.iterations) std::cout << '\t' << i.first << '\n'; std::cout << '\n'; - for( auto const& i : o.iterations ) + for (auto const &i : o.iterations) { std::cout << "Read attributes in iteration " << i.first << ":\n"; - for( auto const& val : i.second.attributes() ) + for (auto const &val : i.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::cout << i.first << ".time - " << i.second.time< float >() << '\n' - << i.first << ".dt - " << i.second.dt< float >() << '\n' - << i.first << ".timeUnitSI - " << i.second.timeUnitSI() << '\n' + std::cout << i.first << ".time - " << i.second.time() << '\n' + << i.first << ".dt - " << i.second.dt() << '\n' + << i.first << ".timeUnitSI - " << i.second.timeUnitSI() + << '\n' << '\n'; - std::cout << "Read attributes in meshesPath in iteration " << i.first << ":\n"; - for( auto const& a : i.second.meshes.attributes() ) + std::cout << "Read attributes in meshesPath in iteration " << i.first + << ":\n"; + for (auto const &a : i.second.meshes.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; std::cout << "Read meshes in iteration " << i.first << ":\n"; - for( auto const& m : i.second.meshes ) + for (auto const &m : i.second.meshes) std::cout << '\t' << m.first << '\n'; std::cout << '\n'; - for( auto const& m : i.second.meshes ) + for (auto const &m : i.second.meshes) { - std::cout << "Read attributes for mesh " << m.first << " in iteration " << i.first << ":\n"; - for( auto const& val : m.second.attributes() ) + std::cout << "Read attributes for mesh " << m.first + << " in iteration " << i.first << ":\n"; + for (auto const &val : m.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; std::string meshPrefix = std::to_string(i.first) + '.' 
+ m.first; std::string axisLabels = ""; - for( auto const& val : m.second.axisLabels() ) + for (auto const &val : m.second.axisLabels()) axisLabels += val + ", "; std::string gridSpacing = ""; - for( auto const& val : m.second.gridSpacing< float >() ) + for (auto const &val : m.second.gridSpacing()) gridSpacing += std::to_string(val) + ", "; std::string gridGlobalOffset = ""; - for( auto const& val : m.second.gridGlobalOffset() ) + for (auto const &val : m.second.gridGlobalOffset()) gridGlobalOffset += std::to_string(val) + ", "; std::string unitDimension = ""; - for( auto const& val : m.second.unitDimension() ) + for (auto const &val : m.second.unitDimension()) unitDimension += std::to_string(val) + ", "; - std::cout << meshPrefix << ".geometry - " << m.second.geometry() << '\n' - << meshPrefix << ".dataOrder - " << m.second.dataOrder() << '\n' + std::cout << meshPrefix << ".geometry - " << m.second.geometry() + << '\n' + << meshPrefix << ".dataOrder - " << m.second.dataOrder() + << '\n' << meshPrefix << ".axisLabels - " << axisLabels << '\n' << meshPrefix << ".gridSpacing - " << gridSpacing << '\n' - << meshPrefix << ".gridGlobalOffset - " << gridGlobalOffset << '\n' - << meshPrefix << ".gridUnitSI - " << m.second.gridUnitSI() << '\n' - << meshPrefix << ".unitDimension - " << unitDimension << '\n' - << meshPrefix << ".timeOffset - " << m.second.timeOffset< float >() << '\n' + << meshPrefix << ".gridGlobalOffset - " + << gridGlobalOffset << '\n' + << meshPrefix << ".gridUnitSI - " << m.second.gridUnitSI() + << '\n' + << meshPrefix << ".unitDimension - " << unitDimension + << '\n' + << meshPrefix << ".timeOffset - " + << m.second.timeOffset() << '\n' << '\n'; std::cout << "Read recordComponents for mesh " << m.first << ":\n"; - for( auto const& rc : m.second ) + for (auto const &rc : m.second) std::cout << '\t' << rc.first << '\n'; std::cout << '\n'; - for( auto const& rc : m.second ) + for (auto const &rc : m.second) { - std::cout << "Read attributes for recordComponent " << rc.first << " for mesh " << m.first << '\n'; - for( auto const& val : rc.second.attributes() ) + std::cout << "Read attributes for recordComponent " << rc.first + << " for mesh " << m.first << '\n'; + for (auto const &val : rc.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::string componentPrefix = std::to_string(i.first) + '.' + m.first + '.' + rc.first; + std::string componentPrefix = + std::to_string(i.first) + '.' + m.first + '.' 
+ rc.first; std::string position = ""; - for( auto const& val : rc.second.position< double >() ) + for (auto const &val : rc.second.position()) position += std::to_string(val) + ", "; - std::cout << componentPrefix << ".unitSI - " << rc.second.unitSI() << '\n' - << componentPrefix << ".position - " << position << '\n' + std::cout << componentPrefix << ".unitSI - " + << rc.second.unitSI() << '\n' + << componentPrefix << ".position - " << position + << '\n' << '\n'; } } - std::cout << "Read attributes in particlesPath in iteration " << i.first << ":\n"; - for( auto const& a : i.second.particles.attributes() ) + std::cout << "Read attributes in particlesPath in iteration " << i.first + << ":\n"; + for (auto const &a : i.second.particles.attributes()) std::cout << '\t' << a << '\n'; std::cout << '\n'; std::cout << "Read particleSpecies in iteration " << i.first << ":\n"; - for( auto const& val : i.second.particles ) + for (auto const &val : i.second.particles) std::cout << '\t' << val.first << '\n'; std::cout << '\n'; - for( auto const& p : i.second.particles ) + for (auto const &p : i.second.particles) { - std::cout << "Read attributes for particle species " << p.first << " in iteration " << i.first << ":\n"; - for( auto const& val : p.second.attributes() ) + std::cout << "Read attributes for particle species " << p.first + << " in iteration " << i.first << ":\n"; + for (auto const &val : p.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; - std::cout << "Read particle records for particle species " << p.first << " in iteration " << i.first << ":\n"; - for( auto const& r : p.second ) + std::cout << "Read particle records for particle species " + << p.first << " in iteration " << i.first << ":\n"; + for (auto const &r : p.second) std::cout << '\t' << r.first << '\n'; std::cout << '\n'; - for( auto const& r : p.second ) + for (auto const &r : p.second) { - std::cout << "Read recordComponents for particle record " << r.first << ":\n"; - for( auto const& rc : r.second ) + std::cout << "Read recordComponents for particle record " + << r.first << ":\n"; + for (auto const &rc : r.second) std::cout << '\t' << rc.first << '\n'; std::cout << '\n'; - for( auto const& rc : r.second ) + for (auto const &rc : r.second) { - std::cout << "Read attributes for recordComponent " << rc.first << " for particle record " << r.first << '\n'; - for( auto const& val : rc.second.attributes() ) + std::cout << "Read attributes for recordComponent " + << rc.first << " for particle record " << r.first + << '\n'; + for (auto const &val : rc.second.attributes()) std::cout << '\t' << val << '\n'; std::cout << '\n'; } diff --git a/examples/7_extended_write_serial.cpp b/examples/7_extended_write_serial.cpp index 02831dc11d..62d8752a6e 100644 --- a/examples/7_extended_write_serial.cpp +++ b/examples/7_extended_write_serial.cpp @@ -3,45 +3,48 @@ #include #include - -int -main() +int main() { namespace io = openPMD; { - auto f = io::Series("working/directory/2D_simData.h5", io::Access::CREATE); + auto f = + io::Series("working/directory/2D_simData.h5", io::Access::CREATE); - // all required openPMD attributes will be set to reasonable default values (all ones, all zeros, empty strings,...) - // manually setting them enforces the openPMD standard + // all required openPMD attributes will be set to reasonable default + // values (all ones, all zeros, empty strings,...) 
manually setting them + // enforces the openPMD standard f.setMeshesPath("custom_meshes_path"); f.setParticlesPath("long_and_very_custom_particles_path"); // it is possible to add and remove attributes f.setComment("This is fine and actually encouraged by the standard"); f.setAttribute( - "custom_attribute_name", - std::string("This attribute is manually added and can contain about any datatype you would want") - ); - // note that removing attributes required by the standard typically makes the file unusable for post-processing + "custom_attribute_name", + std::string("This attribute is manually added and can contain " + "about any datatype you would want")); + // note that removing attributes required by the standard typically + // makes the file unusable for post-processing f.deleteAttribute("custom_attribute_name"); - // everything that is accessed with [] should be interpreted as permanent storage - // the objects sunk into these locations are deep copies + // everything that is accessed with [] should be interpreted as + // permanent storage the objects sunk into these locations are deep + // copies { - // setting attributes can be chained in JS-like syntax for compact code - f.iterations[1] - .setTime(42.0) - .setDt(1.0) - .setTimeUnitSI(1.39e-16); - f.iterations[2].setComment("This iteration will not appear in any output"); + // setting attributes can be chained in JS-like syntax for compact + // code + f.iterations[1].setTime(42.0).setDt(1.0).setTimeUnitSI(1.39e-16); + f.iterations[2].setComment( + "This iteration will not appear in any output"); f.iterations.erase(2); } { // everything is a reference io::Iteration reference = f.iterations[1]; - reference.setComment("Modifications to a copied iteration refer to the same iteration"); + reference.setComment( + "Modifications to a copied iteration refer to the same " + "iteration"); } f.iterations[1].deleteAttribute("comment"); @@ -50,16 +53,19 @@ main() // the underlying concept for numeric data is the openPMD Record // https://github.com/openPMD/openPMD-standard/blob/1.0.1/STANDARD.md#scalar-vector-and-tensor-records // Meshes are specialized records - cur_it.meshes["generic_2D_field"].setUnitDimension({{io::UnitDimension::L, -3}, - {io::UnitDimension::M, 1}}); + cur_it.meshes["generic_2D_field"].setUnitDimension( + {{io::UnitDimension::L, -3}, {io::UnitDimension::M, 1}}); { - // copies of objects are handles/references to the same underlying object + // copies of objects are handles/references to the same underlying + // object io::Mesh lowRez = cur_it.meshes["generic_2D_field"]; - lowRez.setGridSpacing(std::vector{6, 1}).setGridGlobalOffset({0, 600}); + lowRez.setGridSpacing(std::vector{6, 1}) + .setGridGlobalOffset({0, 600}); io::Mesh highRez = cur_it.meshes["generic_2D_field"]; - highRez.setGridSpacing(std::vector{6, 0.5}).setGridGlobalOffset({0, 1200}); + highRez.setGridSpacing(std::vector{6, 0.5}) + .setGridGlobalOffset({0, 1200}); cur_it.meshes.erase("generic_2D_field"); cur_it.meshes["lowRez_2D_field"] = lowRez; @@ -70,26 +76,32 @@ main() { // particles are handled very similar io::ParticleSpecies electrons = cur_it.particles["electrons"]; - electrons.setAttribute("NoteWorthyParticleSpeciesProperty", - std::string("Observing this species was a blast.")); - electrons["displacement"].setUnitDimension({{io::UnitDimension::M, 1}}); + electrons.setAttribute( + "NoteWorthyParticleSpeciesProperty", + std::string("Observing this species was a blast.")); + electrons["displacement"].setUnitDimension( + {{io::UnitDimension::M, 1}}); 
electrons["displacement"]["x"].setUnitSI(1e-6); electrons.erase("displacement"); - electrons["weighting"][io::RecordComponent::SCALAR].makeConstant(1.e-5); + electrons["weighting"][io::RecordComponent::SCALAR].makeConstant( + 1.e-5); } io::Mesh mesh = cur_it.meshes["lowRez_2D_field"]; mesh.setAxisLabels({"x", "y"}); - // data is assumed to reside behind a pointer as a contiguous column-major array - // shared data ownership during IO is indicated with a smart pointer - std::shared_ptr partial_mesh(new double[5], [](double const *p) { - delete[] p; - p = nullptr; - }); - - // before storing record data, you must specify the dataset once per component - // this describes the datatype and shape of data as it should be written to disk + // data is assumed to reside behind a pointer as a contiguous + // column-major array shared data ownership during IO is indicated with + // a smart pointer + std::shared_ptr partial_mesh( + new double[5], [](double const *p) { + delete[] p; + p = nullptr; + }); + + // before storing record data, you must specify the dataset once per + // component this describes the datatype and shape of data as it should + // be written to disk io::Datatype dtype = io::determineDatatype(partial_mesh); auto d = io::Dataset(dtype, io::Extent{2, 5}); std::string datasetConfig = R"END( @@ -118,56 +130,68 @@ main() io::ParticleSpecies electrons = cur_it.particles["electrons"]; io::Extent mpiDims{4}; - std::shared_ptr partial_particlePos(new float[2], [](float const *p) { - delete[] p; - p = nullptr; - }); + std::shared_ptr partial_particlePos( + new float[2], [](float const *p) { + delete[] p; + p = nullptr; + }); dtype = io::determineDatatype(partial_particlePos); d = io::Dataset(dtype, mpiDims); electrons["position"]["x"].resetDataset(d); - std::shared_ptr partial_particleOff(new uint64_t[2], [](uint64_t const *p) { - delete[] p; - p = nullptr; - }); + std::shared_ptr partial_particleOff( + new uint64_t[2], [](uint64_t const *p) { + delete[] p; + p = nullptr; + }); dtype = io::determineDatatype(partial_particleOff); d = io::Dataset(dtype, mpiDims); electrons["positionOffset"]["x"].resetDataset(d); auto dset = io::Dataset(io::determineDatatype(), {2}); - electrons.particlePatches["numParticles"][io::RecordComponent::SCALAR].resetDataset(dset); - electrons.particlePatches["numParticlesOffset"][io::RecordComponent::SCALAR].resetDataset(dset); + electrons.particlePatches["numParticles"][io::RecordComponent::SCALAR] + .resetDataset(dset); + electrons + .particlePatches["numParticlesOffset"][io::RecordComponent::SCALAR] + .resetDataset(dset); dset = io::Dataset(io::Datatype::FLOAT, {2}); - electrons.particlePatches["offset"].setUnitDimension({{io::UnitDimension::L, 1}}); + electrons.particlePatches["offset"].setUnitDimension( + {{io::UnitDimension::L, 1}}); electrons.particlePatches["offset"]["x"].resetDataset(dset); - electrons.particlePatches["extent"].setUnitDimension({{io::UnitDimension::L, 1}}); + electrons.particlePatches["extent"].setUnitDimension( + {{io::UnitDimension::L, 1}}); electrons.particlePatches["extent"]["x"].resetDataset(dset); - // at any point in time you may decide to dump already created output to disk - // note that this will make some operations impossible (e.g. renaming files) + // at any point in time you may decide to dump already created output to + // disk note that this will make some operations impossible (e.g. 
+ // renaming files) f.flush(); // chunked writing of the final dataset at a time is supported // this loop writes one row at a time - double mesh_x[2][5] = {{1, 3, 5, 7, 9}, - {11, 13, 15, 17, 19}}; + double mesh_x[2][5] = {{1, 3, 5, 7, 9}, {11, 13, 15, 17, 19}}; float particle_position[4] = {0.1f, 0.2f, 0.3f, 0.4f}; uint64_t particle_positionOffset[4] = {0u, 1u, 2u, 3u}; - for (uint64_t i = 0u; i < 2u; ++i) { + for (uint64_t i = 0u; i < 2u; ++i) + { for (int col = 0; col < 5; ++col) partial_mesh.get()[col] = mesh_x[i][col]; io::Offset o = io::Offset{i, 0}; io::Extent e = io::Extent{1, 5}; mesh["x"].storeChunk(partial_mesh, o, e); - // operations between store and flush MUST NOT modify the pointed-to data + // operations between store and flush MUST NOT modify the pointed-to + // data f.flush(); - // after the flush completes successfully, access to the shared resource is returned to the caller + // after the flush completes successfully, access to the shared + // resource is returned to the caller - for (int idx = 0; idx < 2; ++idx) { + for (int idx = 0; idx < 2; ++idx) + { partial_particlePos.get()[idx] = particle_position[idx + 2 * i]; - partial_particleOff.get()[idx] = particle_positionOffset[idx + 2 * i]; + partial_particleOff.get()[idx] = + particle_positionOffset[idx + 2 * i]; } uint64_t numParticlesOffset = 2 * i; @@ -176,14 +200,23 @@ main() o = io::Offset{numParticlesOffset}; e = io::Extent{numParticles}; electrons["position"]["x"].storeChunk(partial_particlePos, o, e); - electrons["positionOffset"]["x"].storeChunk(partial_particleOff, o, e); - - electrons.particlePatches["numParticles"][io::RecordComponent::SCALAR].store(i, numParticles); - electrons.particlePatches["numParticlesOffset"][io::RecordComponent::SCALAR].store(i, numParticlesOffset); - - electrons.particlePatches["offset"]["x"].store(i, particle_position[numParticlesOffset]); - electrons.particlePatches["extent"]["x"].store(i, particle_position[numParticlesOffset + numParticles - 1] - - particle_position[numParticlesOffset]); + electrons["positionOffset"]["x"].storeChunk( + partial_particleOff, o, e); + + electrons + .particlePatches["numParticles"][io::RecordComponent::SCALAR] + .store(i, numParticles); + electrons + .particlePatches["numParticlesOffset"] + [io::RecordComponent::SCALAR] + .store(i, numParticlesOffset); + + electrons.particlePatches["offset"]["x"].store( + i, particle_position[numParticlesOffset]); + electrons.particlePatches["extent"]["x"].store( + i, + particle_position[numParticlesOffset + numParticles - 1] - + particle_position[numParticlesOffset]); } mesh["y"].resetDataset(d); @@ -195,9 +228,10 @@ main() /* The files in 'f' are still open until the object is destroyed, on * which it cleanly flushes and closes all open file handles. - * When running out of scope on return, the 'Series' destructor is called. + * When running out of scope on return, the 'Series' destructor is + * called. 
*/ - } + } // namespace ; return 0; } diff --git a/examples/8_benchmark_parallel.cpp b/examples/8_benchmark_parallel.cpp index 8fef3e86b4..7cad63f7d7 100644 --- a/examples/8_benchmark_parallel.cpp +++ b/examples/8_benchmark_parallel.cpp @@ -1,25 +1,24 @@ -#include #include -#include #include +#include +#include #if openPMD_HAVE_MPI -# include +#include #endif #include #include #include - #if openPMD_HAVE_MPI -inline void -print_help( std::string const program_name ) +inline void print_help(std::string const program_name) { std::cout << "Usage: " << program_name << "\n"; std::cout << "Run a simple parallel write and read benchmark.\n\n"; std::cout << "Options:\n"; - std::cout << " -w, --weak run a weak scaling (default: strong scaling)\n"; + std::cout + << " -w, --weak run a weak scaling (default: strong scaling)\n"; std::cout << " -h, --help display this help and exit\n"; std::cout << " -v, --version output version information and exit\n"; std::cout << "\n"; @@ -28,49 +27,49 @@ print_help( std::string const program_name ) std::cout << " " << program_name << " # for a strong scaling\n"; } -inline void -print_version( std::string const program_name ) +inline void print_version(std::string const program_name) { - std::cout << program_name << " (openPMD-api) " - << openPMD::getVersion() << "\n"; + std::cout << program_name << " (openPMD-api) " << openPMD::getVersion() + << "\n"; std::cout << "Copyright 2017-2021 openPMD contributors\n"; std::cout << "Authors: Franz Poeschel, Axel Huebl et al.\n"; std::cout << "License: LGPLv3+\n"; - std::cout << "This is free software: you are free to change and redistribute it.\n" + std::cout << "This is free software: you are free to change and " + "redistribute it.\n" "There is NO WARRANTY, to the extent permitted by law.\n"; } -int main( - int argc, - char *argv[] -) +int main(int argc, char *argv[]) { using namespace std; - MPI_Init( - &argc, - &argv - ); + MPI_Init(&argc, &argv); // CLI parsing - std::vector< std::string > str_argv; - for( int i = 0; i < argc; ++i ) str_argv.emplace_back( argv[i] ); + std::vector str_argv; + for (int i = 0; i < argc; ++i) + str_argv.emplace_back(argv[i]); bool weak_scaling = false; - for (int c = 1; c < int(argc); c++) { - if (std::string("--help") == argv[c] || std::string("-h") == argv[c]) { + for (int c = 1; c < int(argc); c++) + { + if (std::string("--help") == argv[c] || std::string("-h") == argv[c]) + { print_help(argv[0]); return 0; } - if (std::string("--version") == argv[c] || std::string("-v") == argv[c]) { + if (std::string("--version") == argv[c] || std::string("-v") == argv[c]) + { print_version(argv[0]); return 0; } - if (std::string("--weak") == argv[c] || std::string("-w") == argv[c]) { + if (std::string("--weak") == argv[c] || std::string("-w") == argv[c]) + { weak_scaling = true; } } - if (argc > 2) { + if (argc > 2) + { std::cerr << "Too many arguments! See: " << argv[0] << " --help\n"; return 1; } @@ -85,53 +84,46 @@ int main( openPMD::Datatype dt = openPMD::determineDatatype(); #endif - int rank, size; - MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - MPI_Comm_size( MPI_COMM_WORLD, &size ); - const unsigned scale_up = weak_scaling ? unsigned( size ) : 1u; + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + MPI_Comm_size(MPI_COMM_WORLD, &size); + const unsigned scale_up = weak_scaling ? unsigned(size) : 1u; // Total (in this case 3D) dataset across all MPI ranks. // Will be the same for all configured benchmarks. 
- openPMD::Extent total{ - 100 * scale_up, - 100, - 1000 - }; - - // The blockslicer assigns to each rank its part of the dataset. The rank will - // write to and read from that part. OneDimensionalBlockSlicer is a simple - // implementation of the BlockSlicer abstract class that will divide the - // dataset into hyperslab along one given dimension. - // If you wish to partition your dataset in a different manner, you can - // replace this with your own implementation of BlockSlicer. + openPMD::Extent total{100 * scale_up, 100, 1000}; + + // The blockslicer assigns to each rank its part of the dataset. The rank + // will write to and read from that part. OneDimensionalBlockSlicer is a + // simple implementation of the BlockSlicer abstract class that will divide + // the dataset into hyperslab along one given dimension. If you wish to + // partition your dataset in a different manner, you can replace this with + // your own implementation of BlockSlicer. auto blockSlicer = std::make_shared(0); - // Set up the DatasetFiller. The benchmarks will later inquire the DatasetFiller - // to get data for writing. - std::uniform_int_distribution distr( - 0, - 200000000 - ); + // Set up the DatasetFiller. The benchmarks will later inquire the + // DatasetFiller to get data for writing. + std::uniform_int_distribution distr(0, 200000000); openPMD::RandomDatasetFiller df{distr}; // The Benchmark class will in principle allow a user to configure // runs that write and read different datatypes. - // For this, the class is templated with a type called DatasetFillerProvider. - // This class serves as a factory for DatasetFillers for concrete types and - // should have a templated operator()() returning a value - // that can be dynamically casted to a std::shared_ptr> - // The openPMD API provides only one implementation of a DatasetFillerProvider, - // namely the SimpleDatasetFillerProvider being used in this example. - // Its purpose is to leverage a DatasetFiller for a concrete type (df in this example) - // to a DatasetFillerProvider whose operator()() will fail during runtime if T does - // not correspond with the underlying DatasetFiller. - // Use this implementation if you only wish to run the benchmark for one Datatype, + // For this, the class is templated with a type called + // DatasetFillerProvider. This class serves as a factory for DatasetFillers + // for concrete types and should have a templated operator()() returning + // a value that can be dynamically casted to a + // std::shared_ptr> The openPMD API provides only + // one implementation of a DatasetFillerProvider, namely the + // SimpleDatasetFillerProvider being used in this example. Its purpose is to + // leverage a DatasetFiller for a concrete type (df in this example) to a + // DatasetFillerProvider whose operator()() will fail during runtime if T + // does not correspond with the underlying DatasetFiller. Use this + // implementation if you only wish to run the benchmark for one Datatype, // otherwise provide your own implementation of DatasetFillerProvider. openPMD::SimpleDatasetFillerProvider dfp{df}; - // Create the Benchmark object. The file name (first argument) will be extended - // with the backends' file extensions. + // Create the Benchmark object. The file name (first argument) will be + // extended with the backends' file extensions. openPMD::MPIBenchmark benchmark{ "../benchmarks/benchmark", total, @@ -139,50 +131,50 @@ int main( dfp, }; - // Add benchmark runs to be executed. 
This will only store the configuration and not - // run the benchmark yet. Each run is configured by: - // * The compression scheme to use (first two parameters). The first parameter chooses + // Add benchmark runs to be executed. This will only store the configuration + // and not run the benchmark yet. Each run is configured by: + // * The compression scheme to use (first two parameters). The first + // parameter chooses // the compression scheme, the second parameter is the compression level. // * The backend (by file extension). // * The datatype to use for this run. - // * The number of iterations. Effectively, the benchmark will be repeated for this many + // * The number of iterations. Effectively, the benchmark will be repeated + // for this many // times. #if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 benchmark.addConfiguration( R"({"adios2": {"dataset":{"operators":[{"type": "blosc"}]}}})", "bp", dt, - 10 ); + 10); #endif #if openPMD_HAVE_HDF5 - benchmark.addConfiguration( "{}", "h5", dt, 10 ); + benchmark.addConfiguration("{}", "h5", dt, 10); #endif - // Execute all previously configured benchmarks. Will return a MPIBenchmarkReport object - // with write and read times for each configured run. - // Take notice that results will be collected into the root rank's report object, the other - // ranks' reports will be empty. The root rank is specified by the first parameter of runBenchmark, - // the default being 0. - auto res = - benchmark.runBenchmark(); + // Execute all previously configured benchmarks. Will return a + // MPIBenchmarkReport object with write and read times for each configured + // run. Take notice that results will be collected into the root rank's + // report object, the other ranks' reports will be empty. The root rank is + // specified by the first parameter of runBenchmark, the default being 0. + auto res = benchmark.runBenchmark(); - if( rank == 0 ) + if (rank == 0) { - for( auto it = res.durations.begin(); - it != res.durations.end(); - it++ ) + for (auto it = res.durations.begin(); it != res.durations.end(); it++) { auto time = it->second; std::cout << "on rank " << std::get(it->first) - << "\t with backend " - << std::get(it->first) + << "\t with backend " << std::get(it->first) << "\twrite time: " << std::chrono::duration_cast( - time.first - ).count() << "\tread time: " + time.first) + .count() + << "\tread time: " << std::chrono::duration_cast( - time.second - ).count() << std::endl; + time.second) + .count() + << std::endl; } } diff --git a/examples/8a_benchmark_write_parallel.cpp b/examples/8a_benchmark_write_parallel.cpp index 280c6e1aa7..af2f283d12 100644 --- a/examples/8a_benchmark_write_parallel.cpp +++ b/examples/8a_benchmark_write_parallel.cpp @@ -18,34 +18,34 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
*/ -#include #include +#include #include +#include #include +#include #include -#include -#include -#include #include -#include +#include #include +#include #if openPMD_HAVE_ADIOS2 -# include +#include #endif using std::cout; using namespace openPMD; - /** The Memory profiler class for profiling purpose * * Simple Memory usage report that works on linux system */ -static std::chrono::time_point m_ProgStart = std::chrono::system_clock::now(); +static std::chrono::time_point m_ProgStart = + std::chrono::system_clock::now(); class MemoryProfiler { @@ -55,56 +55,60 @@ class MemoryProfiler * @param[in] rank MPI rank * @param[in] tag item name to measure */ - MemoryProfiler(int rank, const std::string& tag) { - m_Rank = rank; + MemoryProfiler(int rank, const std::string &tag) + { + m_Rank = rank; #if defined(__linux) - //m_Name = "/proc/meminfo"; - m_Name = "/proc/self/status"; - Display(tag); + // m_Name = "/proc/meminfo"; + m_Name = "/proc/self/status"; + Display(tag); #else - (void)tag; - m_Name = ""; + (void)tag; + m_Name = ""; #endif - } + } /** * - * Read from /proc/self/status and display the Virtual Memory info at rank 0 on console + * Read from /proc/self/status and display the Virtual Memory info at rank 0 + * on console * * @param tag item name to measure * @param rank MPI rank */ - void Display(const std::string& tag){ - if (0 == m_Name.size()) - return; + void Display(const std::string &tag) + { + if (0 == m_Name.size()) + return; - if (m_Rank > 0) - return; + if (m_Rank > 0) + return; - std::cout<<" memory at: "<( m_End - m_Start ).count(); - double secs = millis/1000.0; - if( m_Rank > 0 ) - return; + double millis = std::chrono::duration_cast( + m_End - m_Start) + .count(); + double secs = millis / 1000.0; + if (m_Rank > 0) + return; std::cout << " [" << m_Tag << "] took:" << secs << " seconds\n"; - std::cout<<" " << m_Tag <<" From ProgStart in seconds "<< - std::chrono::duration_cast(m_End - m_ProgStart).count()/1000.0<( + m_End - m_ProgStart) + .count() / + 1000.0 + << std::endl; + + std::cout << std::endl; } + private: std::chrono::time_point m_Start; std::chrono::time_point m_End; @@ -150,7 +163,6 @@ class Timer int m_Rank = 0; }; - /** createData * generate a shared ptr of given size with given type & default value * @@ -161,32 +173,32 @@ class Timer * */ -template -std::shared_ptr< T > createData(const unsigned long& size, const T& val, const T& increment) - { - auto E = std::shared_ptr< T > { - new T[size], []( T * d ) {delete[] d;} - }; +template +std::shared_ptr +createData(const unsigned long &size, const T &val, const T &increment) +{ + auto E = std::shared_ptr{new T[size], [](T *d) { delete[] d; }}; - for(unsigned long i = 0ul; i < size; i++ ) - { - if (increment != 0) - //E.get()[i] = val+i; - E.get()[i] = val+i*increment; - else - E.get()[i] = val; - } + for (unsigned long i = 0ul; i < size; i++) + { + if (increment != 0) + // E.get()[i] = val+i; + E.get()[i] = val + i * increment; + else + E.get()[i] = val; + } return E; - } +} /** Find supported backends * (looking for ADIOS2 or H5) * */ -std::vector getBackends() { +std::vector getBackends() +{ std::vector res; #if openPMD_HAVE_ADIOS2 - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) != "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") != "ADIOS1") res.emplace_back(".bp"); #endif @@ -206,296 +218,324 @@ class TestInput; class AbstractPattern { public: - AbstractPattern(const TestInput& input); - virtual bool setLayOut(int step) = 0; - unsigned long getNthMeshExtent( unsigned 
int n, Offset& offset, Extent& count ); - virtual void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) = 0; - unsigned int getNumBlocks(); - - unsigned long getTotalNumParticles(); - void run(); - void store(Series& series, int step); - void storeMesh(Series& series, int step, const std::string& fieldName, const std::string& compName); - void storeParticles( ParticleSpecies& currSpecies, int& step ); - - unsigned long countMe(const Extent& count); - unsigned long indexMe(const Offset& count); - - Extent m_GlobalMesh; - Extent m_MinBlock; - const TestInput& m_Input; - - Extent m_GlobalUnitMesh; - std::vector> m_InRankMeshLayout; - - void PrintMe(); -}; // class Abstractpatter - - + AbstractPattern(const TestInput &input); + virtual bool setLayOut(int step) = 0; + unsigned long + getNthMeshExtent(unsigned int n, Offset &offset, Extent &count); + virtual void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) = 0; + unsigned int getNumBlocks(); + + unsigned long getTotalNumParticles(); + void run(); + void store(Series &series, int step); + void storeMesh( + Series &series, + int step, + const std::string &fieldName, + const std::string &compName); + void storeParticles(ParticleSpecies &currSpecies, int &step); + + unsigned long countMe(const Extent &count); + unsigned long indexMe(const Offset &count); + + Extent m_GlobalMesh; + Extent m_MinBlock; + const TestInput &m_Input; + + Extent m_GlobalUnitMesh; + std::vector> m_InRankMeshLayout; + + void PrintMe(); +}; // class Abstractpatter /* * Class defining 1D mesh layout * */ -class OneDimPattern: public AbstractPattern +class OneDimPattern : public AbstractPattern { public: - OneDimPattern(const TestInput& input); - bool setLayOut(int step) override; - unsigned long getNthMeshExtent( unsigned int n, Offset& offset, Extent& count ); - void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) override; - unsigned int getNumBlocks(); + OneDimPattern(const TestInput &input); + bool setLayOut(int step) override; + unsigned long + getNthMeshExtent(unsigned int n, Offset &offset, Extent &count); + void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) override; + unsigned int getNumBlocks(); }; /* * Class defining 2D mesh layout * */ -class TwoDimPattern: public AbstractPattern +class TwoDimPattern : public AbstractPattern { public: - TwoDimPattern(const TestInput& input); + TwoDimPattern(const TestInput &input); - bool setLayOut(int step) override; - void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) override; - void coordinate(unsigned long idx, const Extent& grid, Offset& o); + bool setLayOut(int step) override; + void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) override; + void coordinate(unsigned long idx, const Extent &grid, Offset &o); - Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh + Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh - - std::vector> m_InRankParticleLayout; + std::vector> m_InRankParticleLayout; }; /* * Class defining 3D mesh layout * */ -class ThreeDimPattern: public AbstractPattern +class ThreeDimPattern : public AbstractPattern { public: - ThreeDimPattern(const TestInput& input); + ThreeDimPattern(const TestInput &input); - bool setLayOut(int step) override; - void getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) override; - void coordinate(unsigned long idx, const 
Extent& grid, Offset& o); + bool setLayOut(int step) override; + void getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) override; + void coordinate(unsigned long idx, const Extent &grid, Offset &o); - Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh + Extent m_PatchUnitMesh; // based on m_GlobalUnitMesh - std::vector> m_InRankParticleLayout; + std::vector> m_InRankParticleLayout; }; - /** Class TestInput * */ class TestInput { public: - TestInput() = default; - - /** GetSeg() - * return number of partitions along the long dimension - * m_Seg can be set from input - * exception is when h5 collective mode is on. m_Seg=1 - */ - [[nodiscard]] unsigned int GetSeg() const - { - if (m_Backend == ".h5") - if (auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "ON" ) != "ON") - return 1; - if (m_Seg > 0) - return m_Seg; - return 1; - } - - - int m_MPISize = 1; //!< MPI size - int m_MPIRank = 0; //!< MPI rank - - unsigned long m_XBulk = 64ul; //!< min num of elements at X dimension - unsigned long m_YBulk = 32ul; //!< min num of elements at Y dimension - unsigned long m_ZBulk = 32ul; - - /** relative expansion of min grid(m_XBulk, m_YBulk, m_ZBulk) - * to form a max block. By default max:min=1, meaning suggested - * max block is the same as min block. This parameter is effective - * when the suggested max block size x m_MPISize = global_mesh. - * In other words, this option is set to let per rank workload be - * the max block (and the multiple mini blocks will be from there) - */ - Extent m_MaxOverMin = {1,1,1}; - - int m_Dim = 3; // mesh dim; - /** number of subdivisions for the elements - * - * note that with h5collect mode, m_Seg must be 1 - */ - unsigned int m_Seg = 1; - int m_Steps = 1; //!< num of iterations - std::string m_Backend = ".bp"; //!< I/O backend by file ending - bool m_Unbalance = false; //! load is different among processors - - int m_Ratio = 1; //! particle:mesh ratio - unsigned long m_XFactor = 0; // if not overwritten, use m_MPISize - unsigned long m_YFactor = 8; - unsigned long m_ZFactor = 8; - - //! prefix for the output directory - std::string m_Prefix = "../samples"; -}; // class TestInput + TestInput() = default; + /** GetSeg() + * return number of partitions along the long dimension + * m_Seg can be set from input + * exception is when h5 collective mode is on. m_Seg=1 + */ + [[nodiscard]] unsigned int GetSeg() const + { + if (m_Backend == ".h5") + if (auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "ON") != + "ON") + return 1; + if (m_Seg > 0) + return m_Seg; + return 1; + } + + int m_MPISize = 1; //!< MPI size + int m_MPIRank = 0; //!< MPI rank -void parse(TestInput& input, std::string line) + unsigned long m_XBulk = 64ul; //!< min num of elements at X dimension + unsigned long m_YBulk = 32ul; //!< min num of elements at Y dimension + unsigned long m_ZBulk = 32ul; + + /** relative expansion of min grid(m_XBulk, m_YBulk, m_ZBulk) + * to form a max block. By default max:min=1, meaning suggested + * max block is the same as min block. This parameter is effective + * when the suggested max block size x m_MPISize = global_mesh. 
+ * In other words, this option is set to let per rank workload be + * the max block (and the multiple mini blocks will be from there) + */ + Extent m_MaxOverMin = {1, 1, 1}; + + int m_Dim = 3; // mesh dim; + /** number of subdivisions for the elements + * + * note that with h5collect mode, m_Seg must be 1 + */ + unsigned int m_Seg = 1; + int m_Steps = 1; //!< num of iterations + std::string m_Backend = ".bp"; //!< I/O backend by file ending + bool m_Unbalance = false; //! load is different among processors + + int m_Ratio = 1; //! particle:mesh ratio + unsigned long m_XFactor = 0; // if not overwritten, use m_MPISize + unsigned long m_YFactor = 8; + unsigned long m_ZFactor = 8; + + //! prefix for the output directory + std::string m_Prefix = "../samples"; +}; // class TestInput + +void parse(TestInput &input, std::string line) { - // no valid input a=b - if ( line.size() <= 3 ) - return; - if ( line[0] == '#' ) - return; - - std::istringstream iline(line); - - std::string s; - std::vector vec; - while ( std::getline( iline, s, '=' ) ) - vec.push_back(s); - - if ( vec.size() != 2 ) - return; - - if ( vec[0].compare("dim") == 0 ) { - input.m_Dim = atoi(vec[1].c_str()); - return; - } - - if ( vec[0].compare("balanced") == 0 ) { - if ( vec[1].compare("false") == 0 ) - input.m_Unbalance = true; - return; - } - - if ( vec[0].compare("ratio") == 0 ) { - input.m_Ratio = atoi(vec[1].c_str()); - return; - } - - if ( vec[0].compare("steps") == 0 ) { - input.m_Steps = atoi(vec[1].c_str()); - return; - } - - if ( vec[0].compare("rankBlocks") == 0 ) { - if ( vec[1].compare("false") == 0 ) - input.m_Seg = 10; - return; - } - - if ( vec[0].compare("fileLocation") == 0 ) { - input.m_Prefix = vec[1]; - return; - } - - // now vec[1] is N-dim integers - std::vector numbers; - std::istringstream tmp(vec[1]); - while ( std::getline( tmp, s, ' ' ) ) - numbers.push_back(strtoul( s.c_str(), nullptr, 0 )); - - if ( (numbers.size() == 0) || ((numbers.size() - input.m_Dim) != 0) ) { - if ( input.m_MPIRank == 0 ) - std::cout< 0) input.m_YBulk = numbers[1]; - if (numbers.size() > 1) input.m_ZBulk = numbers[2]; - } - - if ( vec[0].compare("grid") == 0 ) { - input.m_XFactor = numbers[0]; - if (numbers.size() > 0) input.m_YFactor = numbers[1]; - if (numbers.size() > 1) input.m_ZFactor = numbers[2]; - } + // no valid input a=b + if (line.size() <= 3) + return; + if (line[0] == '#') + return; + + std::istringstream iline(line); + + std::string s; + std::vector vec; + while (std::getline(iline, s, '=')) + vec.push_back(s); + + if (vec.size() != 2) + return; + + if (vec[0].compare("dim") == 0) + { + input.m_Dim = atoi(vec[1].c_str()); + return; + } + + if (vec[0].compare("balanced") == 0) + { + if (vec[1].compare("false") == 0) + input.m_Unbalance = true; + return; + } + + if (vec[0].compare("ratio") == 0) + { + input.m_Ratio = atoi(vec[1].c_str()); + return; + } + + if (vec[0].compare("steps") == 0) + { + input.m_Steps = atoi(vec[1].c_str()); + return; + } + + if (vec[0].compare("rankBlocks") == 0) + { + if (vec[1].compare("false") == 0) + input.m_Seg = 10; + return; + } + + if (vec[0].compare("fileLocation") == 0) + { + input.m_Prefix = vec[1]; + return; + } + + // now vec[1] is N-dim integers + std::vector numbers; + std::istringstream tmp(vec[1]); + while (std::getline(tmp, s, ' ')) + numbers.push_back(strtoul(s.c_str(), nullptr, 0)); + + if ((numbers.size() == 0) || ((numbers.size() - input.m_Dim) != 0)) + { + if (input.m_MPIRank == 0) + std::cout << vec[1] << " Expecting " << input.m_Dim + << " dimensions. 
But given input is " << numbers.size() + << std::endl; + return; + } + + if (vec[0].compare("minBlock") == 0) + { + input.m_XBulk = numbers[0]; + if (numbers.size() > 0) + input.m_YBulk = numbers[1]; + if (numbers.size() > 1) + input.m_ZBulk = numbers[2]; + } + + if (vec[0].compare("grid") == 0) + { + input.m_XFactor = numbers[0]; + if (numbers.size() > 0) + input.m_YFactor = numbers[1]; + if (numbers.size() > 1) + input.m_ZFactor = numbers[2]; + } } -int parseArgs( int argc, char *argv[], TestInput& input ) +int parseArgs(int argc, char *argv[], TestInput &input) { - if ( argc == 2 ) { - std::fstream infile; - infile.open(argv[1], std::ios::in); - if ( !infile.is_open() ) { - if ( input.m_MPIRank == 0 ) std::cout<< "No such file: "<= 2 ) { - // coded as: b..b/aaa/c/d=[Yfactor][Xfactor][Balance][Ratio] - // e.g. 200413 => ratio:3; Unbalance:yes; xfactor=4; yfactor=2 - int num = atoi( argv[1] ) ; - if ( num > 10 ) - input.m_Unbalance = (num/10 % 10 > 0); - - if ( num <= 0) - num = 1; - input.m_Ratio = (num-1) % 10 + 1; - - if ( num > 100 ) { - input.m_XFactor = num/100; - if ( input.m_XFactor > 1000 ) { - input.m_YFactor = input.m_XFactor/1000 % 1000; - if ( input.m_XFactor > 1000000 ) - input.m_ZFactor = input.m_XFactor/1000000 % 1000; + if (argc >= 2) + { + // coded as: b..b/aaa/c/d=[Yfactor][Xfactor][Balance][Ratio] + // e.g. 200413 => ratio:3; Unbalance:yes; xfactor=4; yfactor=2 + int num = atoi(argv[1]); + if (num > 10) + input.m_Unbalance = (num / 10 % 10 > 0); + + if (num <= 0) + num = 1; + input.m_Ratio = (num - 1) % 10 + 1; + + if (num > 100) + { + input.m_XFactor = num / 100; + if (input.m_XFactor > 1000) + { + input.m_YFactor = input.m_XFactor / 1000 % 1000; + if (input.m_XFactor > 1000000) + input.m_ZFactor = input.m_XFactor / 1000000 % 1000; else - input.m_ZFactor = input.m_YFactor; + input.m_ZFactor = input.m_YFactor; input.m_XFactor = input.m_XFactor % 1000; - } - } + } + } } - if( argc >= 3 ) - input.m_XBulk = strtoul( argv[2], nullptr, 0 ); + if (argc >= 3) + input.m_XBulk = strtoul(argv[2], nullptr, 0); // e.g. 
32064 => [64,32] - if ( input.m_XBulk > 1000 ) + if (input.m_XBulk > 1000) { - input.m_YBulk = input.m_XBulk/1000 % 1000; - if ( input.m_XBulk > 1000000 ) - input.m_ZBulk = input.m_XBulk/1000000 % 1000; - else - input.m_ZBulk = input.m_YBulk; - input.m_XBulk = input.m_XBulk % 1000; + input.m_YBulk = input.m_XBulk / 1000 % 1000; + if (input.m_XBulk > 1000000) + input.m_ZBulk = input.m_XBulk / 1000000 % 1000; + else + input.m_ZBulk = input.m_YBulk; + input.m_XBulk = input.m_XBulk % 1000; } // if m_Seg > 1; then data of var will be stored as chunks of minigrid // else store as one big block - if( argc >= 4 ) - input.m_Seg = atoi( argv[3] ); + if (argc >= 4) + input.m_Seg = atoi(argv[3]); - if( argc >= 5 ) - input.m_Steps = atoi( argv[4] ); + if (argc >= 5) + input.m_Steps = atoi(argv[4]); if (argc >= 6) - input.m_Dim = atoi( argv[5] ); + input.m_Dim = atoi(argv[5]); - if (argc >= 7) { - long val = strtoul( argv[6], nullptr, 0 ); - input.m_MaxOverMin[0] = val % 1000; + if (argc >= 7) + { + long val = strtoul(argv[6], nullptr, 0); + input.m_MaxOverMin[0] = val % 1000; - if ( val >= 1000 ) - input.m_MaxOverMin[1] = (val/1000) % 1000; - if ( val >= 1000000 ) - input.m_MaxOverMin[2] = (val/1000000) % 1000; + if (val >= 1000) + input.m_MaxOverMin[1] = (val / 1000) % 1000; + if (val >= 1000000) + input.m_MaxOverMin[2] = (val / 1000000) % 1000; } return input.m_Dim; @@ -504,48 +544,56 @@ int parseArgs( int argc, char *argv[], TestInput& input ) * * description of runtime options/flags */ -int -main( int argc, char *argv[] ) +int main(int argc, char *argv[]) { - MPI_Init( &argc, &argv ); + MPI_Init(&argc, &argv); TestInput input; - MPI_Comm_size( MPI_COMM_WORLD, &input.m_MPISize ); - MPI_Comm_rank( MPI_COMM_WORLD, &input.m_MPIRank ); + MPI_Comm_size(MPI_COMM_WORLD, &input.m_MPISize); + MPI_Comm_rank(MPI_COMM_WORLD, &input.m_MPIRank); int dataDim = parseArgs(argc, argv, input); - if ( ( dataDim <= 0 ) || ( dataDim > 3 ) ) { - if ( 0 == input.m_MPIRank) - std::cerr<<" Sorry, Only supports data 1D 2D 3D! not "< 3)) + { + if (0 == input.m_MPIRank) + std::cerr << " Sorry, Only supports data 1D 2D 3D! 
not " << dataDim + << std::endl; + return -1; } - Timer g( " Main ", input.m_MPIRank ); + Timer g(" Main ", input.m_MPIRank); - if ( 0 == input.m_XFactor ) - input.m_XFactor = input.m_MPISize; + if (0 == input.m_XFactor) + input.m_XFactor = input.m_MPISize; auto const backends = getBackends(); - try { - for( auto const & which: backends ) - { - input.m_Backend = which; - if ( 1 == dataDim ) { - OneDimPattern p1(input); - p1.run(); - } else if ( 2 == dataDim ) { - TwoDimPattern p2(input); - p2.run(); - } else { - ThreeDimPattern p3(input); - p3.run(); + try + { + for (auto const &which : backends) + { + input.m_Backend = which; + if (1 == dataDim) + { + OneDimPattern p1(input); + p1.run(); + } + else if (2 == dataDim) + { + TwoDimPattern p2(input); + p2.run(); + } + else + { + ThreeDimPattern p3(input); + p3.run(); + } } - } } - catch (std::exception const & ex ) + catch (std::exception const &ex) { - if (0 == input.m_MPIRank) std::cout<<"Error: "<(); - Dataset dataset = Dataset( datatype, m_GlobalMesh ); + Datatype datatype = determineDatatype(); + Dataset dataset = Dataset(datatype, m_GlobalMesh); - compA.resetDataset( dataset ); + compA.resetDataset(dataset); auto nBlocks = getNumBlocks(); - for ( unsigned int n=0; n 0) { - auto const value = double(1.0*n + 0.01*step); - auto A = createData( blockSize, value, 0.0001 ) ; - compA.storeChunk( A, meshOffset, meshExtent ); + if (blockSize > 0) + { + auto const value = double(1.0 * n + 0.01 * step); + auto A = createData(blockSize, value, 0.0001); + compA.storeChunk(A, meshOffset, meshExtent); } - } - } - - /* - * Write particles. (always 1D) - * - * @param ParticleSpecies Input - * @param step Iteration step - * - */ - void - AbstractPattern::storeParticles( ParticleSpecies& currSpecies, int& step ) - { - currSpecies.setAttribute( "particleSmoothing", "none" ); - currSpecies.setAttribute( "openPMD_STEP", step ); - currSpecies.setAttribute( "p2mRatio", m_Input.m_Ratio ); + } +} + +/* + * Write particles. (always 1D) + * + * @param ParticleSpecies Input + * @param step Iteration step + * + */ +void AbstractPattern::storeParticles(ParticleSpecies &currSpecies, int &step) +{ + currSpecies.setAttribute("particleSmoothing", "none"); + currSpecies.setAttribute("openPMD_STEP", step); + currSpecies.setAttribute("p2mRatio", m_Input.m_Ratio); auto np = getTotalNumParticles(); - auto const intDataSet = openPMD::Dataset(openPMD::determineDatatype< uint64_t >(), {np}); - auto const realDataSet = openPMD::Dataset(openPMD::determineDatatype< double >(), {np}); - currSpecies["id"][RecordComponent::SCALAR].resetDataset( intDataSet ); - currSpecies["charge"][RecordComponent::SCALAR].resetDataset( realDataSet ); + auto const intDataSet = + openPMD::Dataset(openPMD::determineDatatype(), {np}); + auto const realDataSet = + openPMD::Dataset(openPMD::determineDatatype(), {np}); + currSpecies["id"][RecordComponent::SCALAR].resetDataset(intDataSet); + currSpecies["charge"][RecordComponent::SCALAR].resetDataset(realDataSet); - currSpecies["position"]["x"].resetDataset( realDataSet ); + currSpecies["position"]["x"].resetDataset(realDataSet); - currSpecies["positionOffset"]["x"].resetDataset( realDataSet ); - currSpecies["positionOffset"]["x"].makeConstant( 0. 
); + currSpecies["positionOffset"]["x"].resetDataset(realDataSet); + currSpecies["positionOffset"]["x"].makeConstant(0.); auto nBlocks = getNumBlocks(); - for ( unsigned int n=0; n 0) { - auto ids = createData( count, offset, 1 ) ; - currSpecies["id"][RecordComponent::SCALAR].storeChunk(ids, {offset}, {count}); - - auto charges = createData(count, 0.1*step, 0.0001) ; - currSpecies["charge"][RecordComponent::SCALAR].storeChunk(charges, - {offset}, {count}); + // std::cout< 0) + { + auto ids = createData(count, offset, 1); + currSpecies["id"][RecordComponent::SCALAR].storeChunk( + ids, {offset}, {count}); - auto mx = createData(count, 1.0*step, 0.0002) ; - currSpecies["position"]["x"].storeChunk(mx, - {offset}, {count}); + auto charges = createData(count, 0.1 * step, 0.0001); + currSpecies["charge"][RecordComponent::SCALAR].storeChunk( + charges, {offset}, {count}); + auto mx = createData(count, 1.0 * step, 0.0002); + currSpecies["position"]["x"].storeChunk(mx, {offset}, {count}); + } } - } - } // storeParticles - +} // storeParticles - /* - * Return total number of particles - * set to be a multiple of mesh size - * - */ +/* + * Return total number of particles + * set to be a multiple of mesh size + * + */ unsigned long AbstractPattern::getTotalNumParticles() - { +{ unsigned long result = m_Input.m_Ratio; for (unsigned long i : m_GlobalMesh) - result *= i; + result *= i; return result; - } +} /* * Print pattern layout */ void AbstractPattern::PrintMe() - { +{ int ndim = m_MinBlock.size(); - if ( !m_Input.m_MPIRank ) + if (!m_Input.m_MPIRank) { - std::ostringstream g; g<<"\nGlobal: [ "; - std::ostringstream u; u<<" Unit: [ "; - std::ostringstream m; m<<" Block: [ "; - for ( auto i=0; i m_InRankMeshLayout.size()) - return; + return; offset = m_InRankParticleLayout[n].first; - count = m_InRankParticleLayout[n].second; - } - + count = m_InRankParticleLayout[n].second; +} /* * Get nth particel extent in a rank @@ -841,14 +902,15 @@ void TwoDimPattern::getNthParticleExtent( unsigned int n, unsigned long& offset, * @param offset: return * @param count: return */ -void ThreeDimPattern::getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) - { +void ThreeDimPattern::getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) +{ if (n > m_InRankMeshLayout.size()) - return; + return; offset = m_InRankParticleLayout[n].first; - count = m_InRankParticleLayout[n].second; - } + count = m_InRankParticleLayout[n].second; +} /* * Set layout @@ -863,48 +925,50 @@ bool OneDimPattern::setLayOut(int step) unsigned long unitOffset = m_Input.m_MPIRank * unitCount; - if ( m_Input.m_MPISize >= 2 ) + if (m_Input.m_MPISize >= 2) { - if ( m_Input.m_Unbalance ) - { - if (step % 3 == 1) + if (m_Input.m_Unbalance) { - if ( m_Input.m_MPIRank % 10 == 0 ) // no load - unitCount = 0; - if ( m_Input.m_MPIRank % 10 == 1 ) // double load - { - unitOffset -= unitCount; - unitCount += unitCount; + if (step % 3 == 1) + { + if (m_Input.m_MPIRank % 10 == 0) // no load + unitCount = 0; + if (m_Input.m_MPIRank % 10 == 1) // double load + { + unitOffset -= unitCount; + unitCount += unitCount; + } + } } } - } - } if (0 == unitCount) - return true; + return true; - auto numPartition = m_Input.GetSeg(); - if ( unitCount < numPartition ) - numPartition = unitCount; + auto numPartition = m_Input.GetSeg(); + if (unitCount < numPartition) + numPartition = unitCount; auto avg = unitCount / numPartition; - for ( unsigned int i=0 ; i< numPartition; i++ ) + for (unsigned int i = 0; i < 
numPartition; i++) { - Offset offset = { unitOffset * m_MinBlock[0] }; - if ( i < (numPartition - 1) ) { - Extent count = { avg * m_MinBlock[0] }; - m_InRankMeshLayout.emplace_back(offset, count); - } else { - auto res = unitCount - avg * (numPartition - 1); - Extent count = { res * m_MinBlock[0] }; - m_InRankMeshLayout.emplace_back(offset, count); - } + Offset offset = {unitOffset * m_MinBlock[0]}; + if (i < (numPartition - 1)) + { + Extent count = {avg * m_MinBlock[0]}; + m_InRankMeshLayout.emplace_back(offset, count); + } + else + { + auto res = unitCount - avg * (numPartition - 1); + Extent count = {res * m_MinBlock[0]}; + m_InRankMeshLayout.emplace_back(offset, count); + } } return true; } - /* * Retrieves ParticleExtent * @param n: nth block for this rank @@ -912,143 +976,153 @@ bool OneDimPattern::setLayOut(int step) * @param count: return * */ -void OneDimPattern::getNthParticleExtent( unsigned int n, unsigned long& offset, unsigned long& count ) - { +void OneDimPattern::getNthParticleExtent( + unsigned int n, unsigned long &offset, unsigned long &count) +{ if (n > m_InRankMeshLayout.size()) - return; + return; offset = indexMe(m_InRankMeshLayout[n].first) * m_Input.m_Ratio; - count = countMe(m_InRankMeshLayout[n].second) * m_Input.m_Ratio; - } - + count = countMe(m_InRankMeshLayout[n].second) * m_Input.m_Ratio; +} /* Constructor TwoDimPattern * Defines 2D layout * @param input: user specifications */ -TwoDimPattern::TwoDimPattern(const TestInput& input) - :AbstractPattern(input) +TwoDimPattern::TwoDimPattern(const TestInput &input) : AbstractPattern(input) { - m_GlobalMesh = { input.m_XBulk * input.m_XFactor, input.m_YBulk * input.m_YFactor }; - m_MinBlock = { input.m_XBulk, input.m_YBulk }; - - m_GlobalUnitMesh = { input.m_XFactor, input.m_YFactor }; - - auto m = (input.m_XFactor * input.m_YFactor) % input.m_MPISize; - if ( m != 0) - throw std::runtime_error( "Unable to balance load for 2D mesh among ranks "); - - m = (input.m_XFactor * input.m_YFactor) / input.m_MPISize; - - if ( input.m_XFactor % input.m_MPISize == 0 ) - m_PatchUnitMesh = { input.m_XFactor / input.m_MPISize, m_GlobalUnitMesh[1] }; - else if ( input.m_YFactor % input.m_MPISize == 0 ) - m_PatchUnitMesh = { m_GlobalUnitMesh[0], input.m_YFactor / input.m_MPISize }; - else if ( input.m_XFactor % m == 0 ) - m_PatchUnitMesh = {m, 1}; - else if ( input.m_YFactor % m == 0 ) - m_PatchUnitMesh = {1, m}; - else // e.g. unitMesh={8,9} mpisize=12, m=6, patch unit needs to be {4,3} + m_GlobalMesh = { + input.m_XBulk * input.m_XFactor, input.m_YBulk * input.m_YFactor}; + m_MinBlock = {input.m_XBulk, input.m_YBulk}; + + m_GlobalUnitMesh = {input.m_XFactor, input.m_YFactor}; + + auto m = (input.m_XFactor * input.m_YFactor) % input.m_MPISize; + if (m != 0) + throw std::runtime_error( + "Unable to balance load for 2D mesh among ranks "); + + m = (input.m_XFactor * input.m_YFactor) / input.m_MPISize; + + if (input.m_XFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + input.m_XFactor / input.m_MPISize, m_GlobalUnitMesh[1]}; + else if (input.m_YFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + m_GlobalUnitMesh[0], input.m_YFactor / input.m_MPISize}; + else if (input.m_XFactor % m == 0) + m_PatchUnitMesh = {m, 1}; + else if (input.m_YFactor % m == 0) + m_PatchUnitMesh = {1, m}; + else // e.g. 
unitMesh={8,9} mpisize=12, m=6, patch unit needs to be {4,3} { - throw std::runtime_error( "Wait for next version with other 2D patch configurations" ); + throw std::runtime_error( + "Wait for next version with other 2D patch configurations"); } - PrintMe(); + PrintMe(); } - - /* * Set layout * @param step: iteration step * */ -bool TwoDimPattern::setLayOut(int step) { +bool TwoDimPattern::setLayOut(int step) +{ m_InRankMeshLayout.clear(); m_InRankParticleLayout.clear(); unsigned long patchOffset = m_Input.m_MPIRank; - unsigned long patchCount = 1; + unsigned long patchCount = 1; - if ( m_Input.m_MPISize >= 2 ) + if (m_Input.m_MPISize >= 2) { - if ( m_Input.m_Unbalance ) - { - if (step % 3 == 1) + if (m_Input.m_Unbalance) { - if ( m_Input.m_MPIRank % 4 == 0 ) // no load - patchCount = 0; - if ( m_Input.m_MPIRank % 4 == 1 ) // double load - { - patchOffset -= patchCount; - patchCount += patchCount; + if (step % 3 == 1) + { + if (m_Input.m_MPIRank % 4 == 0) // no load + patchCount = 0; + if (m_Input.m_MPIRank % 4 == 1) // double load + { + patchOffset -= patchCount; + patchCount += patchCount; + } } - } - } + } } if (0 == patchCount) - return true; + return true; - auto numPartition = m_Input.GetSeg(); + auto numPartition = m_Input.GetSeg(); - Extent patchGrid = { m_GlobalUnitMesh[0]/m_PatchUnitMesh[0], - m_GlobalUnitMesh[1]/m_PatchUnitMesh[1] }; + Extent patchGrid = { + m_GlobalUnitMesh[0] / m_PatchUnitMesh[0], + m_GlobalUnitMesh[1] / m_PatchUnitMesh[1]}; - Offset p {0,0}; + Offset p{0, 0}; coordinate(patchOffset, patchGrid, p); - Offset c {1,1}; - if ( patchCount > 1 ) { - coordinate( patchCount -1, patchGrid, c); - c[0] += 1; - c[1] += 1; + Offset c{1, 1}; + if (patchCount > 1) + { + coordinate(patchCount - 1, patchGrid, c); + c[0] += 1; + c[1] += 1; } // particle offset at this rank - unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * countMe(m_MinBlock) * m_Input.m_Ratio; + unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * + countMe(m_MinBlock) * m_Input.m_Ratio; - if ( 1 == numPartition ) - { - Offset offset = { p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - p[1] * m_PatchUnitMesh[1] * m_MinBlock[1] }; + if (1 == numPartition) + { + Offset offset = { + p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + p[1] * m_PatchUnitMesh[1] * m_MinBlock[1]}; - Extent count = { c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - c[1] * m_PatchUnitMesh[1] * m_MinBlock[1] }; + Extent count = { + c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + c[1] * m_PatchUnitMesh[1] * m_MinBlock[1]}; - m_InRankMeshLayout.emplace_back(offset, count); + m_InRankMeshLayout.emplace_back(offset, count); - auto pCount = countMe(count) * m_Input.m_Ratio; - m_InRankParticleLayout.emplace_back(pOff, pCount); - } + auto pCount = countMe(count) * m_Input.m_Ratio; + m_InRankParticleLayout.emplace_back(pOff, pCount); + } else - { - Offset unitOffset = { p[0] * m_PatchUnitMesh[0], p[1] * m_PatchUnitMesh[1] }; - Extent unitExtent = { c[0] * m_PatchUnitMesh[0], c[1] * m_PatchUnitMesh[1] }; + { + Offset unitOffset = { + p[0] * m_PatchUnitMesh[0], p[1] * m_PatchUnitMesh[1]}; + Extent unitExtent = { + c[0] * m_PatchUnitMesh[0], c[1] * m_PatchUnitMesh[1]}; - auto counter = pOff; + auto counter = pOff; - for ( unsigned long i=0; i m_InRankMeshLayout.size()) - return 0; + return 0; offset = m_InRankMeshLayout[n].first; - count = m_InRankMeshLayout[n].second; + count = m_InRankMeshLayout[n].second; return countMe(count); - } +} /* * Get coordinate given c order index @@ -1080,52 +1155,53 @@ unsigned long 
AbstractPattern::getNthMeshExtent( unsigned int n, Offset& offset * @param grid: layout * @param result: return */ -inline void TwoDimPattern::coordinate(unsigned long idx, const Extent& grid, Offset& result) +inline void +TwoDimPattern::coordinate(unsigned long idx, const Extent &grid, Offset &result) { - auto yy = idx % grid[1]; - auto xx = ( idx - yy ) / grid[1]; + auto yy = idx % grid[1]; + auto xx = (idx - yy) / grid[1]; - result[0] = xx; - result[1] = yy; + result[0] = xx; + result[1] = yy; } /* Returns c order index in the global mesh * @param offset: input, offset in the global mesh */ -inline unsigned long AbstractPattern::indexMe(const Offset& offset) +inline unsigned long AbstractPattern::indexMe(const Offset &offset) { - if (offset.size() == 0) - return 0; + if (offset.size() == 0) + return 0; - if (offset.size() == 1) - return offset[0]; + if (offset.size() == 1) + return offset[0]; - if (offset.size() == 2) + if (offset.size() == 2) { - unsigned long result = offset[1]; - result += offset[0] * m_GlobalMesh[1]; - return result; + unsigned long result = offset[1]; + result += offset[0] * m_GlobalMesh[1]; + return result; } - return 0; + return 0; } /* computes size of a block * @param count: block extent */ -inline unsigned long AbstractPattern::countMe(const Extent& count) +inline unsigned long AbstractPattern::countMe(const Extent &count) { - if (count.size() == 0) - return 0; + if (count.size() == 0) + return 0; - unsigned long result = count[0]; - if ( count.size() >= 2 ) - result *= count[1]; + unsigned long result = count[0]; + if (count.size() >= 2) + result *= count[1]; - if ( count.size() >= 3 ) - result *= count[2]; + if (count.size() >= 3) + result *= count[2]; - return result; + return result; } /* @@ -1134,168 +1210,201 @@ inline unsigned long AbstractPattern::countMe(const Extent& count) * @param grid: layout * @param result: return */ -inline void ThreeDimPattern::coordinate(unsigned long idx, const Extent& grid, Offset& result) +inline void ThreeDimPattern::coordinate( + unsigned long idx, const Extent &grid, Offset &result) { - auto zz = idx % grid[2]; - auto m = (idx - zz)/grid[2]; - auto yy = m % grid[1]; - auto xx = ( m - yy ) / grid[1]; - - result[0] = xx; - result[1] = yy; - result[2] = zz; + auto zz = idx % grid[2]; + auto m = (idx - zz) / grid[2]; + auto yy = m % grid[1]; + auto xx = (m - yy) / grid[1]; + + result[0] = xx; + result[1] = yy; + result[2] = zz; } - /* * Constructor ThreeDimPattern * Defines 3D layout * @param input: user specifications * */ -ThreeDimPattern::ThreeDimPattern(const TestInput& input) - :AbstractPattern(input) +ThreeDimPattern::ThreeDimPattern(const TestInput &input) + : AbstractPattern(input) { - { - m_GlobalMesh = { input.m_XBulk * input.m_XFactor, - input.m_YBulk * input.m_YFactor, - input.m_ZBulk * input.m_ZFactor }; // Z & Y have same size + { + m_GlobalMesh = { + input.m_XBulk * input.m_XFactor, + input.m_YBulk * input.m_YFactor, + input.m_ZBulk * input.m_ZFactor}; // Z & Y have same size - m_MinBlock = { input.m_XBulk, input.m_YBulk, input.m_ZBulk }; - m_GlobalUnitMesh = { input.m_XFactor, input.m_YFactor, input.m_ZFactor }; + m_MinBlock = {input.m_XBulk, input.m_YBulk, input.m_ZBulk}; + m_GlobalUnitMesh = {input.m_XFactor, input.m_YFactor, input.m_ZFactor}; - PrintMe(); - } - - //unsigned long zFactor = input.m_YFactor; - auto m = (input.m_ZFactor * input.m_XFactor * input.m_YFactor) % input.m_MPISize; - if ( m != 0) - throw std::runtime_error( "Unable to balance load for 3D mesh among ranks "); - - m = (input.m_ZFactor 
* input.m_XFactor * input.m_YFactor) / input.m_MPISize; - auto maxRatio = input.m_MaxOverMin[0] * input.m_MaxOverMin[1] * input.m_MaxOverMin[2]; - if ( maxRatio == m ) { - m_PatchUnitMesh = { input.m_MaxOverMin[0], input.m_MaxOverMin[1], input.m_MaxOverMin[2] }; - if ( !m_Input.m_MPIRank ) - std::cout<<" Using maxOverMin="< 0) && ( (input.m_XFactor * input.m_YFactor) % input.m_MPISize == 0 )) - { - if ( input.m_XFactor % m == 0 ) - m_PatchUnitMesh = {m, 1, input.m_ZFactor}; - else if ( input.m_YFactor % m == 0 ) - m_PatchUnitMesh = {1, m, input.m_ZFactor}; - else - throw std::runtime_error( "Wait for next version with other 3D patch configurations" ); - } - } + PrintMe(); + } + + // unsigned long zFactor = input.m_YFactor; + auto m = + (input.m_ZFactor * input.m_XFactor * input.m_YFactor) % input.m_MPISize; + if (m != 0) + throw std::runtime_error( + "Unable to balance load for 3D mesh among ranks "); + + m = (input.m_ZFactor * input.m_XFactor * input.m_YFactor) / input.m_MPISize; + auto maxRatio = + input.m_MaxOverMin[0] * input.m_MaxOverMin[1] * input.m_MaxOverMin[2]; + if (maxRatio == m) + { + m_PatchUnitMesh = { + input.m_MaxOverMin[0], + input.m_MaxOverMin[1], + input.m_MaxOverMin[2]}; + if (!m_Input.m_MPIRank) + std::cout << " Using maxOverMin=" << input.m_MaxOverMin[0] << ", " + << input.m_MaxOverMin[1] << ", " << input.m_MaxOverMin[2] + << std::endl; + ; + return; + } + + if (input.m_XFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + input.m_XFactor / input.m_MPISize, + m_GlobalUnitMesh[1], + m_GlobalUnitMesh[2]}; + else if (input.m_YFactor % input.m_MPISize == 0) + m_PatchUnitMesh = { + m_GlobalUnitMesh[0], + input.m_YFactor / input.m_MPISize, + m_GlobalUnitMesh[2]}; + else if (input.m_XFactor % m == 0) + m_PatchUnitMesh = {m, 1, 1}; + else if (input.m_YFactor % m == 0) + m_PatchUnitMesh = {1, m, 1}; + else if (input.m_ZFactor % m == 0) + m_PatchUnitMesh = {1, 1, m}; + else + { + m = (input.m_XFactor * input.m_YFactor) / input.m_MPISize; + if ((m > 0) && + ((input.m_XFactor * input.m_YFactor) % input.m_MPISize == 0)) + { + if (input.m_XFactor % m == 0) + m_PatchUnitMesh = {m, 1, input.m_ZFactor}; + else if (input.m_YFactor % m == 0) + m_PatchUnitMesh = {1, m, input.m_ZFactor}; + else + throw std::runtime_error( + "Wait for next version with other 3D patch configurations"); + } + } } /* * set layout of grids * @ param step: iteration step */ -bool ThreeDimPattern::setLayOut(int step) { +bool ThreeDimPattern::setLayOut(int step) +{ m_InRankMeshLayout.clear(); m_InRankParticleLayout.clear(); unsigned long patchOffset = m_Input.m_MPIRank; - unsigned long patchCount = 1; + unsigned long patchCount = 1; - if ( m_Input.m_MPISize >= 2 ) + if (m_Input.m_MPISize >= 2) { - if ( m_Input.m_Unbalance ) - { - if (step % 3 == 1) + if (m_Input.m_Unbalance) { - if ( m_Input.m_MPIRank % 4 == 0 ) // no load - patchCount = 0; - if ( m_Input.m_MPIRank % 4 == 1 ) // double load - { - patchOffset -= patchCount; - patchCount += patchCount; - } + if (step % 3 == 1) + { + if (m_Input.m_MPIRank % 4 == 0) // no load + patchCount = 0; + if (m_Input.m_MPIRank % 4 == 1) // double load + { + patchOffset -= patchCount; + patchCount += patchCount; + } + } } - } } if (0 == patchCount) - return true; + return true; - auto numPartition = m_Input.GetSeg(); + auto numPartition = m_Input.GetSeg(); - Extent patchGrid = { m_GlobalUnitMesh[0]/m_PatchUnitMesh[0], - m_GlobalUnitMesh[1]/m_PatchUnitMesh[1], - m_GlobalUnitMesh[2]/m_PatchUnitMesh[2] }; + Extent patchGrid = { + m_GlobalUnitMesh[0] / m_PatchUnitMesh[0], + 
m_GlobalUnitMesh[1] / m_PatchUnitMesh[1], + m_GlobalUnitMesh[2] / m_PatchUnitMesh[2]}; - - Offset p {0, 0, 0}; + Offset p{0, 0, 0}; coordinate(patchOffset, patchGrid, p); - Offset c {1,1,1}; - if ( patchCount > 1 ) { - coordinate(patchCount -1, patchGrid, c); - c[0] += 1; - c[1] += 1; - c[2] += 1; + Offset c{1, 1, 1}; + if (patchCount > 1) + { + coordinate(patchCount - 1, patchGrid, c); + c[0] += 1; + c[1] += 1; + c[2] += 1; } // particle offset at this rank - unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * countMe(m_MinBlock) * m_Input.m_Ratio; + unsigned long pOff = countMe(m_PatchUnitMesh) * patchOffset * + countMe(m_MinBlock) * m_Input.m_Ratio; if (1 == numPartition) - { - Offset offset = { p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - p[1] * m_PatchUnitMesh[1] * m_MinBlock[1], - p[2] * m_PatchUnitMesh[2] * m_MinBlock[2] }; + { + Offset offset = { + p[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + p[1] * m_PatchUnitMesh[1] * m_MinBlock[1], + p[2] * m_PatchUnitMesh[2] * m_MinBlock[2]}; - Extent count = { c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], - c[1] * m_PatchUnitMesh[1] * m_MinBlock[1], - c[2] * m_PatchUnitMesh[2] * m_MinBlock[2] }; + Extent count = { + c[0] * m_PatchUnitMesh[0] * m_MinBlock[0], + c[1] * m_PatchUnitMesh[1] * m_MinBlock[1], + c[2] * m_PatchUnitMesh[2] * m_MinBlock[2]}; - m_InRankMeshLayout.emplace_back(offset, count); + m_InRankMeshLayout.emplace_back(offset, count); - auto pCount = countMe(count) * m_Input.m_Ratio; - m_InRankParticleLayout.emplace_back(pOff, pCount); - } + auto pCount = countMe(count) * m_Input.m_Ratio; + m_InRankParticleLayout.emplace_back(pOff, pCount); + } else - { - Offset unitOffset = { p[0] * m_PatchUnitMesh[0], p[1] * m_PatchUnitMesh[1], p[2] * m_PatchUnitMesh[2] }; - Extent unitExtent = { c[0] * m_PatchUnitMesh[0], c[1] * m_PatchUnitMesh[1], c[2] * m_PatchUnitMesh[2] }; - - auto counter = pOff; - - for ( unsigned long i=0; i. 
*/ -#include #include +#include #include +#include +#include #include +#include #include -#include -#include -#include -#include #include -#include +#include #include +#include #if openPMD_HAVE_ADIOS2 -# include +#include #endif using std::cout; using namespace openPMD; - /** The Memory profiler class for profiling purpose * * Simple Memory usage report that works on linux system */ -static std::chrono::time_point m_ProgStart = std::chrono::system_clock::now(); +static std::chrono::time_point m_ProgStart = + std::chrono::system_clock::now(); class MemoryProfiler { @@ -56,56 +56,60 @@ class MemoryProfiler * @param[in] rank MPI rank * @param[in] tag item name to measure */ - MemoryProfiler(int rank, const std::string& tag) { - m_Rank = rank; + MemoryProfiler(int rank, const std::string &tag) + { + m_Rank = rank; #if defined(__linux) - //m_Name = "/proc/meminfo"; - m_Name = "/proc/self/status"; - Display(tag); + // m_Name = "/proc/meminfo"; + m_Name = "/proc/self/status"; + Display(tag); #else - (void)tag; - m_Name = ""; + (void)tag; + m_Name = ""; #endif - } + } /** * - * Read from /proc/self/status and display the Virtual Memory info at rank 0 on console + * Read from /proc/self/status and display the Virtual Memory info at rank 0 + * on console * * @param tag item name to measure * @param rank MPI rank */ - void Display(const std::string& tag){ - if (0 == m_Name.size()) - return; + void Display(const std::string &tag) + { + if (0 == m_Name.size()) + return; - if (m_Rank > 0) - return; + if (m_Rank > 0) + return; - std::cout<<" memory at: "<( m_End - m_Start ).count(); - double secs = millis/1000.0; - if( m_Rank > 0 ) - return; + double millis = std::chrono::duration_cast( + m_End - m_Start) + .count(); + double secs = millis / 1000.0; + if (m_Rank > 0) + return; std::cout << " [" << m_Tag << "] took:" << secs << " seconds.\n"; - std::cout <<" \t From ProgStart in seconds "<< - std::chrono::duration_cast(m_End - m_ProgStart).count()/1000.0<( + m_End - m_ProgStart) + .count() / + 1000.0 + << std::endl; + + std::cout << std::endl; } + private: std::chrono::time_point m_Start; std::chrono::time_point m_End; @@ -153,7 +165,6 @@ class Timer int m_Rank = 0; }; - /** createData * generate a shared ptr of given size with given type & default value * @@ -163,38 +174,40 @@ class Timer * */ -template -std::shared_ptr< T > createData(const unsigned long& size, const T& val, bool increment=false) - { - auto E = std::shared_ptr< T > { - new T[size], []( T * d ) {delete[] d;} - }; - - for(unsigned long i = 0ul; i < size; i++ ) - { - if (increment) - E.get()[i] = val+i; - else - E.get()[i] = val; - } +template +std::shared_ptr +createData(const unsigned long &size, const T &val, bool increment = false) +{ + auto E = std::shared_ptr{new T[size], [](T *d) { delete[] d; }}; + + for (unsigned long i = 0ul; i < size; i++) + { + if (increment) + E.get()[i] = val + i; + else + E.get()[i] = val; + } return E; - } +} /** Find supported backends * (looking for ADIOS2 or H5) * */ -std::vector getBackends() { +std::vector getBackends() +{ std::vector res; #if openPMD_HAVE_ADIOS2 - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) != "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") != "ADIOS1") res.emplace_back(".bp"); - if( auxiliary::getEnvString( "OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET" ) == "ADIOS" ) + if (auxiliary::getEnvString("OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET") == + "ADIOS") return res; #endif #if openPMD_HAVE_HDF5 - if( auxiliary::getEnvString( 
"OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET" ) == "HDF5" ) + if (auxiliary::getEnvString("OPENPMD_BENCHMARK_USE_BACKEND", "NOT_SET") == + "HDF5") res.clear(); res.emplace_back(".h5"); #endif @@ -210,588 +223,633 @@ std::vector getBackends() { class TestInput { public: - TestInput() = default; - - /* - * Run the read tests - * assumes both GroupBased and fileBased series of this prefix exist. - * @ param prefix file prefix - * e.g. abc.bp (for group/variable based encoding) - * abc (for file based encoding) - * - */ - void run(const std::string& prefix) - { - if (prefix.find(m_Backend) == std::string::npos) { - // file based, default to %07T - std::ostringstream s; - s << prefix << "_%07T" << m_Backend; - std::string filename = s.str(); - read(filename); - } else { - // group or variable based, or filebased with fullname - read(prefix); - } - } // run - - - /* - * read a file - * - * @param filename - * - */ - void - read(const std::string& filename) - { - try { - std::string tag = "Reading: "+filename ; - Timer kk(tag, m_MPIRank); - Series series = Series(filename, Access::READ_ONLY, MPI_COMM_WORLD); - - int numIterations = series.iterations.size(); - - if ( 0 == m_MPIRank ) - { - std::cout << " "< (unsigned long)m_MPISize) || + ((unsigned long)m_MPISize % (grid[0] * grid[1]) != 0)) + { + if (0 == m_MPIRank) + std::cerr << " please check the grid decompisition. need to " + "fit given mpi size:" + << m_MPISize << std::endl; + return; + } + + if ((meshExtent[0] % grid[0] != 0) || (meshExtent[1] % grid[1] != 0)) + { + if (0 == m_MPIRank) + std::cerr + << " Not able to divide rho mesh by specified grid on X-Y: " + << grid[0] << "*" << grid[1] << std::endl; + return; + } - if ( grid[0] * grid[1] == 0 ) return; + Extent count(meshExtent.size(), 1); + count[0] = meshExtent[0] / grid[0]; + count[1] = meshExtent[1] / grid[1]; - if ( (grid[0] * grid[1] > (unsigned long) m_MPISize) || ((unsigned long)m_MPISize % (grid[0]*grid[1]) != 0) ) - { - if ( 0 == m_MPIRank ) - std::cerr<<" please check the grid decompisition. need to fit given mpi size:"<= 0; i--) + { + offset[i] = m % grid[i]; + m = (m - offset[i]) / grid[i]; + } + + for (unsigned int i = 0; i < grid.size(); i++) + offset[i] *= count[i]; + + auto slice_data = rho.loadChunk(offset, count); + series.flush(); } - if ( (meshExtent[0] % grid[0] != 0) || (meshExtent[1] % grid[1] != 0) ) + /* + * Read a block on a mesh. + * Chooses block according to 3 digit m_Pattern input: FDP: + * F = fraction (block will be 1/F along a dimension) + * D = blocks grows with this dimenstion among all ranks. + * Invalid D means only rank 0 will read a block + * P = when only rank 0 is active, pick where the block will locate: + * center(0), top left(1), bottom right(2) + * + * @param series input + * @param rho a mesh + * + */ + void block(Series &series, MeshRecordComponent &rho) { - if ( 0 == m_MPIRank ) - std::cerr<<" Not able to divide rho mesh by specified grid on X-Y: "<< grid[0] <<"*"<< grid[1] <= 10000) + return; // full scan - if ( meshExtent.size() == 3 ) - { - grid[2] = m_MPISize / (grid[0]*grid[1]) ; - count[2] = meshExtent[2]/grid[2]; - } + unsigned int alongDim = m_Pattern / 10 % 10; - unsigned long c=1; - for (unsigned long i : grid) { - c = c*i; - } + unsigned int fractionOnDim = m_Pattern / 100; - if ( c != (unsigned long) m_MPISize ) - { - if ( 0 == m_MPIRank ) - std::cerr<<" Not able to divide full scan according to input. 
"<=0; i-- ) - { - offset[i] = m % grid[i]; - m = (m - offset[i])/grid[i]; - } - - for (unsigned int i=0; i(offset, count); - series.flush(); - } - - /* - * Read a block on a mesh. - * Chooses block according to 3 digit m_Pattern input: FDP: - * F = fraction (block will be 1/F along a dimension) - * D = blocks grows with this dimenstion among all ranks. - * Invalid D means only rank 0 will read a block - * P = when only rank 0 is active, pick where the block will locate: - * center(0), top left(1), bottom right(2) - * - * @param series input - * @param rho a mesh - * - */ - void - block(Series& series, MeshRecordComponent& rho) - { - if (m_Pattern < 100) return; // slicer - - if (m_Pattern >= 10000) return; // full scan - - unsigned int alongDim = m_Pattern/10 % 10; - - unsigned int fractionOnDim = m_Pattern/100; - - Extent meshExtent = rho.getExtent(); - for (unsigned long i : meshExtent) - { - unsigned long blob = i/fractionOnDim; - if ( 0 == blob ) { - if ( m_MPIRank == 0 ) - std::cout<<"Unable to use franction:"< 1) ); - bool atBottomRight = ( (m_Pattern % 10 == 2) && (fractionOnDim > 1) ); - bool overlay = ( (m_Pattern % 10 == 3) && (fractionOnDim > 1) ); - - bool rankZeroOnly = ( alongDim == 4); - bool diagnalBlocks = ( alongDim > meshExtent.size() ) && !rankZeroOnly; - - std::ostringstream s; - s <<" Block retrieval fraction=1/"< 1)); + bool atBottomRight = ((m_Pattern % 10 == 2) && (fractionOnDim > 1)); + bool overlay = ((m_Pattern % 10 == 3) && (fractionOnDim > 1)); + + bool rankZeroOnly = (alongDim == 4); + bool diagnalBlocks = (alongDim > meshExtent.size()) && !rankZeroOnly; + + std::ostringstream s; + s << " Block retrieval fraction=1/" << fractionOnDim; + + if (rankZeroOnly) { - if ( atTopLeft ) - off[i] = 0; // top corner - else if ( atBottomRight ) - off[i] = (meshExtent[i]-blob); // bottom corner - else if (atCenter) - off[i] = (fractionOnDim/2) * blob; // middle corner + s << " rank 0 only, location:"; + if (atCenter) + s << " center "; + else if (atTopLeft) + s << " topleft "; + else if (atBottomRight) + s << " bottomRight "; else if (overlay) - off[i] = (fractionOnDim/2) * blob - blob/3; // near middle corner + s << " near center "; + } + else if (diagnalBlocks) + s << " blockStyle = diagnal"; + else + s << " blockStyle = alongDim" << alongDim; + + if (rankZeroOnly && m_MPIRank) + return; + Timer blockTime(s.str(), m_MPIRank); + + Offset off(meshExtent.size(), 0); + Extent ext(meshExtent.size(), 1); + + for (unsigned int i = 0; i < meshExtent.size(); i++) + { + unsigned long blob = meshExtent[i] / fractionOnDim; + ext[i] = blob; + + if (rankZeroOnly) + { + if (atTopLeft) + off[i] = 0; // top corner + else if (atBottomRight) + off[i] = (meshExtent[i] - blob); // bottom corner + else if (atCenter) + off[i] = (fractionOnDim / 2) * blob; // middle corner + else if (overlay) + off[i] = (fractionOnDim / 2) * blob - + blob / 3; // near middle corner + } + else + { + off[i] = m_MPIRank * blob; + + if (!diagnalBlocks) + if (i != alongDim) + off[i] = (fractionOnDim / 2) * blob; // middle corner + } } + + auto prettyLambda = [&](Offset oo, Extent cc) { + std::ostringstream o; + o << "[ "; + std::ostringstream c; + c << "[ "; + for (unsigned int k = 0; k < oo.size(); k++) + { + o << oo[k] << " "; + c << cc[k] << " "; + } + std::cout << o.str() << "] + " << c.str() << "]" << std::endl; + ; + }; + + if ((unsigned int)m_MPIRank < fractionOnDim) + { + auto slice_data = rho.loadChunk(off, ext); + series.flush(); + + std::cout << " Rank: " << m_MPIRank; + + prettyLambda(off, ext); + } + } 
+ + /* + * read a slice on a mesh + * + * @param series input + * @param rho a mesh + * @param rankZeroOnly only read on rank 0. Other ranks idle + * + */ + bool getSlice( + Extent meshExtent, + unsigned int whichDim, + bool rankZeroOnly, + Offset &off, + Extent &ext, + std::ostringstream &s) + { + if (rankZeroOnly && m_MPIRank) + return false; + + if (!rankZeroOnly && (m_MPISize == 1)) // rankZero has to be on + // return false; + rankZeroOnly = true; + + // if ( whichDim < 0 ) return false; + + if (whichDim >= meshExtent.size()) + return false; + + // std::ostringstream s; + if (whichDim == 0) + s << "Row slice time: "; + else if (whichDim == 1) + s << "Col slice time: "; else + s << "Z slice time: "; + if (rankZeroOnly) + s << " rank 0 only"; + + off[whichDim] = m_MPIRank % meshExtent[whichDim]; + for (unsigned int i = 0; i < meshExtent.size(); i++) { - off[i] = m_MPIRank * blob; + if (1 == meshExtent.size()) + whichDim = 100; + if (i != whichDim) + ext[i] = meshExtent[i]; + } - if ( !diagnalBlocks ) - if ( i != alongDim ) - off[i] = (fractionOnDim/2) * blob; // middle corner + std::ostringstream so, sc; + so << " Rank: " << m_MPIRank << " offset [ "; + sc << " count[ "; + for (unsigned int i = 0; i < meshExtent.size(); i++) + { + so << off[i] << " "; + sc << ext[i] << " "; } - } - - auto prettyLambda = [&](Offset oo, Extent cc) { - std::ostringstream o; o<<"[ "; - std::ostringstream c; c<<"[ "; - for (unsigned int k=0; k(off, ext); series.flush(); + } + + /* + * Handles 3D mesh read + * @param series openPMD series + * @param rho a mesh + */ + void sliceMe(Series &series, MeshRecordComponent &rho) + { + if (m_Pattern >= 100) + return; + + if ((m_Pattern % 10 != 3) && (m_Pattern % 10 != 5)) + return; - std::cout << " Rank: " << m_MPIRank; - - prettyLambda(off,ext); - } - } - - - /* - * read a slice on a mesh - * - * @param series input - * @param rho a mesh - * @param rankZeroOnly only read on rank 0. 
Other ranks idle - * - */ - bool - getSlice(Extent meshExtent, unsigned int whichDim, bool rankZeroOnly, - Offset& off, Extent& ext, std::ostringstream& s) - { - if ( rankZeroOnly && m_MPIRank ) - return false; - - if ( !rankZeroOnly && (m_MPISize == 1) ) // rankZero has to be on - //return false; - rankZeroOnly = true; - - //if ( whichDim < 0 ) return false; - - if ( whichDim >= meshExtent.size() ) return false; - - //std::ostringstream s; - if ( whichDim == 0 ) - s << "Row slice time: "; - else if ( whichDim == 1 ) - s << "Col slice time: "; - else - s << "Z slice time: "; - if ( rankZeroOnly ) - s <<" rank 0 only"; - - - off[whichDim] = m_MPIRank % meshExtent[whichDim]; - for ( unsigned int i=0; i(off, ext); - series.flush(); - } - - /* - * Handles 3D mesh read - * @param series openPMD series - * @param rho a mesh - */ - void - sliceMe( Series& series, MeshRecordComponent& rho ) - { - if ( m_Pattern >= 100 ) - return; - - if ( ( m_Pattern % 10 != 3 ) && ( m_Pattern % 10 != 5 ) ) - return; - - bool rankZeroOnly = true; - - if ( m_Pattern % 10 == 5 ) - rankZeroOnly = false; - - unsigned int whichDim = (m_Pattern/10 % 10); // second digit - - slice(series, rho, whichDim, rankZeroOnly); - } - - - /* - * Handles 3D mesh read of magnetic field - * @param series openPMD series - */ - void - sliceField(Series& series, IndexedIteration& iter) - { - if ( m_Pattern >= 100 ) - return; - - if ( ( m_Pattern % 10 != 3 ) && ( m_Pattern % 10 != 5 ) ) - return; - - bool rankZeroOnly = true; - - if ( m_Pattern % 10 == 5 ) - rankZeroOnly = false; - - int whichDim = (m_Pattern/10 % 10); // second digit - - if (whichDim < 5) - return; - whichDim -= 5; - - MeshRecordComponent bx = iter.meshes["B"]["x"]; - Extent meshExtent = bx.getExtent(); - - if ( bx.getExtent().size() != 3) { - if (m_MPIRank == 0) - std::cerr<<" Field needs to be on 3D mesh. 
"<(off, ext); - auto by_data = by.loadChunk(off, ext); - auto bz_data = bz.loadChunk(off, ext); - - series.flush(); - - } - - /* - * Read an iteration step, mesh & particles - * - * @param Series openPMD series - * @param iter iteration (actual iteration step may not equal to ts) - * @param ts timestep - * - */ - void - readStep( Series& series, IndexedIteration& iter, int ts ) - { - std::string comp_name = openPMD::MeshRecordComponent::SCALAR; - - MeshRecordComponent rho = iter.meshes["rho"][comp_name]; - Extent meshExtent = rho.getExtent(); - - if ( 0 == m_MPIRank ) - { - std::cout << "===> rho meshExtent : ts=" << ts << " ["; - for (unsigned long i : meshExtent) - std::cout< currPatterns; - if (m_Pattern > 0) - currPatterns.push_back(m_Pattern); - else - currPatterns.insert(currPatterns.end(), { 1, 5, 15, 25, 55, 65, 75, 440, 441, 442, 443, 7 }); + bool rankZeroOnly = true; - for(int i : currPatterns) { - m_Pattern = i; - sliceMe(series, rho); - block(series, rho); - fullscan(series, rho); + if (m_Pattern % 10 == 5) + rankZeroOnly = false; - sliceField(series, iter); + unsigned int whichDim = (m_Pattern / 10 % 10); // second digit - sliceParticles(series, iter); + slice(series, rho, whichDim, rankZeroOnly); } - if (currPatterns.size() > 1) - m_Pattern = 0; - } - - /* - * Read a slice of id of the first particle - * - * @param series openPMD Series - * @param iter current iteration - * - */ - void sliceParticles(Series& series, IndexedIteration& iter) - { - // read id of the first particle found - if ( m_Pattern != 7 ) - return; - - if ( 0 == iter.particles.size() ) + + /* + * Handles 3D mesh read of magnetic field + * @param series openPMD series + */ + void sliceField(Series &series, IndexedIteration &iter) { - if ( 0 == m_MPIRank ) - std::cerr << " No Particles found. Skipping particle slicing. " << std::endl; - return; + if (m_Pattern >= 100) + return; + + if ((m_Pattern % 10 != 3) && (m_Pattern % 10 != 5)) + return; + + bool rankZeroOnly = true; + + if (m_Pattern % 10 == 5) + rankZeroOnly = false; + + int whichDim = (m_Pattern / 10 % 10); // second digit + + if (whichDim < 5) + return; + whichDim -= 5; + + MeshRecordComponent bx = iter.meshes["B"]["x"]; + Extent meshExtent = bx.getExtent(); + + if (bx.getExtent().size() != 3) + { + if (m_MPIRank == 0) + std::cerr << " Field needs to be on 3D mesh. 
" << std::endl; + return; + } + + MeshRecordComponent by = iter.meshes["B"]["y"]; + MeshRecordComponent bz = iter.meshes["B"]["z"]; + + Offset off(meshExtent.size(), 0); + Extent ext(meshExtent.size(), 1); + + std::ostringstream s; + s << " Electric Field slice: "; + if (!getSlice(meshExtent, whichDim, rankZeroOnly, off, ext, s)) + return; + + Timer sliceTime(s.str(), m_MPIRank); + auto bx_data = bx.loadChunk(off, ext); + auto by_data = by.loadChunk(off, ext); + auto bz_data = bz.loadChunk(off, ext); + + series.flush(); } - openPMD::ParticleSpecies p = iter.particles.begin()->second; - RecordComponent idVal = p["id"][RecordComponent::SCALAR]; + /* + * Read an iteration step, mesh & particles + * + * @param Series openPMD series + * @param iter iteration (actual iteration step may not equal to + * ts) + * @param ts timestep + * + */ + void readStep(Series &series, IndexedIteration &iter, int ts) + { + std::string comp_name = openPMD::MeshRecordComponent::SCALAR; - Extent pExtent = idVal.getExtent(); + MeshRecordComponent rho = iter.meshes["rho"][comp_name]; + Extent meshExtent = rho.getExtent(); - auto blob = pExtent[0]/(10*m_MPISize); - if (0 == blob) - return; + if (0 == m_MPIRank) + { + std::cout << "===> rho meshExtent : ts=" << ts << " ["; + for (unsigned long i : meshExtent) + std::cout << i << " "; + std::cout << "]" << std::endl; + } - auto start = pExtent[0]/4; + std::vector currPatterns; + if (m_Pattern > 0) + currPatterns.push_back(m_Pattern); + else + currPatterns.insert( + currPatterns.end(), + {1, 5, 15, 25, 55, 65, 75, 440, 441, 442, 443, 7}); - if (m_MPIRank > 0) - return; + for (int i : currPatterns) + { + m_Pattern = i; + sliceMe(series, rho); + block(series, rho); + fullscan(series, rho); - std::ostringstream s; - s << "particle retrievel time, ["< 1) + m_Pattern = 0; + } - Offset colOff = {m_MPIRank*blob}; - Extent colExt = {blob}; - auto col_data = idVal.loadChunk(colOff, colExt); - series.flush(); - } + /* + * Read a slice of id of the first particle + * + * @param series openPMD Series + * @param iter current iteration + * + */ + void sliceParticles(Series &series, IndexedIteration &iter) + { + // read id of the first particle found + if (m_Pattern != 7) + return; + if (0 == iter.particles.size()) + { + if (0 == m_MPIRank) + std::cerr << " No Particles found. Skipping particle slicing. 
" + << std::endl; + return; + } - int m_MPISize = 1; - int m_MPIRank = 0; + openPMD::ParticleSpecies p = iter.particles.begin()->second; + RecordComponent idVal = p["id"][RecordComponent::SCALAR]; - unsigned int m_Pattern = 30; - std::string m_Backend = ".bp"; + Extent pExtent = idVal.getExtent(); - //std::vector> m_InRankDistribution; -}; // class TestInput + auto blob = pExtent[0] / (10 * m_MPISize); + if (0 == blob) + return; + + auto start = pExtent[0] / 4; + + if (m_MPIRank > 0) + return; + + std::ostringstream s; + s << "particle retrievel time, [" << start << " + " + << (blob * m_MPISize) << "] "; + Timer colTime(s.str(), m_MPIRank); + Offset colOff = {m_MPIRank * blob}; + Extent colExt = {blob}; + auto col_data = idVal.loadChunk(colOff, colExt); + series.flush(); + } + + int m_MPISize = 1; + int m_MPIRank = 0; + + unsigned int m_Pattern = 30; + std::string m_Backend = ".bp"; + + // std::vector> + // m_InRankDistribution; +}; // class TestInput /** TEST MAIN * * description of runtime options/flags */ -int -main( int argc, char *argv[] ) +int main(int argc, char *argv[]) { - MPI_Init( &argc, &argv ); + MPI_Init(&argc, &argv); TestInput input; - MPI_Comm_size( MPI_COMM_WORLD, &input.m_MPISize ); - MPI_Comm_rank( MPI_COMM_WORLD, &input.m_MPIRank ); - - if (argc < 3) { - if (input.m_MPIRank == 0) - std::cout<<"Usage: "< - namespace openPMD { /** @@ -41,10 +40,9 @@ struct ChunkInfo * If rank is smaller than zero, will be converted to zero. */ explicit ChunkInfo() = default; - ChunkInfo( Offset, Extent ); + ChunkInfo(Offset, Extent); - bool - operator==( ChunkInfo const & other ) const; + bool operator==(ChunkInfo const &other) const; }; /** @@ -68,12 +66,11 @@ struct WrittenChunkInfo : ChunkInfo /* * If rank is smaller than zero, will be converted to zero. */ - WrittenChunkInfo( Offset, Extent, int sourceID ); - WrittenChunkInfo( Offset, Extent ); + WrittenChunkInfo(Offset, Extent, int sourceID); + WrittenChunkInfo(Offset, Extent); - bool - operator==( WrittenChunkInfo const & other ) const; + bool operator==(WrittenChunkInfo const &other) const; }; -using ChunkTable = std::vector< WrittenChunkInfo >; +using ChunkTable = std::vector; } // namespace openPMD diff --git a/include/openPMD/Dataset.hpp b/include/openPMD/Dataset.hpp index bfe41d6b63..8757a3cf0a 100644 --- a/include/openPMD/Dataset.hpp +++ b/include/openPMD/Dataset.hpp @@ -23,15 +23,14 @@ #include "openPMD/Datatype.hpp" #include +#include #include #include -#include - namespace openPMD { -using Extent = std::vector< std::uint64_t >; -using Offset = std::vector< std::uint64_t >; +using Extent = std::vector; +using Offset = std::vector; class Dataset { @@ -46,9 +45,9 @@ class Dataset * Helpful for resizing datasets, since datatypes need not be given twice. 
* */ - Dataset( Extent ); + Dataset(Extent); - Dataset& extend(Extent newExtent); + Dataset &extend(Extent newExtent); Extent extent; Datatype dtype; diff --git a/include/openPMD/Datatype.hpp b/include/openPMD/Datatype.hpp index 530bc94253..66133881c8 100644 --- a/include/openPMD/Datatype.hpp +++ b/include/openPMD/Datatype.hpp @@ -41,11 +41,22 @@ namespace openPMD */ enum class Datatype : int { - CHAR, UCHAR, // SCHAR, - SHORT, INT, LONG, LONGLONG, - USHORT, UINT, ULONG, ULONGLONG, - FLOAT, DOUBLE, LONG_DOUBLE, - CFLOAT, CDOUBLE, CLONG_DOUBLE, + CHAR, + UCHAR, // SCHAR, + SHORT, + INT, + LONG, + LONGLONG, + USHORT, + UINT, + ULONG, + ULONGLONG, + FLOAT, + DOUBLE, + LONG_DOUBLE, + CFLOAT, + CDOUBLE, + CLONG_DOUBLE, STRING, VEC_CHAR, VEC_SHORT, @@ -76,7 +87,7 @@ enum class Datatype : int * listed in order in a vector. * */ -extern std::vector< Datatype > openPMD_Datatypes; +extern std::vector openPMD_Datatypes; /** @brief Fundamental equivalence check for two given types T and U. * @@ -87,125 +98,322 @@ extern std::vector< Datatype > openPMD_Datatypes; * @tparam T first type * @tparam U second type */ -template< - typename T, - typename U -> -struct decay_equiv : - std::is_same< - typename std::remove_pointer< - typename std::remove_cv< - typename std::decay< - typename std::remove_all_extents< T >::type - >::type - >::type - >::type, - typename std::remove_pointer< - typename std::remove_cv< - typename std::decay< - typename std::remove_all_extents< U >::type - >::type - >::type - >::type - >::type -{ }; - -template< - typename T, - typename U -> -constexpr bool decay_equiv_v = decay_equiv< T, U >::value; - -template< typename T > -inline -constexpr -Datatype -determineDatatype() +template +struct decay_equiv + : std::is_same< + typename std::remove_pointer::type>:: + type>::type>::type, + typename std::remove_pointer::type>:: + type>::type>::type>::type +{}; + +template +constexpr bool decay_equiv_v = decay_equiv::value; + +template +inline constexpr Datatype determineDatatype() { using DT = Datatype; - if( decay_equiv< T, char >::value ){ return DT::CHAR; } - else if( decay_equiv< T, unsigned char >::value ){ return DT::UCHAR; } - else if( decay_equiv< T, short >::value ){ return DT::SHORT; } - else if( decay_equiv< T, int >::value ){ return DT::INT; } - else if( decay_equiv< T, long >::value ){ return DT::LONG; } - else if( decay_equiv< T, long long >::value ){ return DT::LONGLONG; } - else if( decay_equiv< T, unsigned short >::value ){ return DT::USHORT; } - else if( decay_equiv< T, unsigned int >::value ){ return DT::UINT; } - else if( decay_equiv< T, unsigned long >::value ){ return DT::ULONG; } - else if( decay_equiv< T, unsigned long long >::value ){ return DT::ULONGLONG; } - else if( decay_equiv< T, float >::value ){ return DT::FLOAT; } - else if( decay_equiv< T, double >::value ){ return DT::DOUBLE; } - else if( decay_equiv< T, long double >::value ){ return DT::LONG_DOUBLE; } - else if( decay_equiv< T, std::complex< float > >::value ){ return DT::CFLOAT; } - else if( decay_equiv< T, std::complex< double > >::value ){ return DT::CDOUBLE; } - else if( decay_equiv< T, std::complex< long double > >::value ){ return DT::CLONG_DOUBLE; } - else if( decay_equiv< T, std::string >::value ){ return DT::STRING; } - else if( decay_equiv< T, std::vector< char > >::value ){ return DT::VEC_CHAR; } - else if( decay_equiv< T, std::vector< short > >::value ){ return DT::VEC_SHORT; } - else if( decay_equiv< T, std::vector< int > >::value ){ return DT::VEC_INT; } - else if( decay_equiv< T, 
std::vector< long > >::value ){ return DT::VEC_LONG; } - else if( decay_equiv< T, std::vector< long long > >::value ){ return DT::VEC_LONGLONG; } - else if( decay_equiv< T, std::vector< unsigned char > >::value ){ return DT::VEC_UCHAR; } - else if( decay_equiv< T, std::vector< unsigned short > >::value ){ return DT::VEC_USHORT; } - else if( decay_equiv< T, std::vector< unsigned int > >::value ){ return DT::VEC_UINT; } - else if( decay_equiv< T, std::vector< unsigned long > >::value ){ return DT::VEC_ULONG; } - else if( decay_equiv< T, std::vector< unsigned long long > >::value ){ return DT::VEC_ULONGLONG; } - else if( decay_equiv< T, std::vector< float > >::value ){ return DT::VEC_FLOAT; } - else if( decay_equiv< T, std::vector< double > >::value ){ return DT::VEC_DOUBLE; } - else if( decay_equiv< T, std::vector< long double > >::value ){ return DT::VEC_LONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< float > > >::value ){ return DT::VEC_CFLOAT; } - else if( decay_equiv< T, std::vector< std::complex< double > > >::value ){ return DT::VEC_CDOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< long double > > >::value ){ return DT::VEC_CLONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::string > >::value ){ return DT::VEC_STRING; } - else if( decay_equiv< T, std::array< double, 7 > >::value ){ return DT::ARR_DBL_7; } - else if( decay_equiv< T, bool >::value ){ return DT::BOOL; } - else return Datatype::UNDEFINED; + if (decay_equiv::value) + { + return DT::CHAR; + } + else if (decay_equiv::value) + { + return DT::UCHAR; + } + else if (decay_equiv::value) + { + return DT::SHORT; + } + else if (decay_equiv::value) + { + return DT::INT; + } + else if (decay_equiv::value) + { + return DT::LONG; + } + else if (decay_equiv::value) + { + return DT::LONGLONG; + } + else if (decay_equiv::value) + { + return DT::USHORT; + } + else if (decay_equiv::value) + { + return DT::UINT; + } + else if (decay_equiv::value) + { + return DT::ULONG; + } + else if (decay_equiv::value) + { + return DT::ULONGLONG; + } + else if (decay_equiv::value) + { + return DT::FLOAT; + } + else if (decay_equiv::value) + { + return DT::DOUBLE; + } + else if (decay_equiv::value) + { + return DT::LONG_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CFLOAT; + } + else if (decay_equiv>::value) + { + return DT::CDOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CLONG_DOUBLE; + } + else if (decay_equiv::value) + { + return DT::STRING; + } + else if (decay_equiv>::value) + { + return DT::VEC_CHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_SHORT; + } + else if (decay_equiv>::value) + { + return DT::VEC_INT; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_UCHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_USHORT; + } + else if (decay_equiv>::value) + { + return DT::VEC_UINT; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_FLOAT; + } + else if (decay_equiv>::value) + { + return DT::VEC_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG_DOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CFLOAT; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CDOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CLONG_DOUBLE; + } + else if 
(decay_equiv>::value) + { + return DT::VEC_STRING; + } + else if (decay_equiv>::value) + { + return DT::ARR_DBL_7; + } + else if (decay_equiv::value) + { + return DT::BOOL; + } + else + return Datatype::UNDEFINED; } -template< typename T > -inline -constexpr -Datatype -determineDatatype(std::shared_ptr< T >) +template +inline constexpr Datatype determineDatatype(std::shared_ptr) { using DT = Datatype; - if( decay_equiv< T, char >::value ){ return DT::CHAR; } - else if( decay_equiv< T, unsigned char >::value ){ return DT::UCHAR; } - else if( decay_equiv< T, short >::value ){ return DT::SHORT; } - else if( decay_equiv< T, int >::value ){ return DT::INT; } - else if( decay_equiv< T, long >::value ){ return DT::LONG; } - else if( decay_equiv< T, long long >::value ){ return DT::LONGLONG; } - else if( decay_equiv< T, unsigned short >::value ){ return DT::USHORT; } - else if( decay_equiv< T, unsigned int >::value ){ return DT::UINT; } - else if( decay_equiv< T, unsigned long >::value ){ return DT::ULONG; } - else if( decay_equiv< T, unsigned long long >::value ){ return DT::ULONGLONG; } - else if( decay_equiv< T, float >::value ){ return DT::FLOAT; } - else if( decay_equiv< T, double >::value ){ return DT::DOUBLE; } - else if( decay_equiv< T, long double >::value ){ return DT::LONG_DOUBLE; } - else if( decay_equiv< T, std::complex< float > >::value ){ return DT::CFLOAT; } - else if( decay_equiv< T, std::complex< double > >::value ){ return DT::CDOUBLE; } - else if( decay_equiv< T, std::complex< long double > >::value ){ return DT::CLONG_DOUBLE; } - else if( decay_equiv< T, std::string >::value ){ return DT::STRING; } - else if( decay_equiv< T, std::vector< char > >::value ){ return DT::VEC_CHAR; } - else if( decay_equiv< T, std::vector< short > >::value ){ return DT::VEC_SHORT; } - else if( decay_equiv< T, std::vector< int > >::value ){ return DT::VEC_INT; } - else if( decay_equiv< T, std::vector< long > >::value ){ return DT::VEC_LONG; } - else if( decay_equiv< T, std::vector< long long > >::value ){ return DT::VEC_LONGLONG; } - else if( decay_equiv< T, std::vector< unsigned char > >::value ){ return DT::VEC_UCHAR; } - else if( decay_equiv< T, std::vector< unsigned short > >::value ){ return DT::VEC_USHORT; } - else if( decay_equiv< T, std::vector< unsigned int > >::value ){ return DT::VEC_UINT; } - else if( decay_equiv< T, std::vector< unsigned long > >::value ){ return DT::VEC_ULONG; } - else if( decay_equiv< T, std::vector< unsigned long long > >::value ){ return DT::VEC_ULONGLONG; } - else if( decay_equiv< T, std::vector< float > >::value ){ return DT::VEC_FLOAT; } - else if( decay_equiv< T, std::vector< double > >::value ){ return DT::VEC_DOUBLE; } - else if( decay_equiv< T, std::vector< long double > >::value ){ return DT::VEC_LONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< float > > >::value ){ return DT::VEC_CFLOAT; } - else if( decay_equiv< T, std::vector< std::complex< double > > >::value ){ return DT::VEC_CDOUBLE; } - else if( decay_equiv< T, std::vector< std::complex< long double > > >::value ){ return DT::VEC_CLONG_DOUBLE; } - else if( decay_equiv< T, std::vector< std::string > >::value ){ return DT::VEC_STRING; } - else if( decay_equiv< T, std::array< double, 7 > >::value ){ return DT::ARR_DBL_7; } - else if( decay_equiv< T, bool >::value ){ return DT::BOOL; } - else return DT::UNDEFINED; + if (decay_equiv::value) + { + return DT::CHAR; + } + else if (decay_equiv::value) + { + return DT::UCHAR; + } + else if (decay_equiv::value) + { + return DT::SHORT; + } + 
else if (decay_equiv::value) + { + return DT::INT; + } + else if (decay_equiv::value) + { + return DT::LONG; + } + else if (decay_equiv::value) + { + return DT::LONGLONG; + } + else if (decay_equiv::value) + { + return DT::USHORT; + } + else if (decay_equiv::value) + { + return DT::UINT; + } + else if (decay_equiv::value) + { + return DT::ULONG; + } + else if (decay_equiv::value) + { + return DT::ULONGLONG; + } + else if (decay_equiv::value) + { + return DT::FLOAT; + } + else if (decay_equiv::value) + { + return DT::DOUBLE; + } + else if (decay_equiv::value) + { + return DT::LONG_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CFLOAT; + } + else if (decay_equiv>::value) + { + return DT::CDOUBLE; + } + else if (decay_equiv>::value) + { + return DT::CLONG_DOUBLE; + } + else if (decay_equiv::value) + { + return DT::STRING; + } + else if (decay_equiv>::value) + { + return DT::VEC_CHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_SHORT; + } + else if (decay_equiv>::value) + { + return DT::VEC_INT; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_UCHAR; + } + else if (decay_equiv>::value) + { + return DT::VEC_USHORT; + } + else if (decay_equiv>::value) + { + return DT::VEC_UINT; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_ULONGLONG; + } + else if (decay_equiv>::value) + { + return DT::VEC_FLOAT; + } + else if (decay_equiv>::value) + { + return DT::VEC_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::VEC_LONG_DOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CFLOAT; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CDOUBLE; + } + else if (decay_equiv>>::value) + { + return DT::VEC_CLONG_DOUBLE; + } + else if (decay_equiv>::value) + { + return DT::VEC_STRING; + } + else if (decay_equiv>::value) + { + return DT::ARR_DBL_7; + } + else if (decay_equiv::value) + { + return DT::BOOL; + } + else + return DT::UNDEFINED; } /** Return number of bytes representing a Datatype @@ -213,71 +421,70 @@ determineDatatype(std::shared_ptr< T >) * @param d Datatype * @return number of bytes */ -inline size_t -toBytes( Datatype d ) +inline size_t toBytes(Datatype d) { using DT = Datatype; - switch( d ) - { - case DT::CHAR: - case DT::VEC_CHAR: - case DT::STRING: - case DT::VEC_STRING: - return sizeof(char); - case DT::UCHAR: - case DT::VEC_UCHAR: - return sizeof(unsigned char); - // case DT::SCHAR: - // case DT::VEC_SCHAR: - // return sizeof(signed char); - case DT::SHORT: - case DT::VEC_SHORT: - return sizeof(short); - case DT::INT: - case DT::VEC_INT: - return sizeof(int); - case DT::LONG: - case DT::VEC_LONG: - return sizeof(long); - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return sizeof(long long); - case DT::USHORT: - case DT::VEC_USHORT: - return sizeof(unsigned short); - case DT::UINT: - case DT::VEC_UINT: - return sizeof(unsigned int); - case DT::ULONG: - case DT::VEC_ULONG: - return sizeof(unsigned long); - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return sizeof(unsigned long long); - case DT::FLOAT: - case DT::VEC_FLOAT: - return sizeof(float); - case DT::DOUBLE: - case DT::VEC_DOUBLE: - case DT::ARR_DBL_7: - return sizeof(double); - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return sizeof(long double); - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return sizeof(float) * 2; - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - 
return sizeof(double) * 2; - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return sizeof(long double) * 2; - case DT::BOOL: - return sizeof(bool); - case DT::UNDEFINED: - default: - throw std::runtime_error("toBytes: Invalid datatype!"); + switch (d) + { + case DT::CHAR: + case DT::VEC_CHAR: + case DT::STRING: + case DT::VEC_STRING: + return sizeof(char); + case DT::UCHAR: + case DT::VEC_UCHAR: + return sizeof(unsigned char); + // case DT::SCHAR: + // case DT::VEC_SCHAR: + // return sizeof(signed char); + case DT::SHORT: + case DT::VEC_SHORT: + return sizeof(short); + case DT::INT: + case DT::VEC_INT: + return sizeof(int); + case DT::LONG: + case DT::VEC_LONG: + return sizeof(long); + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return sizeof(long long); + case DT::USHORT: + case DT::VEC_USHORT: + return sizeof(unsigned short); + case DT::UINT: + case DT::VEC_UINT: + return sizeof(unsigned int); + case DT::ULONG: + case DT::VEC_ULONG: + return sizeof(unsigned long); + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return sizeof(unsigned long long); + case DT::FLOAT: + case DT::VEC_FLOAT: + return sizeof(float); + case DT::DOUBLE: + case DT::VEC_DOUBLE: + case DT::ARR_DBL_7: + return sizeof(double); + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return sizeof(long double); + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return sizeof(float) * 2; + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return sizeof(double) * 2; + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return sizeof(long double) * 2; + case DT::BOOL: + return sizeof(bool); + case DT::UNDEFINED: + default: + throw std::runtime_error("toBytes: Invalid datatype!"); } } @@ -286,10 +493,9 @@ toBytes( Datatype d ) * @param d Datatype * @return number of bits */ -inline size_t -toBits( Datatype d ) +inline size_t toBits(Datatype d) { - return toBytes( d ) * CHAR_BIT; + return toBytes(d) * CHAR_BIT; } /** Compare if a Datatype is a vector type @@ -297,33 +503,32 @@ toBits( Datatype d ) * @param d Datatype to test * @return true if vector type, else false */ -inline bool -isVector( Datatype d ) +inline bool isVector(Datatype d) { using DT = Datatype; - switch( d ) - { - case DT::VEC_CHAR: - case DT::VEC_SHORT: - case DT::VEC_INT: - case DT::VEC_LONG: - case DT::VEC_LONGLONG: - case DT::VEC_UCHAR: - case DT::VEC_USHORT: - case DT::VEC_UINT: - case DT::VEC_ULONG: - case DT::VEC_ULONGLONG: - case DT::VEC_FLOAT: - case DT::VEC_DOUBLE: - case DT::VEC_LONG_DOUBLE: - case DT::VEC_CFLOAT: - case DT::VEC_CDOUBLE: - case DT::VEC_CLONG_DOUBLE: - case DT::VEC_STRING: - return true; - default: - return false; + switch (d) + { + case DT::VEC_CHAR: + case DT::VEC_SHORT: + case DT::VEC_INT: + case DT::VEC_LONG: + case DT::VEC_LONGLONG: + case DT::VEC_UCHAR: + case DT::VEC_USHORT: + case DT::VEC_UINT: + case DT::VEC_ULONG: + case DT::VEC_ULONGLONG: + case DT::VEC_FLOAT: + case DT::VEC_DOUBLE: + case DT::VEC_LONG_DOUBLE: + case DT::VEC_CFLOAT: + case DT::VEC_CDOUBLE: + case DT::VEC_CLONG_DOUBLE: + case DT::VEC_STRING: + return true; + default: + return false; } } @@ -334,23 +539,22 @@ isVector( Datatype d ) * @param d Datatype to test * @return true if floating point, otherwise false */ -inline bool -isFloatingPoint( Datatype d ) +inline bool isFloatingPoint(Datatype d) { using DT = Datatype; - switch( d ) + switch (d) { - case DT::FLOAT: - case DT::VEC_FLOAT: - case DT::DOUBLE: - case DT::VEC_DOUBLE: - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: + case DT::FLOAT: + case DT::VEC_FLOAT: + case DT::DOUBLE: + case DT::VEC_DOUBLE: + 
case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: // note: complex floats are not std::is_floating_point - return true; - default: - return false; + return true; + default: + return false; } } @@ -361,22 +565,21 @@ isFloatingPoint( Datatype d ) * @param d Datatype to test * @return true if complex floating point, otherwise false */ -inline bool -isComplexFloatingPoint( Datatype d ) +inline bool isComplexFloatingPoint(Datatype d) { using DT = Datatype; - switch( d ) + switch (d) { - case DT::CFLOAT: - case DT::VEC_CFLOAT: - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return true; - default: - return false; + case DT::CFLOAT: + case DT::VEC_CFLOAT: + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return true; + default: + return false; } } @@ -387,13 +590,12 @@ isComplexFloatingPoint( Datatype d ) * @tparam T type to test * @return true if floating point, otherwise false */ -template< typename T > -inline bool -isFloatingPoint() +template +inline bool isFloatingPoint() { - Datatype dtype = determineDatatype< T >(); + Datatype dtype = determineDatatype(); - return isFloatingPoint( dtype ); + return isFloatingPoint(dtype); } /** Compare if a type is a complex floating point type @@ -403,11 +605,10 @@ isFloatingPoint() * @tparam T type to test * @return true if complex floating point, otherwise false */ -template< typename T > -inline bool -isComplexFloatingPoint() +template +inline bool isComplexFloatingPoint() { - Datatype dtype = determineDatatype< T >(); + Datatype dtype = determineDatatype(); return isComplexFloatingPoint(dtype); } @@ -420,33 +621,32 @@ isComplexFloatingPoint() * @param d Datatype to test * @return std::tuple with isInteger and isSigned result */ -inline std::tuple< bool, bool > -isInteger( Datatype d ) +inline std::tuple isInteger(Datatype d) { using DT = Datatype; - switch( d ) - { - case DT::SHORT: - case DT::VEC_SHORT: - case DT::INT: - case DT::VEC_INT: - case DT::LONG: - case DT::VEC_LONG: - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return std::make_tuple( true, true ); - case DT::USHORT: - case DT::VEC_USHORT: - case DT::UINT: - case DT::VEC_UINT: - case DT::ULONG: - case DT::VEC_ULONG: - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return std::make_tuple( true, false ); - default: - return std::make_tuple( false, false ); + switch (d) + { + case DT::SHORT: + case DT::VEC_SHORT: + case DT::INT: + case DT::VEC_INT: + case DT::LONG: + case DT::VEC_LONG: + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return std::make_tuple(true, true); + case DT::USHORT: + case DT::VEC_USHORT: + case DT::UINT: + case DT::VEC_UINT: + case DT::ULONG: + case DT::VEC_ULONG: + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return std::make_tuple(true, false); + default: + return std::make_tuple(false, false); } } @@ -458,13 +658,12 @@ isInteger( Datatype d ) * @tparam T type to test * @return std::tuple with isInteger and isSigned result */ -template< typename T > -inline std::tuple< bool, bool > -isInteger() +template +inline std::tuple isInteger() { - Datatype dtype = determineDatatype< T >(); + Datatype dtype = determineDatatype(); - return isInteger( dtype ); + return isInteger(dtype); } /** Compare if a Datatype is equivalent to a floating point type @@ -473,21 +672,16 @@ isInteger() * @param d Datatype to compare * @return true if both types are floating point and same bitness, else false */ -template< typename T_FP > -inline bool -isSameFloatingPoint( Datatype d ) 
+template +inline bool isSameFloatingPoint(Datatype d) { // template - bool tt_is_fp = isFloatingPoint< T_FP >(); + bool tt_is_fp = isFloatingPoint(); // Datatype - bool dt_is_fp = isFloatingPoint( d ); + bool dt_is_fp = isFloatingPoint(d); - if( - tt_is_fp && - dt_is_fp && - toBits( d ) == toBits( determineDatatype< T_FP >() ) - ) + if (tt_is_fp && dt_is_fp && toBits(d) == toBits(determineDatatype())) return true; else return false; @@ -497,23 +691,20 @@ isSameFloatingPoint( Datatype d ) * * @tparam T_CFP complex floating point type to compare * @param d Datatype to compare - * @return true if both types are complex floating point and same bitness, else false + * @return true if both types are complex floating point and same bitness, else + * false */ -template< typename T_CFP > -inline bool -isSameComplexFloatingPoint( Datatype d ) +template +inline bool isSameComplexFloatingPoint(Datatype d) { // template - bool tt_is_cfp = isComplexFloatingPoint< T_CFP >(); + bool tt_is_cfp = isComplexFloatingPoint(); // Datatype - bool dt_is_cfp = isComplexFloatingPoint( d ); + bool dt_is_cfp = isComplexFloatingPoint(d); - if( - tt_is_cfp && - dt_is_cfp && - toBits( d ) == toBits( determineDatatype< T_CFP >() ) - ) + if (tt_is_cfp && dt_is_cfp && + toBits(d) == toBits(determineDatatype())) return true; else return false; @@ -523,26 +714,22 @@ isSameComplexFloatingPoint( Datatype d ) * * @tparam T_Int signed or unsigned integer type to compare * @param d Datatype to compare - * @return true if both types are integers, same signed and same bitness, else false + * @return true if both types are integers, same signed and same bitness, else + * false */ -template< typename T_Int > -inline bool -isSameInteger( Datatype d ) +template +inline bool isSameInteger(Datatype d) { // template bool tt_is_int, tt_is_sig; - std::tie(tt_is_int, tt_is_sig) = isInteger< T_Int >(); + std::tie(tt_is_int, tt_is_sig) = isInteger(); // Datatype bool dt_is_int, dt_is_sig; - std::tie(dt_is_int, dt_is_sig) = isInteger( d ); - - if( - tt_is_int && - dt_is_int && - tt_is_sig == dt_is_sig && - toBits( d ) == toBits( determineDatatype< T_Int >() ) - ) + std::tie(dt_is_int, dt_is_sig) = isInteger(d); + + if (tt_is_int && dt_is_int && tt_is_sig == dt_is_sig && + toBits(d) == toBits(determineDatatype())) return true; else return false; @@ -554,84 +741,74 @@ isSameInteger( Datatype d ) * some platforms, e.g. if long and long long are the same or double and * long double will also return true. 
*/ -inline bool -isSame( openPMD::Datatype const d, openPMD::Datatype const e ) +inline bool isSame(openPMD::Datatype const d, openPMD::Datatype const e) { // exact same type - if( static_cast(d) == static_cast(e) ) + if (static_cast(d) == static_cast(e)) return true; - bool d_is_vec = isVector( d ); - bool e_is_vec = isVector( e ); + bool d_is_vec = isVector(d); + bool e_is_vec = isVector(e); // same int bool d_is_int, d_is_sig; - std::tie(d_is_int, d_is_sig) = isInteger( d ); + std::tie(d_is_int, d_is_sig) = isInteger(d); bool e_is_int, e_is_sig; - std::tie(e_is_int, e_is_sig) = isInteger( e ); - if( - d_is_int && - e_is_int && - d_is_vec == e_is_vec && - d_is_sig == e_is_sig && - toBits( d ) == toBits( e ) - ) + std::tie(e_is_int, e_is_sig) = isInteger(e); + if (d_is_int && e_is_int && d_is_vec == e_is_vec && d_is_sig == e_is_sig && + toBits(d) == toBits(e)) return true; // same float - bool d_is_fp = isFloatingPoint( d ); - bool e_is_fp = isFloatingPoint( e ); - - if( - d_is_fp && - e_is_fp && - d_is_vec == e_is_vec && - toBits( d ) == toBits( e ) - ) + bool d_is_fp = isFloatingPoint(d); + bool e_is_fp = isFloatingPoint(e); + + if (d_is_fp && e_is_fp && d_is_vec == e_is_vec && toBits(d) == toBits(e)) return true; // same complex floating point bool d_is_cfp = isComplexFloatingPoint(d); bool e_is_cfp = isComplexFloatingPoint(e); - if( - d_is_cfp && - e_is_cfp && - d_is_vec == e_is_vec && - toBits( d ) == toBits( e ) - ) + if (d_is_cfp && e_is_cfp && d_is_vec == e_is_vec && toBits(d) == toBits(e)) return true; return false; } -namespace detail { - template - struct BasicDatatypeHelper { +namespace detail +{ + template + struct BasicDatatypeHelper + { Datatype m_dt = determineDatatype(); }; - template - struct BasicDatatypeHelper> { + template + struct BasicDatatypeHelper> + { Datatype m_dt = BasicDatatypeHelper{}.m_dt; }; - template - struct BasicDatatypeHelper> { + template + struct BasicDatatypeHelper> + { Datatype m_dt = BasicDatatypeHelper{}.m_dt; }; - struct BasicDatatype { + struct BasicDatatype + { template static Datatype call(); template static Datatype call(); }; -} +} // namespace detail /** - * @brief basicDatatype Strip openPMD Datatype of std::vector, std::array et. al. + * @brief basicDatatype Strip openPMD Datatype of std::vector, std::array et. + * al. * @param dt The "full" Datatype. * @return The "inner" Datatype. 
*/ @@ -639,17 +816,13 @@ Datatype basicDatatype(Datatype dt); Datatype toVectorType(Datatype dt); -std::string datatypeToString( Datatype dt ); +std::string datatypeToString(Datatype dt); -Datatype stringToDatatype( std::string s ); +Datatype stringToDatatype(std::string s); -void -warnWrongDtype(std::string const& key, - Datatype store, - Datatype request); +void warnWrongDtype(std::string const &key, Datatype store, Datatype request); -std::ostream& -operator<<(std::ostream&, openPMD::Datatype const&); +std::ostream &operator<<(std::ostream &, openPMD::Datatype const &); } // namespace openPMD @@ -666,14 +839,12 @@ operator<<(std::ostream&, openPMD::Datatype const&); * * @{ */ -inline bool -operator==( openPMD::Datatype d, openPMD::Datatype e ) +inline bool operator==(openPMD::Datatype d, openPMD::Datatype e) { return openPMD::isSame(d, e); } -inline bool -operator!=( openPMD::Datatype d, openPMD::Datatype e ) +inline bool operator!=(openPMD::Datatype d, openPMD::Datatype e) { return !(d == e); } diff --git a/include/openPMD/DatatypeHelpers.hpp b/include/openPMD/DatatypeHelpers.hpp index 196e4075e0..c015e35c74 100644 --- a/include/openPMD/DatatypeHelpers.hpp +++ b/include/openPMD/DatatypeHelpers.hpp @@ -30,63 +30,58 @@ namespace openPMD { namespace detail { -// std::void_t is C++17 -template< typename > -using void_t = void; + // std::void_t is C++17 + template + using void_t = void; -/* - * Check whether class T has a member "errorMsg" convertible - * to type std::string. - * Used to give helpful compile-time error messages with static_assert - * down in CallUndefinedDatatype. - */ -template< typename T, typename = void > -struct HasErrorMessageMember -{ - static constexpr bool value = false; -}; + /* + * Check whether class T has a member "errorMsg" convertible + * to type std::string. + * Used to give helpful compile-time error messages with static_assert + * down in CallUndefinedDatatype. + */ + template + struct HasErrorMessageMember + { + static constexpr bool value = false; + }; -template< typename T > -struct HasErrorMessageMember< - T, - void_t< decltype( std::string( T::errorMsg ) ) > > -{ - static constexpr bool value = true; -}; + template + struct HasErrorMessageMember> + { + static constexpr bool value = true; + }; -/** - * Purpose of this struct is to detect at compile time whether - * Action::template operator()\<0\>() exists. If yes, call - * Action::template operator()\() with the passed arguments. - * If not, throw an error. - * - * @tparam n As in switchType(). - * @tparam ReturnType As in switchType(). - * @tparam Action As in switchType(). - * @tparam Args As in switchType(). - */ -template< - int n, - typename ReturnType, - typename Action, - typename... Args > -struct CallUndefinedDatatype -{ - static ReturnType call( Args &&... args ) + /** + * Purpose of this struct is to detect at compile time whether + * Action::template operator()\<0\>() exists. If yes, call + * Action::template operator()\() with the passed arguments. + * If not, throw an error. + * + * @tparam n As in switchType(). + * @tparam ReturnType As in switchType(). + * @tparam Action As in switchType(). + * @tparam Args As in switchType(). + */ + template + struct CallUndefinedDatatype { - if constexpr( HasErrorMessageMember< Action >::value ) - { - throw std::runtime_error( - "[" + std::string( Action::errorMsg ) + "] Unknown Datatype." ); - } - else + static ReturnType call(Args &&...args) { - return Action::template call< n >( std::forward< Args >( args )... 
); + if constexpr (HasErrorMessageMember::value) + { + throw std::runtime_error( + "[" + std::string(Action::errorMsg) + + "] Unknown Datatype."); + } + else + { + return Action::template call(std::forward(args)...); + } + throw std::runtime_error("Unreachable!"); } - throw std::runtime_error( "Unreachable!" ); - } -}; -} + }; +} // namespace detail /** * Generalizes switching over an openPMD datatype. @@ -102,130 +97,119 @@ struct CallUndefinedDatatype * @return Passes on the result of invoking the function template with the given * arguments and with the template parameter specified by dt. */ -template< typename Action, typename... Args > -auto switchType( Datatype dt, Args &&... args ) - -> decltype( Action::template call< char >( - std::forward< Args >( args )... ) ) +template +auto switchType(Datatype dt, Args &&...args) + -> decltype(Action::template call(std::forward(args)...)) { - using ReturnType = decltype( Action::template call< char >( - std::forward< Args >( args )... ) ); - switch( dt ) + using ReturnType = + decltype(Action::template call(std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return Action::template call< char >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::UCHAR: - return Action::template call< unsigned char >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::SHORT: - return Action::template call< short >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::INT: - return Action::template call< int >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONG: - return Action::template call< long >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONGLONG: - return Action::template call< long long >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::USHORT: - return Action::template call< unsigned short >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::UINT: - return Action::template call< unsigned int >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::ULONG: - return Action::template call< unsigned long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::ULONGLONG: - return Action::template call< unsigned long long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::FLOAT: - return Action::template call< float >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::DOUBLE: - return Action::template call< double >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONG_DOUBLE: - return Action::template call< long double >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::CFLOAT: - return Action::template call< std::complex< float > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::CDOUBLE: - return Action::template call< std::complex< double > >( - std::forward< Args >( args )... 
); + return Action::template call>( + std::forward(args)...); case Datatype::CLONG_DOUBLE: - return Action::template call< std::complex< long double > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::STRING: - return Action::template call< std::string >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::VEC_CHAR: - return Action::template call< std::vector< char > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_SHORT: - return Action::template call< std::vector< short > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_INT: - return Action::template call< std::vector< int > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_LONG: - return Action::template call< std::vector< long > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_LONGLONG: - return Action::template call< std::vector< long long > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_UCHAR: - return Action::template call< std::vector< unsigned char > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_USHORT: - return Action::template call< std::vector< unsigned short > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_UINT: - return Action::template call< std::vector< unsigned int > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_ULONG: - return Action::template call< std::vector< unsigned long > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_ULONGLONG: - return Action::template call< std::vector< unsigned long long > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_FLOAT: - return Action::template call< std::vector< float > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_DOUBLE: - return Action::template call< std::vector< double > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_LONG_DOUBLE: - return Action::template call< std::vector< long double > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::VEC_CFLOAT: - return Action::template call< std::vector< std::complex< float > > >( - std::forward< Args >( args )... ); + return Action::template call>>( + std::forward(args)...); case Datatype::VEC_CDOUBLE: - return Action::template call< std::vector< std::complex< double > > >( - std::forward< Args >( args )... ); + return Action::template call>>( + std::forward(args)...); case Datatype::VEC_CLONG_DOUBLE: - return Action::template call< - std::vector< std::complex< long double > > >( - std::forward< Args >( args )... ); + return Action::template call>>( + std::forward(args)...); case Datatype::VEC_STRING: - return Action::template call< std::vector< std::string > >( - std::forward< Args >( args )... 
); + return Action::template call>( + std::forward(args)...); case Datatype::ARR_DBL_7: - return Action::template call< std::array< double, 7 > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::BOOL: - return Action::template call< bool >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::UNDEFINED: - return detail::CallUndefinedDatatype< - 0, - ReturnType, - Action, - Args &&... >::call( std::forward< Args >( args )... ); + return detail:: + CallUndefinedDatatype<0, ReturnType, Action, Args &&...>::call( + std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } @@ -244,75 +228,65 @@ auto switchType( Datatype dt, Args &&... args ) * @return Passes on the result of invoking the function template with the given * arguments and with the template parameter specified by dt. */ -template< typename Action, typename... Args > -auto switchNonVectorType( Datatype dt, Args &&... args ) - -> decltype( Action::template call< char >( - std::forward< Args >( args )... ) ) +template +auto switchNonVectorType(Datatype dt, Args &&...args) + -> decltype(Action::template call(std::forward(args)...)) { - using ReturnType = decltype( Action::template call< char >( - std::forward< Args >( args )... ) ); - switch( dt ) + using ReturnType = + decltype(Action::template call(std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return Action::template call< char >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::UCHAR: - return Action::template call< unsigned char >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::SHORT: - return Action::template call< short >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::INT: - return Action::template call< int >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONG: - return Action::template call< long >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONGLONG: - return Action::template call< long long >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::USHORT: - return Action::template call< unsigned short >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::UINT: - return Action::template call< unsigned int >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::ULONG: - return Action::template call< unsigned long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::ULONGLONG: - return Action::template call< unsigned long long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::FLOAT: - return Action::template call< float >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::DOUBLE: - return Action::template call< double >( - std::forward< Args >( args )... 
); + return Action::template call(std::forward(args)...); case Datatype::LONG_DOUBLE: - return Action::template call< long double >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::CFLOAT: - return Action::template call< std::complex< float > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::CDOUBLE: - return Action::template call< std::complex< double > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::CLONG_DOUBLE: - return Action::template call< std::complex< long double > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::STRING: - return Action::template call< std::string >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::BOOL: - return Action::template call< bool >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::UNDEFINED: - return detail::CallUndefinedDatatype< - 0, - ReturnType, - Action, - Args &&... >::call( std::forward< Args >( args )... ); + return detail:: + CallUndefinedDatatype<0, ReturnType, Action, Args &&...>::call( + std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } -} +} // namespace openPMD diff --git a/include/openPMD/Error.hpp b/include/openPMD/Error.hpp index eea5cd56ff..4fcda11eeb 100644 --- a/include/openPMD/Error.hpp +++ b/include/openPMD/Error.hpp @@ -20,18 +20,17 @@ class Error : public std::exception std::string m_what; protected: - Error( std::string what ) : m_what( what ) - { - } + Error(std::string what) : m_what(what) + {} public: - virtual const char * what() const noexcept; + virtual const char *what() const noexcept; - Error( Error const & ) = default; - Error( Error && ) = default; + Error(Error const &) = default; + Error(Error &&) = default; - Error & operator=( Error const & ) = default; - Error & operator=( Error && ) = default; + Error &operator=(Error const &) = default; + Error &operator=(Error &&) = default; virtual ~Error() noexcept = default; }; @@ -48,8 +47,7 @@ namespace error { public: std::string backend; - OperationUnsupportedInBackend( - std::string backend_in, std::string what ); + OperationUnsupportedInBackend(std::string backend_in, std::string what); }; /** @@ -61,15 +59,15 @@ namespace error class WrongAPIUsage : public Error { public: - WrongAPIUsage( std::string what ); + WrongAPIUsage(std::string what); }; class BackendConfigSchema : public Error { public: - std::vector< std::string > errorLocation; + std::vector errorLocation; - BackendConfigSchema( std::vector< std::string >, std::string what ); + BackendConfigSchema(std::vector, std::string what); }; -} -} +} // namespace error +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp b/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp index 57f281a2c8..838725da94 100644 --- a/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp @@ -20,205 +20,215 @@ */ #pragma once +#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" -#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" 
#include #include #include -#include #include +#include #include #include - namespace openPMD { inline std::string -getBP1Extent(Extent const& e, std::string const& delimiter = ",") +getBP1Extent(Extent const &e, std::string const &delimiter = ",") { - switch( e.size() ) + switch (e.size()) { - case 0: - return ""; - case 1: - return std::to_string(e[0]); - default: - std::ostringstream os; - std::for_each(e.begin(), - e.end()-1, - [&os, &delimiter](std::uint64_t const ext) { os << std::to_string(ext) << delimiter; }); - os << std::to_string(*e.rbegin()); - return os.str(); + case 0: + return ""; + case 1: + return std::to_string(e[0]); + default: + std::ostringstream os; + std::for_each( + e.begin(), e.end() - 1, [&os, &delimiter](std::uint64_t const ext) { + os << std::to_string(ext) << delimiter; + }); + os << std::to_string(*e.rbegin()); + return os.str(); } } inline std::string -getZerosLikeBP1Extent(Extent const& e, std::string const& delimiter = ",") +getZerosLikeBP1Extent(Extent const &e, std::string const &delimiter = ",") { - switch( e.size() ) + switch (e.size()) { - case 0: - return ""; - case 1: - return "0"; - default: - std::ostringstream os; - std::for_each(e.begin(), - e.end()-1, - [&os, &delimiter](std::uint64_t const) { os << "0" << delimiter; }); - os << "0"; - return os.str(); + case 0: + return ""; + case 1: + return "0"; + default: + std::ostringstream os; + std::for_each( + e.begin(), e.end() - 1, [&os, &delimiter](std::uint64_t const) { + os << "0" << delimiter; + }); + os << "0"; + return os.str(); } } -inline ADIOS_DATATYPES -getBP1DataType(Datatype dtype) +inline ADIOS_DATATYPES getBP1DataType(Datatype dtype) { using DT = Datatype; // note the ill-named fixed-byte adios_... types // https://github.com/ornladios/ADIOS/issues/187 - switch( dtype ) + switch (dtype) { - case DT::CHAR: - case DT::VEC_CHAR: - return adios_byte; - case DT::UCHAR: - case DT::VEC_UCHAR: - case DT::BOOL: - return adios_unsigned_byte; - case DT::SHORT: - case DT::VEC_SHORT: - if( sizeof(short) == 2u ) - return adios_short; - else if( sizeof(short) == 4u ) - return adios_integer; - else if( sizeof(long) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::SHORT found."); - case DT::INT: - case DT::VEC_INT: - if( sizeof(int) == 2u ) - return adios_short; - else if( sizeof(int) == 4u ) - return adios_integer; - else if( sizeof(int) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::INT found."); - case DT::LONG: - case DT::VEC_LONG: - if( sizeof(long) == 2u ) - return adios_short; - else if( sizeof(long) == 4u ) - return adios_integer; - else if( sizeof(long) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::LONG found."); - case DT::LONGLONG: - case DT::VEC_LONGLONG: - if( sizeof(long long) == 2u ) - return adios_short; - else if( sizeof(long long) == 4u ) - return adios_integer; - else if( sizeof(long long) == 8u ) - return adios_long; - else - throw unsupported_data_error("No native equivalent for Datatype::LONGLONG found."); - case DT::USHORT: - case DT::VEC_USHORT: - if( sizeof(unsigned short) == 2u ) - return adios_unsigned_short; - else if( sizeof(unsigned short) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned long) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::USHORT found."); - case DT::UINT: - case DT::VEC_UINT: - if( sizeof(unsigned int) == 2u ) 
- return adios_unsigned_short; - else if( sizeof(unsigned int) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned int) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::UINT found."); - case DT::ULONG: - case DT::VEC_ULONG: - if( sizeof(unsigned long) == 2u ) - return adios_unsigned_short; - else if( sizeof(unsigned long) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned long) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::ULONG found."); - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - if( sizeof(unsigned long long) == 2u ) - return adios_unsigned_short; - else if( sizeof(unsigned long long) == 4u ) - return adios_unsigned_integer; - else if( sizeof(unsigned long long) == 8u ) - return adios_unsigned_long; - else - throw unsupported_data_error("No native equivalent for Datatype::ULONGLONG found."); - case DT::FLOAT: - case DT::VEC_FLOAT: - return adios_real; - case DT::DOUBLE: - case DT::ARR_DBL_7: - case DT::VEC_DOUBLE: - return adios_double; - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return adios_long_double; - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return adios_complex; - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return adios_double_complex; - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - throw unsupported_data_error("No native equivalent for Datatype::CLONG_DOUBLE found."); - case DT::STRING: - return adios_string; - case DT::VEC_STRING: - return adios_string_array; - case DT::UNDEFINED: - throw std::runtime_error("Unknown Attribute datatype (ADIOS datatype)"); - default: - throw std::runtime_error("Datatype not implemented in ADIOS IO"); + case DT::CHAR: + case DT::VEC_CHAR: + return adios_byte; + case DT::UCHAR: + case DT::VEC_UCHAR: + case DT::BOOL: + return adios_unsigned_byte; + case DT::SHORT: + case DT::VEC_SHORT: + if (sizeof(short) == 2u) + return adios_short; + else if (sizeof(short) == 4u) + return adios_integer; + else if (sizeof(long) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::SHORT found."); + case DT::INT: + case DT::VEC_INT: + if (sizeof(int) == 2u) + return adios_short; + else if (sizeof(int) == 4u) + return adios_integer; + else if (sizeof(int) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::INT found."); + case DT::LONG: + case DT::VEC_LONG: + if (sizeof(long) == 2u) + return adios_short; + else if (sizeof(long) == 4u) + return adios_integer; + else if (sizeof(long) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::LONG found."); + case DT::LONGLONG: + case DT::VEC_LONGLONG: + if (sizeof(long long) == 2u) + return adios_short; + else if (sizeof(long long) == 4u) + return adios_integer; + else if (sizeof(long long) == 8u) + return adios_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::LONGLONG found."); + case DT::USHORT: + case DT::VEC_USHORT: + if (sizeof(unsigned short) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned short) == 4u) + return adios_unsigned_integer; + else if (sizeof(unsigned long) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::USHORT found."); + case DT::UINT: + case DT::VEC_UINT: + if (sizeof(unsigned int) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned int) == 
4u) + return adios_unsigned_integer; + else if (sizeof(unsigned int) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::UINT found."); + case DT::ULONG: + case DT::VEC_ULONG: + if (sizeof(unsigned long) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned long) == 4u) + return adios_unsigned_integer; + else if (sizeof(unsigned long) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::ULONG found."); + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + if (sizeof(unsigned long long) == 2u) + return adios_unsigned_short; + else if (sizeof(unsigned long long) == 4u) + return adios_unsigned_integer; + else if (sizeof(unsigned long long) == 8u) + return adios_unsigned_long; + else + throw unsupported_data_error( + "No native equivalent for Datatype::ULONGLONG found."); + case DT::FLOAT: + case DT::VEC_FLOAT: + return adios_real; + case DT::DOUBLE: + case DT::ARR_DBL_7: + case DT::VEC_DOUBLE: + return adios_double; + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return adios_long_double; + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return adios_complex; + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return adios_double_complex; + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + throw unsupported_data_error( + "No native equivalent for Datatype::CLONG_DOUBLE found."); + case DT::STRING: + return adios_string; + case DT::VEC_STRING: + return adios_string_array; + case DT::UNDEFINED: + throw std::runtime_error("Unknown Attribute datatype (ADIOS datatype)"); + default: + throw std::runtime_error("Datatype not implemented in ADIOS IO"); } } -inline std::string -concrete_bp1_file_position(Writable* w) +inline std::string concrete_bp1_file_position(Writable *w) { - std::stack< Writable* > hierarchy; - if( !w->abstractFilePosition ) + std::stack hierarchy; + if (!w->abstractFilePosition) w = w->parent; - while( w ) + while (w) { hierarchy.push(w); w = w->parent; } std::string pos; - while( !hierarchy.empty() ) + while (!hierarchy.empty()) { - auto const tmp_ptr = std::dynamic_pointer_cast< ADIOS1FilePosition >(hierarchy.top()->abstractFilePosition); - if( tmp_ptr == nullptr ) - throw std::runtime_error("Dynamic pointer cast returned a nullptr!"); + auto const tmp_ptr = std::dynamic_pointer_cast( + hierarchy.top()->abstractFilePosition); + if (tmp_ptr == nullptr) + throw std::runtime_error( + "Dynamic pointer cast returned a nullptr!"); pos += tmp_ptr->location; hierarchy.pop(); } @@ -227,15 +237,15 @@ concrete_bp1_file_position(Writable* w) } inline std::string -getEnvNum(std::string const& key, std::string const& defaultValue) +getEnvNum(std::string const &key, std::string const &defaultValue) { - char const* env = std::getenv(key.c_str()); - if( env != nullptr ) + char const *env = std::getenv(key.c_str()); + if (env != nullptr) { - char const* tmp = env; - while( tmp ) + char const *tmp = env; + while (tmp) { - if( isdigit(*tmp) ) + if (isdigit(*tmp)) ++tmp; else { @@ -243,23 +253,23 @@ getEnvNum(std::string const& key, std::string const& defaultValue) break; } } - if( !tmp ) + if (!tmp) return std::string(env, std::strlen(env)); else return defaultValue; - } else + } + else return defaultValue; } -template -inline Attribute -readVectorAttributeInternal( void* data, int size ) +template +inline Attribute readVectorAttributeInternal(void *data, int size) { - auto d = reinterpret_cast< T* >(data); - std::vector< T > v; + auto d = reinterpret_cast(data); + 
std::vector<T> v;
     v.resize(size);
-    for( int i = 0; i < size; ++i )
+    for (int i = 0; i < size; ++i)
         v[i] = d[i];
     return Attribute(v);
 }
-} // openPMD
+} // namespace openPMD
diff --git a/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp b/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp
index e37e086597..0d1d7ab619 100644
--- a/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp
+++ b/include/openPMD/IO/ADIOS/ADIOS1FilePosition.hpp
@@ -22,15 +22,15 @@
 #include "openPMD/IO/AbstractFilePosition.hpp"
 
+#include <string>
+
 namespace openPMD
 {
 struct ADIOS1FilePosition : public AbstractFilePosition
 {
-    ADIOS1FilePosition(std::string const& s)
-        : location{s}
-    { }
+    ADIOS1FilePosition(std::string const &s) : location{s}
+    {}
 
     std::string location;
-}; //ADIOS1FilePosition
-} // openPMD
+}; // ADIOS1FilePosition
+} // namespace openPMD
diff --git a/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp
index 4b2ff08e4a..ead72e2a5e 100644
--- a/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp
+++ b/include/openPMD/IO/ADIOS/ADIOS1IOHandler.hpp
@@ -20,57 +20,62 @@
  */
 #pragma once
 
-#include "openPMD/config.hpp"
+#include "openPMD/IO/AbstractIOHandler.hpp"
 #include "openPMD/auxiliary/Export.hpp"
 #include "openPMD/auxiliary/JSON_internal.hpp"
-#include "openPMD/IO/AbstractIOHandler.hpp"
+#include "openPMD/config.hpp"
 
 #include
 #include
 #include
 
 #if openPMD_HAVE_ADIOS1
-# include
+#include
 #endif
-
 namespace openPMD
 {
-    class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl;
+class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl;
 
 #if openPMD_HAVE_ADIOS1
-    class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler
-    {
-        friend class ADIOS1IOHandlerImpl;
+class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler
+{
+    friend class ADIOS1IOHandlerImpl;
 
-    public:
-        ADIOS1IOHandler(std::string path, Access, json::TracingJSON );
-        ~ADIOS1IOHandler() override;
+public:
+    ADIOS1IOHandler(std::string path, Access, json::TracingJSON);
+    ~ADIOS1IOHandler() override;
 
-        std::string backendName() const override { return "ADIOS1"; }
+    std::string backendName() const override
+    {
+        return "ADIOS1";
+    }
 
-        std::future< void > flush() override;
+    std::future<void> flush() override;
 
-        void enqueue(IOTask const&) override;
+    void enqueue(IOTask const &) override;
 
-    private:
-        std::queue< IOTask > m_setup;
-        std::unique_ptr< ADIOS1IOHandlerImpl > m_impl;
-    }; // ADIOS1IOHandler
+private:
+    std::queue<IOTask> m_setup;
+    std::unique_ptr<ADIOS1IOHandlerImpl> m_impl;
+}; // ADIOS1IOHandler
 #else
-    class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler
-    {
-        friend class ADIOS1IOHandlerImpl;
+class OPENPMDAPI_EXPORT ADIOS1IOHandler : public AbstractIOHandler
+{
+    friend class ADIOS1IOHandlerImpl;
 
-    public:
-        ADIOS1IOHandler(std::string path, Access, json::TracingJSON );
-        ~ADIOS1IOHandler() override;
+public:
+    ADIOS1IOHandler(std::string path, Access, json::TracingJSON);
+    ~ADIOS1IOHandler() override;
 
-        std::string backendName() const override { return "DUMMY_ADIOS1"; }
+    std::string backendName() const override
+    {
+        return "DUMMY_ADIOS1";
+    }
 
-        std::future< void > flush() override;
+    std::future<void> flush() override;
 
-    private:
-        std::unique_ptr< ADIOS1IOHandlerImpl > m_impl;
-    }; // ADIOS1IOHandler
+private:
+    std::unique_ptr<ADIOS1IOHandlerImpl> m_impl;
+}; // ADIOS1IOHandler
 #endif
-} // openPMD
+} // namespace openPMD
diff --git a/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp b/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp
index 9b7d1e48a4..27bbdbcf59 100644
--- a/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp
+++
b/include/openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp @@ -20,46 +20,45 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/Export.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/Export.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS1 -# include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" +#include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" #endif #include #include #include #if openPMD_HAVE_ADIOS1 -# include -# include +#include +#include #endif - namespace openPMD { #if openPMD_HAVE_ADIOS1 - class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl - : public CommonADIOS1IOHandlerImpl< ADIOS1IOHandlerImpl > - { - private: - using Base_t = CommonADIOS1IOHandlerImpl< ADIOS1IOHandlerImpl >; - public: - ADIOS1IOHandlerImpl(AbstractIOHandler*, json::TracingJSON); - virtual ~ADIOS1IOHandlerImpl(); +class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl + : public CommonADIOS1IOHandlerImpl +{ +private: + using Base_t = CommonADIOS1IOHandlerImpl; + +public: + ADIOS1IOHandlerImpl(AbstractIOHandler *, json::TracingJSON); + virtual ~ADIOS1IOHandlerImpl(); - virtual void init(); + virtual void init(); - std::future< void > flush() override; + std::future flush() override; - virtual int64_t open_write(Writable *); - virtual ADIOS_FILE* open_read(std::string const & name); - int64_t initialize_group(std::string const& name); - }; // ADIOS1IOHandlerImpl + virtual int64_t open_write(Writable *); + virtual ADIOS_FILE *open_read(std::string const &name); + int64_t initialize_group(std::string const &name); +}; // ADIOS1IOHandlerImpl #else - class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl - { - }; // ADIOS1IOHandlerImpl +class OPENPMDAPI_EXPORT ADIOS1IOHandlerImpl +{}; // ADIOS1IOHandlerImpl #endif -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp b/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp index 06ef56d918..c5b39be87b 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp @@ -23,16 +23,16 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS2 -# include "openPMD/Dataset.hpp" -# include "openPMD/Datatype.hpp" -# include "openPMD/DatatypeHelpers.hpp" +#include "openPMD/Dataset.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/DatatypeHelpers.hpp" -# include +#include -# include -# include -# include -# include +#include +#include +#include +#include namespace openPMD { @@ -43,33 +43,37 @@ namespace detail // we represent booleans as unsigned chars using bool_representation = unsigned char; - template < typename T > struct ToDatatypeHelper + template + struct ToDatatypeHelper { - static std::string type( ); + static std::string type(); }; - template < typename T > struct ToDatatypeHelper< std::vector< T > > + template + struct ToDatatypeHelper> { - static std::string type( ); + static std::string type(); }; - template < typename T, size_t n > - struct ToDatatypeHelper< std::array< T, n > > + template + struct ToDatatypeHelper> { - static std::string type( ); + static std::string type(); }; - template <> struct ToDatatypeHelper< bool > + template <> + struct ToDatatypeHelper { - static std::string type( ); + static std::string type(); }; struct ToDatatype { - template < typename T > std::string operator( )( ); + template + std::string operator()(); - - template < int n > std::string operator( )( ); + template + std::string operator()(); }; /** @@ -78,7 +82,7 @@ namespace detail * @param verbose If false, don't print warnings. 
* @return */ - Datatype fromADIOS2Type( std::string const & dt, bool verbose = true ); + Datatype fromADIOS2Type(std::string const &dt, bool verbose = true); enum class VariableOrAttribute : unsigned char { @@ -88,14 +92,14 @@ namespace detail struct AttributeInfo { - template< typename T > + template static Extent call( adios2::IO &, - std::string const & attributeName, - VariableOrAttribute ); + std::string const &attributeName, + VariableOrAttribute); - template < int n, typename... Params > - static Extent call( Params &&... ); + template + static Extent call(Params &&...); }; /** @@ -110,12 +114,11 @@ namespace detail * @return The openPMD datatype corresponding to the type of the attribute. * UNDEFINED if attribute is not found. */ - Datatype - attributeInfo( - adios2::IO & IO, - std::string const & attributeName, + Datatype attributeInfo( + adios2::IO &IO, + std::string const &attributeName, bool verbose, - VariableOrAttribute voa = VariableOrAttribute::Attribute ); + VariableOrAttribute voa = VariableOrAttribute::Attribute); } // namespace detail /** @@ -133,75 +136,65 @@ namespace detail * @return Passes on the result of invoking the function template with the given * arguments and with the template parameter specified by dt. */ -template< typename Action, typename... Args > -auto switchAdios2AttributeType( Datatype dt, Args &&... args ) - -> decltype( Action::template call< char >( - std::forward< Args >( args )... ) ) +template +auto switchAdios2AttributeType(Datatype dt, Args &&...args) + -> decltype(Action::template call(std::forward(args)...)) { - using ReturnType = decltype( Action::template call< char >( - std::forward< Args >( args )... ) ); - switch( dt ) + using ReturnType = + decltype(Action::template call(std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return Action::template call< char >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::UCHAR: - return Action::template call< unsigned char >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::SHORT: - return Action::template call< short >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::INT: - return Action::template call< int >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONG: - return Action::template call< long >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONGLONG: - return Action::template call< long long >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::USHORT: - return Action::template call< unsigned short >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::UINT: - return Action::template call< unsigned int >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::ULONG: - return Action::template call< unsigned long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::ULONGLONG: - return Action::template call< unsigned long long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::FLOAT: - return Action::template call< float >( - std::forward< Args >( args )... 
); + return Action::template call(std::forward(args)...); case Datatype::DOUBLE: - return Action::template call< double >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONG_DOUBLE: - return Action::template call< long double >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::CFLOAT: - return Action::template call< std::complex< float > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::CDOUBLE: - return Action::template call< std::complex< double > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); // missing std::complex< long double > type in ADIOS2 v2.6.0 // case Datatype::CLONG_DOUBLE: // return action // .OPENPMD_TEMPLATE_OPERATOR()< std::complex< long double > >( // std::forward< Args >( args )... ); case Datatype::STRING: - return Action::template call< std::string >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::UNDEFINED: - return detail::CallUndefinedDatatype< - 0, - ReturnType, - Action, - Args &&... >::call( std::forward< Args >( args )... ); + return detail:: + CallUndefinedDatatype<0, ReturnType, Action, Args &&...>::call( + std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } @@ -221,74 +214,63 @@ auto switchAdios2AttributeType( Datatype dt, Args &&... args ) * @return Passes on the result of invoking the function template with the given * arguments and with the template parameter specified by dt. */ -template< typename Action, typename... Args > -auto switchAdios2VariableType( Datatype dt, Args &&... args ) - -> decltype( - Action::template call < char > - ( std::forward< Args >( args )... ) ) +template +auto switchAdios2VariableType(Datatype dt, Args &&...args) + -> decltype(Action::template call(std::forward(args)...)) { - using ReturnType = decltype( Action::template call< char >( - std::forward< Args >( args )... ) ); - switch( dt ) + using ReturnType = + decltype(Action::template call(std::forward(args)...)); + switch (dt) { case Datatype::CHAR: - return Action::template call< char >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::UCHAR: - return Action::template call< unsigned char >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::SHORT: - return Action::template call< short >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::INT: - return Action::template call< int >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONG: - return Action::template call< long >( std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONGLONG: - return Action::template call< long long >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::USHORT: - return Action::template call< unsigned short >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::UINT: - return Action::template call< unsigned int >( - std::forward< Args >( args )... 
); + return Action::template call(std::forward(args)...); case Datatype::ULONG: - return Action::template call< unsigned long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::ULONGLONG: - return Action::template call< unsigned long long >( - std::forward< Args >( args )... ); + return Action::template call( + std::forward(args)...); case Datatype::FLOAT: - return Action::template call< float >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::DOUBLE: - return Action::template call< double >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::LONG_DOUBLE: - return Action::template call< long double >( - std::forward< Args >( args )... ); + return Action::template call(std::forward(args)...); case Datatype::CFLOAT: - return Action::template call< std::complex< float > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); case Datatype::CDOUBLE: - return Action::template call< std::complex< double > >( - std::forward< Args >( args )... ); + return Action::template call>( + std::forward(args)...); // missing std::complex< long double > type in ADIOS2 v2.6.0 // case Datatype::CLONG_DOUBLE: // return action // .OPENPMD_TEMPLATE_OPERATOR()< std::complex< long double > >( // std::forward< Args >( args )... ); case Datatype::UNDEFINED: - return detail::CallUndefinedDatatype< - 0, - ReturnType, - Action, - Args &&... >:: - call( std::forward< Args >( args )... ); + return detail:: + CallUndefinedDatatype<0, ReturnType, Action, Args &&...>::call( + std::forward(args)...); default: throw std::runtime_error( "Internal error: Encountered unknown datatype (switchType) ->" + - std::to_string( static_cast< int >( dt ) ) ); + std::to_string(static_cast(dt))); } } } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp b/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp index 3bff7e80dd..7203c33077 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2FilePosition.hpp @@ -20,50 +20,35 @@ */ #pragma once - #include "openPMD/IO/AbstractFilePosition.hpp" #include #include - namespace openPMD { - struct ADIOS2FilePosition : - public AbstractFilePosition +struct ADIOS2FilePosition : public AbstractFilePosition +{ + enum class GD { - enum class GD - { - GROUP, - DATASET - }; - - - ADIOS2FilePosition( - std::string s, - GD groupOrDataset - ) : - location { std::move( s ) }, - gd { groupOrDataset } - {} - - - explicit ADIOS2FilePosition( GD groupOrDataset ) : - ADIOS2FilePosition { - "/", - groupOrDataset - } - {} - - - ADIOS2FilePosition( ) : - ADIOS2FilePosition{ GD::GROUP } - {} - - - /** - * Convention: Starts with slash '/', ends without. - */ - std::string location; - GD gd; - }; // ADIOS2FilePosition -} // openPMD + GROUP, + DATASET + }; + + ADIOS2FilePosition(std::string s, GD groupOrDataset) + : location{std::move(s)}, gd{groupOrDataset} + {} + + explicit ADIOS2FilePosition(GD groupOrDataset) + : ADIOS2FilePosition{"/", groupOrDataset} + {} + + ADIOS2FilePosition() : ADIOS2FilePosition{GD::GROUP} + {} + + /** + * Convention: Starts with slash '/', ends without. 
+ */ + std::string location; + GD gd; +}; // ADIOS2FilePosition +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp index 2073851239..02da0569ad 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2IOHandler.hpp @@ -20,24 +20,24 @@ */ #pragma once -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/IO/AbstractIOHandlerImpl.hpp" -#include "openPMD/IO/AbstractIOHandlerImplCommon.hpp" #include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/IO/ADIOS/ADIOS2FilePosition.hpp" #include "openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp" +#include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/IO/AbstractIOHandlerImplCommon.hpp" #include "openPMD/IO/IOTask.hpp" #include "openPMD/IO/InvalidatableFile.hpp" +#include "openPMD/IterationEncoding.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" #include "openPMD/backend/Writable.hpp" #include "openPMD/config.hpp" -#include "openPMD/IterationEncoding.hpp" #if openPMD_HAVE_ADIOS2 -# include +#include #endif #if openPMD_HAVE_MPI -# include +#include #endif #include @@ -53,7 +53,6 @@ #include // pair #include - namespace openPMD { #if openPMD_HAVE_ADIOS2 @@ -62,17 +61,20 @@ class ADIOS2IOHandler; namespace detail { - template < typename, typename > struct DatasetHelper; + template + struct DatasetHelper; struct GetSpan; struct DatasetReader; struct AttributeReader; struct AttributeWriter; struct OldAttributeReader; struct OldAttributeWriter; - template < typename > struct AttributeTypes; + template + struct AttributeTypes; struct DatasetOpener; struct VariableDefiner; - template < typename > struct DatasetTypes; + template + struct DatasetTypes; struct WriteDataset; struct BufferedActions; struct BufferedPut; @@ -81,7 +83,6 @@ namespace detail struct BufferedAttributeWrite; } // namespace detail - namespace ADIOS2Schema { using schema_t = uint64_t; @@ -99,129 +100,116 @@ namespace ADIOS2Schema s_0000_00_00, s_2021_02_09 }; -} +} // namespace ADIOS2Schema using SupportedSchema = ADIOS2Schema::SupportedSchema; class ADIOS2IOHandlerImpl -: public AbstractIOHandlerImplCommon< ADIOS2FilePosition > + : public AbstractIOHandlerImplCommon { - template < typename, typename > friend struct detail::DatasetHelper; + template + friend struct detail::DatasetHelper; friend struct detail::GetSpan; friend struct detail::DatasetReader; friend struct detail::AttributeReader; friend struct detail::AttributeWriter; friend struct detail::OldAttributeReader; friend struct detail::OldAttributeWriter; - template < typename > friend struct detail::AttributeTypes; + template + friend struct detail::AttributeTypes; friend struct detail::DatasetOpener; friend struct detail::VariableDefiner; - template < typename > friend struct detail::DatasetTypes; + template + friend struct detail::DatasetTypes; friend struct detail::WriteDataset; friend struct detail::BufferedActions; friend struct detail::BufferedAttributeRead; static constexpr bool ADIOS2_DEBUG_MODE = false; - public: - #if openPMD_HAVE_MPI ADIOS2IOHandlerImpl( AbstractIOHandler *, MPI_Comm, json::TracingJSON config, - std::string engineType ); + std::string engineType); #endif // openPMD_HAVE_MPI explicit ADIOS2IOHandlerImpl( - AbstractIOHandler *, - json::TracingJSON config, - std::string engineType ); - + AbstractIOHandler *, json::TracingJSON config, std::string engineType); ~ADIOS2IOHandlerImpl() override; - std::future< void > flush( 
) override; - - void createFile( Writable *, - Parameter< Operation::CREATE_FILE > const & ) override; - - void createPath( Writable *, - Parameter< Operation::CREATE_PATH > const & ) override; + std::future flush() override; void - createDataset( Writable *, - Parameter< Operation::CREATE_DATASET > const & ) override; + createFile(Writable *, Parameter const &) override; void - extendDataset( Writable *, - Parameter< Operation::EXTEND_DATASET > const & ) override; + createPath(Writable *, Parameter const &) override; + + void createDataset( + Writable *, Parameter const &) override; - void openFile( Writable *, - Parameter< Operation::OPEN_FILE > const & ) override; + void extendDataset( + Writable *, Parameter const &) override; - void closeFile( Writable *, - Parameter< Operation::CLOSE_FILE > const & ) override; + void openFile(Writable *, Parameter const &) override; - void openPath( Writable *, - Parameter< Operation::OPEN_PATH > const & ) override; + void + closeFile(Writable *, Parameter const &) override; - void closePath( Writable *, - Parameter< Operation::CLOSE_PATH > const & ) override; + void openPath(Writable *, Parameter const &) override; - void openDataset( Writable *, - Parameter< Operation::OPEN_DATASET > & ) override; + void + closePath(Writable *, Parameter const &) override; - void deleteFile( Writable *, - Parameter< Operation::DELETE_FILE > const & ) override; + void openDataset(Writable *, Parameter &) override; - void deletePath( Writable *, - Parameter< Operation::DELETE_PATH > const & ) override; + void + deleteFile(Writable *, Parameter const &) override; void - deleteDataset( Writable *, - Parameter< Operation::DELETE_DATASET > const & ) override; + deletePath(Writable *, Parameter const &) override; - void deleteAttribute( Writable *, - Parameter< Operation::DELETE_ATT > const & ) override; + void deleteDataset( + Writable *, Parameter const &) override; - void writeDataset( Writable *, - Parameter< Operation::WRITE_DATASET > const & ) override; + void deleteAttribute( + Writable *, Parameter const &) override; - void writeAttribute( Writable *, - Parameter< Operation::WRITE_ATT > const & ) override; + void writeDataset( + Writable *, Parameter const &) override; - void readDataset( Writable *, - Parameter< Operation::READ_DATASET > & ) override; + void writeAttribute( + Writable *, Parameter const &) override; - void getBufferView( Writable *, - Parameter< Operation::GET_BUFFER_VIEW > & ) override; + void readDataset(Writable *, Parameter &) override; - void readAttribute( Writable *, - Parameter< Operation::READ_ATT > & ) override; + void + getBufferView(Writable *, Parameter &) override; - void listPaths( Writable *, Parameter< Operation::LIST_PATHS > & ) override; + void readAttribute(Writable *, Parameter &) override; - void listDatasets( Writable *, - Parameter< Operation::LIST_DATASETS > & ) override; + void listPaths(Writable *, Parameter &) override; void - listAttributes( Writable *, - Parameter< Operation::LIST_ATTS > & parameters ) override; + listDatasets(Writable *, Parameter &) override; - void - advance( Writable*, Parameter< Operation::ADVANCE > & ) override; + void listAttributes( + Writable *, Parameter ¶meters) override; - void - availableChunks( Writable*, - Parameter< Operation::AVAILABLE_CHUNKS > &) override; + void advance(Writable *, Parameter &) override; + + void availableChunks( + Writable *, Parameter &) override; /** * @brief The ADIOS2 access type to chose for Engines opened * within this instance. 
*/ - adios2::Mode adios2AccessMode( std::string const & fullPath ); + adios2::Mode adios2AccessMode(std::string const &fullPath); private: adios2::ADIOS m_ADIOS; @@ -253,7 +241,7 @@ class ADIOS2IOHandlerImpl inline SupportedSchema schema() const { - switch( m_schema ) + switch (m_schema) { case ADIOS2Schema::schema_0000_00_00: return SupportedSchema::s_0000_00_00; @@ -262,20 +250,20 @@ class ADIOS2IOHandlerImpl default: throw std::runtime_error( "[ADIOS2] Encountered unsupported schema version: " + - std::to_string( m_schema ) ); + std::to_string(m_schema)); } } inline AttributeLayout attributeLayout() const { - switch( schema() ) + switch (schema()) { case SupportedSchema::s_0000_00_00: return AttributeLayout::ByAdiosAttributes; case SupportedSchema::s_2021_02_09: return AttributeLayout::ByAdiosVariables; } - throw std::runtime_error( "Unreachable!" ); + throw std::runtime_error("Unreachable!"); } struct ParameterizedOperator @@ -284,21 +272,19 @@ class ADIOS2IOHandlerImpl adios2::Params params; }; - std::vector< ParameterizedOperator > defaultOperators; + std::vector defaultOperators; json::TracingJSON m_config; static json::TracingJSON nullvalue; - void - init( json::TracingJSON config ); + void init(json::TracingJSON config); - template< typename Key > - json::TracingJSON - config( Key && key, json::TracingJSON & cfg ) + template + json::TracingJSON config(Key &&key, json::TracingJSON &cfg) { - if( cfg.json().is_object() && cfg.json().contains( key ) ) + if (cfg.json().is_object() && cfg.json().contains(key)) { - return cfg[ key ]; + return cfg[key]; } else { @@ -306,11 +292,10 @@ class ADIOS2IOHandlerImpl } } - template< typename Key > - json::TracingJSON - config( Key && key ) + template + json::TracingJSON config(Key &&key) { - return config< Key >( std::forward< Key >( key ), m_config ); + return config(std::forward(key), m_config); } /** @@ -320,15 +305,13 @@ class ADIOS2IOHandlerImpl * @return first parameter: the operators, second parameters: whether * operators have been configured */ - std::optional< std::vector< ParameterizedOperator > > - getOperators( json::TracingJSON config ); + std::optional> + getOperators(json::TracingJSON config); // use m_config - std::optional< std::vector< ParameterizedOperator > > - getOperators(); + std::optional> getOperators(); - std::string - fileSuffix() const; + std::string fileSuffix() const; /* * We need to give names to IO objects. These names are irrelevant @@ -350,31 +333,32 @@ class ADIOS2IOHandlerImpl * IO and Engine object. * Not to be accessed directly, use getFileData(). */ - std::unordered_map< InvalidatableFile, - std::unique_ptr< detail::BufferedActions > - > m_fileData; + std::unordered_map< + InvalidatableFile, + std::unique_ptr> + m_fileData; - std::map< std::string, adios2::Operator > m_operators; + std::map m_operators; // Overrides from AbstractIOHandlerImplCommon. std::string - filePositionToString( std::shared_ptr< ADIOS2FilePosition > ) override; + filePositionToString(std::shared_ptr) override; - std::shared_ptr< ADIOS2FilePosition > - extendFilePosition( std::shared_ptr< ADIOS2FilePosition > const & pos, - std::string extend ) override; + std::shared_ptr extendFilePosition( + std::shared_ptr const &pos, + std::string extend) override; // Helper methods. - std::optional< adios2::Operator > - getCompressionOperator( std::string const & compression ); + std::optional + getCompressionOperator(std::string const &compression); /* * The name of the ADIOS2 variable associated with this Writable. 
* To be used for Writables that represent a dataset. */ - std::string nameOfVariable( Writable * writable ); + std::string nameOfVariable(Writable *writable); /** * @brief nameOfAttribute @@ -385,13 +369,13 @@ class ADIOS2IOHandlerImpl * (possibly the empty string, representing no variable) * and the actual name. */ - std::string nameOfAttribute( Writable * writable, std::string attribute ); + std::string nameOfAttribute(Writable *writable, std::string attribute); /* * Figure out whether the Writable corresponds with a * group or a dataset. */ - ADIOS2FilePosition::GD groupOrDataset( Writable * ); + ADIOS2FilePosition::GD groupOrDataset(Writable *); enum class IfFileNotOpen : bool { @@ -399,10 +383,9 @@ class ADIOS2IOHandlerImpl ThrowError }; - detail::BufferedActions & - getFileData( InvalidatableFile file, IfFileNotOpen ); + detail::BufferedActions &getFileData(InvalidatableFile file, IfFileNotOpen); - void dropFileData( InvalidatableFile file ); + void dropFileData(InvalidatableFile file); /* * Prepare a variable that already exists for an IO @@ -412,10 +395,12 @@ class ADIOS2IOHandlerImpl * (3) setting the offset and extent (ADIOS lingo: start * and count) */ - template < typename T > - adios2::Variable< T > verifyDataset( Offset const & offset, - Extent const & extent, adios2::IO & IO, - std::string const & var ); + template + adios2::Variable verifyDataset( + Offset const &offset, + Extent const &extent, + adios2::IO &IO, + std::string const &var); }; // ADIOS2IOHandlerImpl /* @@ -424,7 +409,7 @@ class ADIOS2IOHandlerImpl */ namespace ADIOS2Defaults { - using const_str = char const * const; + using const_str = char const *const; constexpr const_str str_engine = "engine"; constexpr const_str str_type = "type"; constexpr const_str str_params = "parameters"; @@ -441,95 +426,94 @@ namespace detail { // Helper structs for calls to the switchType function - template< typename T > + template inline constexpr bool IsUnsupportedComplex_v = - std::is_same_v< T, std::complex< long double > > || - std::is_same_v< T, std::vector< std::complex< long double > > >; + std::is_same_v> || + std::is_same_v>>; struct DatasetReader { - template< typename T > + template static void call( - ADIOS2IOHandlerImpl * impl, - BufferedGet & bp, - adios2::IO & IO, - adios2::Engine & engine, - std::string const & fileName ); + ADIOS2IOHandlerImpl *impl, + BufferedGet &bp, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &fileName); - static constexpr char const * errorMsg = "ADIOS2: readDataset()"; + static constexpr char const *errorMsg = "ADIOS2: readDataset()"; }; struct OldAttributeReader { - template< typename T > + template static Datatype call( - adios2::IO & IO, + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - template< int n, typename... Params > - static Datatype call( Params &&... ); + template + static Datatype call(Params &&...); }; struct OldAttributeWriter { - template< typename T > + template static void call( - ADIOS2IOHandlerImpl * impl, - Writable * writable, - const Parameter< Operation::WRITE_ATT > & parameters ); + ADIOS2IOHandlerImpl *impl, + Writable *writable, + const Parameter ¶meters); - template< int n, typename... Params > - static void call( Params &&... 
); + template + static void call(Params &&...); }; struct AttributeReader { - template< typename T > + template static Datatype call( - adios2::IO & IO, - detail::PreloadAdiosAttributes const & preloadedAttributes, + adios2::IO &IO, + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - template< int n, typename... Params > - static Datatype call( Params &&... ); + template + static Datatype call(Params &&...); }; struct AttributeWriter { - template< typename T > - static void call( - detail::BufferedAttributeWrite & params, - BufferedActions & fileData ); + template + static void + call(detail::BufferedAttributeWrite ¶ms, BufferedActions &fileData); - template< int n, typename... Params > - static void call( Params &&... ); + template + static void call(Params &&...); }; struct DatasetOpener { - template< typename T > + template static void call( - ADIOS2IOHandlerImpl * impl, + ADIOS2IOHandlerImpl *impl, InvalidatableFile, - const std::string & varName, - Parameter< Operation::OPEN_DATASET > & parameters ); + const std::string &varName, + Parameter ¶meters); - static constexpr char const * errorMsg = "ADIOS2: openDataset()"; + static constexpr char const *errorMsg = "ADIOS2: openDataset()"; }; struct WriteDataset { - template< typename T > + template static void call( - ADIOS2IOHandlerImpl * impl, - BufferedPut & bp, - adios2::IO & IO, - adios2::Engine & engine ); + ADIOS2IOHandlerImpl *impl, + BufferedPut &bp, + adios2::IO &IO, + adios2::Engine &engine); - template< int n, typename... Params > - static void call( Params &&... ); + template + static void call(Params &&...); }; struct VariableDefiner @@ -549,31 +533,31 @@ namespace detail * @param count As in adios2::IO::DefineVariable * @param constantDims As in adios2::IO::DefineVariable */ - template< typename T > + template static void call( - adios2::IO & IO, - std::string const & name, - std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > const & - compressions, - adios2::Dims const & shape = adios2::Dims(), - adios2::Dims const & start = adios2::Dims(), - adios2::Dims const & count = adios2::Dims(), - bool const constantDims = false ); - - static constexpr char const * errorMsg = "ADIOS2: defineVariable()"; + adios2::IO &IO, + std::string const &name, + std::vector const + &compressions, + adios2::Dims const &shape = adios2::Dims(), + adios2::Dims const &start = adios2::Dims(), + adios2::Dims const &count = adios2::Dims(), + bool const constantDims = false); + + static constexpr char const *errorMsg = "ADIOS2: defineVariable()"; }; struct RetrieveBlocksInfo { - template< typename T > + template static void call( - Parameter< Operation::AVAILABLE_CHUNKS > & params, - adios2::IO & IO, - adios2::Engine & engine, - std::string const & varName ); + Parameter ¶ms, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &varName); - template < int n, typename... Params > - static void call( Params &&... ); + template + static void call(Params &&...); }; // Helper structs to help distinguish valid attribute/variable @@ -584,143 +568,136 @@ namespace detail * for vector and array types, as well as the boolean * type (which is not natively supported by ADIOS). 
*/ - template< typename T > + template struct AttributeTypes { - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - T value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + T value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); /** * @brief Is the attribute given by parameters name and val already * defined exactly in that way within the given IO? */ - static bool - attributeUnchanged( adios2::IO & IO, std::string name, T val ) + static bool attributeUnchanged(adios2::IO &IO, std::string name, T val) { - auto attr = IO.InquireAttribute< T >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< T > data = attr.Data(); - if( data.size() != 1 ) + std::vector data = attr.Data(); + if (data.size() != 1) { return false; } - return data[ 0 ] == val; + return data[0] == val; } }; - template< > struct AttributeTypes< std::complex< long double > > + template <> + struct AttributeTypes> { - static void - createAttribute( + static void createAttribute( adios2::IO &, adios2::Engine &, detail::BufferedAttributeWrite &, - std::complex< long double > ) + std::complex) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string, - std::shared_ptr< Attribute::resource > ) + std::shared_ptr) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } static bool - attributeUnchanged( - adios2::IO &, std::string, std::complex< long double > ) + attributeUnchanged(adios2::IO &, std::string, std::complex) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "attribute types"); } }; - template< > struct AttributeTypes< std::vector< std::complex< long double > > > + template <> + struct AttributeTypes>> { - static void - createAttribute( + static void createAttribute( adios2::IO &, adios2::Engine &, detail::BufferedAttributeWrite &, - const std::vector< std::complex< long double > > & ) + const std::vector> &) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string, - std::shared_ptr< Attribute::resource > ) + std::shared_ptr) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } - static bool - attributeUnchanged( - adios2::IO &, - std::string, - std::vector< std::complex< long double > > ) + static bool attributeUnchanged( + adios2::IO &, std::string, std::vector>) { throw std::runtime_error( - "[ADIOS2] Internal error: no support for 
long double complex vector attribute types" ); + "[ADIOS2] Internal error: no support for long double complex " + "vector attribute types"); } }; - template < typename T > struct AttributeTypes< std::vector< T > > + template + struct AttributeTypes> { - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< T > & value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); static bool - attributeUnchanged( - adios2::IO & IO, - std::string name, - std::vector< T > val ) + attributeUnchanged(adios2::IO &IO, std::string name, std::vector val) { - auto attr = IO.InquireAttribute< T >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< T > data = attr.Data(); - if( data.size() != val.size() ) + std::vector data = attr.Data(); + if (data.size() != val.size()) { return false; } - for( size_t i = 0; i < val.size(); ++i ) + for (size_t i = 0; i < val.size(); ++i) { - if( data[ i ] != val[ i ] ) + if (data[i] != val[i]) { return false; } @@ -729,41 +706,36 @@ namespace detail } }; - template<> - struct AttributeTypes< std::vector< std::string > > + template <> + struct AttributeTypes> { - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< std::string > & vec ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &vec); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static bool - attributeUnchanged( - adios2::IO & IO, - std::string name, - std::vector< std::string > val ) + static bool attributeUnchanged( + adios2::IO &IO, std::string name, std::vector val) { - auto attr = IO.InquireAttribute< std::string >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< std::string > data = attr.Data(); - if( data.size() != val.size() ) + std::vector data = attr.Data(); + if (data.size() != val.size()) { return false; } - for( size_t i = 0; i < val.size(); ++i ) + for (size_t i = 0; i < val.size(); ++i) { - if( data[ i ] != val[ i ] ) + if (data[i] != val[i]) { return false; } @@ -772,41 +744,36 @@ namespace detail } }; - template < typename T, size_t n > - struct AttributeTypes< std::array< T, n > > + template + struct AttributeTypes> { - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::array< T, n > & value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::array &value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); + std::shared_ptr resource); - static bool - attributeUnchanged( - adios2::IO & IO, - std::string name, - std::array< T, n > val ) + static bool attributeUnchanged( + adios2::IO &IO, std::string name, std::array 
val) { - auto attr = IO.InquireAttribute< T >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< T > data = attr.Data(); - if( data.size() != n ) + std::vector data = attr.Data(); + if (data.size() != n) { return false; } - for( size_t i = 0; i < n; ++i ) + for (size_t i = 0; i < n; ++i) { - if( data[ i ] != val[ i ] ) + if (data[i] != val[i]) { return false; } @@ -819,68 +786,62 @@ namespace detail { using rep = detail::bool_representation; - static constexpr rep toRep( bool b ) + static constexpr rep toRep(bool b) { return b ? 1U : 0U; } - - static constexpr bool fromRep( rep r ) + static constexpr bool fromRep(rep r) { return r != 0; } - } + } // namespace bool_repr - template <> struct AttributeTypes< bool > + template <> + struct AttributeTypes { using rep = detail::bool_representation; - static constexpr rep toRep( bool b ) + static constexpr rep toRep(bool b) { return b ? 1U : 0U; } - - static constexpr bool fromRep( rep r ) + static constexpr bool fromRep(rep r) { return r != 0; } - static void - createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - bool value ); + static void createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + bool value); - static void - readAttribute( + static void readAttribute( detail::PreloadAdiosAttributes const &, std::string name, - std::shared_ptr< Attribute::resource > resource ); - - + std::shared_ptr resource); static bool - attributeUnchanged( adios2::IO & IO, std::string name, bool val ) + attributeUnchanged(adios2::IO &IO, std::string name, bool val) { - auto attr = IO.InquireAttribute< rep >( name ); - if( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { return false; } - std::vector< rep > data = attr.Data(); - if( data.size() != 1 ) + std::vector data = attr.Data(); + if (data.size() != 1) { return false; } - return data[ 0 ] == toRep( val ); + return data[0] == toRep(val); } }; // Other datatypes used in the ADIOS2IOHandler implementation - struct BufferedActions; /* @@ -888,49 +849,48 @@ namespace detail */ struct BufferedAction { - explicit BufferedAction( ) = default; - virtual ~BufferedAction( ) = default; + explicit BufferedAction() = default; + virtual ~BufferedAction() = default; - BufferedAction( BufferedAction const & other ) = delete; - BufferedAction( BufferedAction && other ) = default; + BufferedAction(BufferedAction const &other) = delete; + BufferedAction(BufferedAction &&other) = default; - BufferedAction & operator=( BufferedAction const & other ) = delete; - BufferedAction & operator=( BufferedAction && other ) = default; + BufferedAction &operator=(BufferedAction const &other) = delete; + BufferedAction &operator=(BufferedAction &&other) = default; - virtual void run( BufferedActions & ) = 0; + virtual void run(BufferedActions &) = 0; }; struct BufferedGet : BufferedAction { std::string name; - Parameter< Operation::READ_DATASET > param; + Parameter param; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; struct BufferedPut : BufferedAction { std::string name; - Parameter< Operation::WRITE_DATASET > param; + Parameter param; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; struct OldBufferedAttributeRead : BufferedAction { - Parameter< Operation::READ_ATT > param; + Parameter param; std::string name; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; 
struct BufferedAttributeRead { - Parameter< Operation::READ_ATT > param; + Parameter param; std::string name; - void - run( BufferedActions & ); + void run(BufferedActions &); }; struct BufferedAttributeWrite : BufferedAction @@ -938,9 +898,9 @@ namespace detail std::string name; Datatype dtype; Attribute::resource resource; - std::vector< char > bufferForVecString; + std::vector bufferForVecString; - void run( BufferedActions & ) override; + void run(BufferedActions &) override; }; struct I_UpdateSpan @@ -949,12 +909,12 @@ namespace detail virtual ~I_UpdateSpan() = default; }; - template< typename T > + template struct UpdateSpan : I_UpdateSpan { - adios2::detail::Span< T > span; + adios2::detail::Span span; - UpdateSpan( adios2::detail::Span< T > ); + UpdateSpan(adios2::detail::Span); void *update() override; }; @@ -969,7 +929,7 @@ namespace detail friend struct BufferedGet; friend struct BufferedPut; - BufferedActions( BufferedActions const & ) = delete; + BufferedActions(BufferedActions const &) = delete; /** * The full path to the file created on disk, including the @@ -998,13 +958,13 @@ namespace detail * IO. */ std::string const m_IOName; - adios2::ADIOS & m_ADIOS; + adios2::ADIOS &m_ADIOS; adios2::IO m_IO; /** * The default queue for deferred actions. * Drained upon BufferedActions::flush(). */ - std::vector< std::unique_ptr< BufferedAction > > m_buffer; + std::vector> m_buffer; /** * Buffer for attributes to be written in the new (variable-based) * attribute layout. @@ -1015,20 +975,20 @@ namespace detail * write commands. * The queue is drained only when closing a step / the engine. */ - std::map< std::string, BufferedAttributeWrite > m_attributeWrites; + std::map m_attributeWrites; /** * @todo This one is unnecessary, in the new schema, attribute reads do * not need to be deferred, but can happen instantly without performance * penalty, once preloadAttributes has been filled. */ - std::vector< BufferedAttributeRead > m_attributeReads; + std::vector m_attributeReads; /** * This contains deferred actions that have already been enqueued into * ADIOS2, but not yet performed in ADIOS2. * We must store them somewhere until the next PerformPuts/Gets, EndStep * or Close in ADIOS2 to avoid use after free conditions. */ - std::vector< std::unique_ptr< BufferedAction > > m_alreadyEnqueued; + std::vector> m_alreadyEnqueued; adios2::Mode m_mode; /** * The base pointer of an ADIOS2 span might change after reallocations. @@ -1038,7 +998,7 @@ namespace detail * retrieval of the updated base pointer. * This map is cleared upon flush points. */ - std::map< unsigned, std::unique_ptr< I_UpdateSpan > > m_updateSpans; + std::map> m_updateSpans; PreloadAdiosAttributes preloadAttributes; /* @@ -1046,7 +1006,7 @@ namespace detail * written has been closed. * A committed attribute cannot be modified. */ - std::set< std::string > uncommittedAttributes; + std::set uncommittedAttributes; /* * The openPMD API will generally create new attributes for each @@ -1060,25 +1020,26 @@ namespace detail */ bool optimizeAttributesStreaming = false; - using AttributeMap_t = std::map< std::string, adios2::Params >; + using AttributeMap_t = std::map; - BufferedActions( ADIOS2IOHandlerImpl & impl, InvalidatableFile file ); + BufferedActions(ADIOS2IOHandlerImpl &impl, InvalidatableFile file); - ~BufferedActions( ); + ~BufferedActions(); /** * Implementation of destructor, will only run once. 
* */ - void - finalize(); + void finalize(); - adios2::Engine & getEngine( ); - adios2::Engine & requireActiveStep( ); + adios2::Engine &getEngine(); + adios2::Engine &requireActiveStep(); - template < typename BA > void enqueue( BA && ba ); + template + void enqueue(BA &&ba); - template < typename BA > void enqueue( BA && ba, decltype( m_buffer ) & ); + template + void enqueue(BA &&ba, decltype(m_buffer) &); /** * Flush deferred IO actions. @@ -1096,21 +1057,19 @@ namespace detail * @param flushUnconditionally Whether to run the functor even if no * deferred IO tasks had been queued. */ - template< typename F > - void - flush( + template + void flush( FlushLevel level, - F && performPutsGets, + F &&performPutsGets, bool writeAttributes, - bool flushUnconditionally ); + bool flushUnconditionally); /** * Overload of flush() that uses adios2::Engine::Perform(Puts|Gets) * and does not flush unconditionally. * */ - void - flush( FlushLevel, bool writeAttributes = false ); + void flush(FlushLevel, bool writeAttributes = false); /** * @brief Begin or end an ADIOS step. @@ -1118,41 +1077,36 @@ namespace detail * @param mode Whether to begin or end a step. * @return AdvanceStatus */ - AdvanceStatus - advance( AdvanceMode mode ); + AdvanceStatus advance(AdvanceMode mode); /* * Delete all buffered actions without running them. */ - void drop( ); + void drop(); - AttributeMap_t const & - availableAttributes(); + AttributeMap_t const &availableAttributes(); - std::vector< std::string > - availableAttributesPrefixed( std::string const & prefix ); + std::vector + availableAttributesPrefixed(std::string const &prefix); /* * See description below. */ - void - invalidateAttributesMap(); + void invalidateAttributesMap(); - AttributeMap_t const & - availableVariables(); + AttributeMap_t const &availableVariables(); - std::vector< std::string > - availableVariablesPrefixed( std::string const & prefix ); + std::vector + availableVariablesPrefixed(std::string const &prefix); /* * See description below. */ - void - invalidateVariablesMap(); + void invalidateVariablesMap(); private: - ADIOS2IOHandlerImpl * m_impl; - std::optional< adios2::Engine > m_engine; //! ADIOS engine + ADIOS2IOHandlerImpl *m_impl; + std::optional m_engine; //! ADIOS engine /** * The ADIOS2 engine type, to be passed to adios2::IO::SetEngine */ @@ -1268,8 +1222,8 @@ namespace detail * the map that would be returned by a call to * IO::Available(Attributes|Variables). */ - std::optional< AttributeMap_t > m_availableAttributes; - std::optional< AttributeMap_t > m_availableVariables; + std::optional m_availableAttributes; + std::optional m_availableVariables; /* * finalize() will set this true to avoid running twice. 
@@ -1281,8 +1235,7 @@ namespace detail return m_impl->schema(); } - void - configure_IO( ADIOS2IOHandlerImpl & impl ); + void configure_IO(ADIOS2IOHandlerImpl &impl); using AttributeLayout = ADIOS2IOHandlerImpl::AttributeLayout; inline AttributeLayout attributeLayout() const @@ -1294,31 +1247,31 @@ namespace detail } // namespace detail #endif // openPMD_HAVE_ADIOS2 - class ADIOS2IOHandler : public AbstractIOHandler { #if openPMD_HAVE_ADIOS2 -friend class ADIOS2IOHandlerImpl; + friend class ADIOS2IOHandlerImpl; private: ADIOS2IOHandlerImpl m_impl; public: - ~ADIOS2IOHandler( ) override + ~ADIOS2IOHandler() override { // we must not throw in a destructor try { - this->flush( ); + this->flush(); } - catch( std::exception const & ex ) + catch (std::exception const &ex) { - std::cerr << "[~ADIOS2IOHandler] An error occurred: " << ex.what() << std::endl; + std::cerr << "[~ADIOS2IOHandler] An error occurred: " << ex.what() + << std::endl; } - catch( ... ) + catch (...) { - std::cerr << "[~ADIOS2IOHandler] An error occurred." << std::endl; + std::cerr << "[~ADIOS2IOHandler] An error occurred." << std::endl; } } @@ -1333,7 +1286,7 @@ friend class ADIOS2IOHandlerImpl; Access, MPI_Comm, json::TracingJSON options, - std::string engineType ); + std::string engineType); #endif @@ -1341,10 +1294,13 @@ friend class ADIOS2IOHandlerImpl; std::string path, Access, json::TracingJSON options, - std::string engineType ); + std::string engineType); - std::string backendName() const override { return "ADIOS2"; } + std::string backendName() const override + { + return "ADIOS2"; + } - std::future< void > flush( ) override; + std::future flush() override; }; // ADIOS2IOHandler } // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp b/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp index e50fe18d1e..75c2613674 100644 --- a/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp +++ b/include/openPMD/IO/ADIOS/ADIOS2PreloadAttributes.hpp @@ -41,11 +41,11 @@ namespace detail * * @tparam T Underlying attribute data type. 
*/ - template< typename T > + template <typename T> struct AttributeWithShape { adios2::Dims shape; - T const * data; + T const *data; }; /** @@ -76,14 +76,14 @@ namespace detail char *destroy = nullptr; AttributeLocation() = delete; - AttributeLocation( adios2::Dims shape, size_t offset, Datatype dt ); + AttributeLocation(adios2::Dims shape, size_t offset, Datatype dt); - AttributeLocation( AttributeLocation const & other ) = delete; + AttributeLocation(AttributeLocation const &other) = delete; AttributeLocation & - operator=( AttributeLocation const & other ) = delete; + operator=(AttributeLocation const &other) = delete; - AttributeLocation( AttributeLocation && other ); - AttributeLocation & operator=( AttributeLocation && other ); + AttributeLocation(AttributeLocation &&other); + AttributeLocation &operator=(AttributeLocation &&other); ~AttributeLocation(); }; @@ -97,18 +97,18 @@ namespace detail * ::operator new(std::size_t) * https://en.cppreference.com/w/cpp/memory/allocator/allocate */ - std::vector< char > m_rawBuffer; - std::map< std::string, AttributeLocation > m_offsets; + std::vector<char> m_rawBuffer; + std::map<std::string, AttributeLocation> m_offsets; public: explicit PreloadAdiosAttributes() = default; - PreloadAdiosAttributes( PreloadAdiosAttributes const & other ) = delete; + PreloadAdiosAttributes(PreloadAdiosAttributes const &other) = delete; PreloadAdiosAttributes & - operator=( PreloadAdiosAttributes const & other ) = delete; + operator=(PreloadAdiosAttributes const &other) = delete; - PreloadAdiosAttributes( PreloadAdiosAttributes && other ) = default; + PreloadAdiosAttributes(PreloadAdiosAttributes &&other) = default; PreloadAdiosAttributes & - operator=( PreloadAdiosAttributes && other ) = default; + operator=(PreloadAdiosAttributes &&other) = default; /** * @brief Schedule attributes for preloading. @@ -120,8 +120,7 @@ namespace detail * @param IO * @param engine */ - void - preloadAttributes( adios2::IO & IO, adios2::Engine & engine ); + void preloadAttributes(adios2::IO &IO, adios2::Engine &engine); /** * @brief Get an attribute that has been buffered previously. @@ -133,43 +132,42 @@ namespace detail * the attribute's shape. Valid only until any non-const member * of PreloadAdiosAttributes is called.
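 *
 * For illustration only (not part of this header), a hypothetical usage
 * sketch; IO and engine are assumed to be an already opened adios2::IO /
 * adios2::Engine pair, and the attribute name is made up:
 *
 *     PreloadAdiosAttributes preloaded;
 *     preloaded.preloadAttributes(IO, engine);
 *     auto attr = preloaded.getAttribute<double>("unitSI");
 *     // attr.shape holds the extent; attr.data points into the raw buffer
 *     // and stays valid only until the next non-const call on preloaded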
*/ - template< typename T > - AttributeWithShape< T > getAttribute( std::string const & name ) const; + template + AttributeWithShape getAttribute(std::string const &name) const; - Datatype attributeType( std::string const & name ) const; + Datatype attributeType(std::string const &name) const; }; - template< typename T > - AttributeWithShape< T > - PreloadAdiosAttributes::getAttribute( std::string const & name ) const + template + AttributeWithShape + PreloadAdiosAttributes::getAttribute(std::string const &name) const { - auto it = m_offsets.find( name ); - if( it == m_offsets.end() ) + auto it = m_offsets.find(name); + if (it == m_offsets.end()) { throw std::runtime_error( - "[ADIOS2] Requested attribute not found: " + name ); + "[ADIOS2] Requested attribute not found: " + name); } - AttributeLocation const & location = it->second; - Datatype determinedDatatype = determineDatatype< T >(); - if( std::is_same< T, signed char >::value ) + AttributeLocation const &location = it->second; + Datatype determinedDatatype = determineDatatype(); + if (std::is_same::value) { // workaround: we use Datatype::CHAR to represent ADIOS2 signed char // (ADIOS2 does not have chars with unspecified signed-ness // anyway) determinedDatatype = Datatype::CHAR; } - if( location.dt != determinedDatatype ) + if (location.dt != determinedDatatype) { std::stringstream errorMsg; errorMsg << "[ADIOS2] Wrong datatype for attribute: " << name << "(location.dt=" << location.dt - << ", T=" << determineDatatype< T >() << ")"; - throw std::runtime_error( errorMsg.str() ); + << ", T=" << determineDatatype() << ")"; + throw std::runtime_error(errorMsg.str()); } - AttributeWithShape< T > res; + AttributeWithShape res; res.shape = location.shape; - res.data = - reinterpret_cast< T const * >( &m_rawBuffer[ location.offset ] ); + res.data = reinterpret_cast(&m_rawBuffer[location.offset]); return res; } } // namespace detail diff --git a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp index 8d102247ca..ed404adb9d 100644 --- a/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp @@ -24,15 +24,15 @@ #if openPMD_HAVE_ADIOS1 +#include "openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp" +#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" #include "openPMD/auxiliary/DerefDynamicCast.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/auxiliary/StringManip.hpp" -#include "openPMD/IO/AbstractIOHandlerImpl.hpp" -#include "openPMD/IO/ADIOS/ADIOS1Auxiliary.hpp" -#include "openPMD/IO/ADIOS/ADIOS1FilePosition.hpp" #include #include @@ -40,66 +40,84 @@ #include #include #include -#include #include #include +#include namespace openPMD { - template< typename ChildClass > // CRT pattern - class CommonADIOS1IOHandlerImpl : public AbstractIOHandlerImpl - { - public: - - void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override; - void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) override; - void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) override; - void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) override; - void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) override; - void closeFile(Writable*, 
Parameter< Operation::CLOSE_FILE > const&) override; - void availableChunks(Writable*, Parameter< Operation::AVAILABLE_CHUNKS > &) override; - void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) override; - void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) override; - void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) override; - void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) override; - void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) override; - void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) override; - void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) override; - void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) override; - void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) override; - void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) override; - void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) override; - void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) override; - void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) override; +template // CRT pattern +class CommonADIOS1IOHandlerImpl : public AbstractIOHandlerImpl +{ +public: + void + createFile(Writable *, Parameter const &) override; + void + createPath(Writable *, Parameter const &) override; + void createDataset( + Writable *, Parameter const &) override; + void extendDataset( + Writable *, Parameter const &) override; + void openFile(Writable *, Parameter const &) override; + void + closeFile(Writable *, Parameter const &) override; + void availableChunks( + Writable *, Parameter &) override; + void openPath(Writable *, Parameter const &) override; + void openDataset(Writable *, Parameter &) override; + void + deleteFile(Writable *, Parameter const &) override; + void + deletePath(Writable *, Parameter const &) override; + void deleteDataset( + Writable *, Parameter const &) override; + void deleteAttribute( + Writable *, Parameter const &) override; + void writeDataset( + Writable *, Parameter const &) override; + void writeAttribute( + Writable *, Parameter const &) override; + void readDataset(Writable *, Parameter &) override; + void readAttribute(Writable *, Parameter &) override; + void listPaths(Writable *, Parameter &) override; + void + listDatasets(Writable *, Parameter &) override; + void listAttributes(Writable *, Parameter &) override; - void close(int64_t); - void close(ADIOS_FILE*); - void flush_attribute(int64_t group, std::string const& name, Attribute const&); + void close(int64_t); + void close(ADIOS_FILE *); + void + flush_attribute(int64_t group, std::string const &name, Attribute const &); - protected: - template< typename... Args > - CommonADIOS1IOHandlerImpl( Args &&... args) - : AbstractIOHandlerImpl{ std::forward< Args >( args )... 
} - {} +protected: + template + CommonADIOS1IOHandlerImpl(Args &&...args) + : AbstractIOHandlerImpl{std::forward(args)...} + {} - ADIOS_READ_METHOD m_readMethod; - std::unordered_map< Writable*, std::shared_ptr< std::string > > m_filePaths; - std::unordered_map< std::shared_ptr< std::string >, int64_t > m_groups; - std::unordered_map< std::shared_ptr< std::string >, bool > m_existsOnDisk; - std::unordered_map< std::shared_ptr< std::string >, int64_t > m_openWriteFileHandles; - std::unordered_map< std::shared_ptr< std::string >, ADIOS_FILE* > m_openReadFileHandles; - std::unordered_map< ADIOS_FILE*, std::vector< ADIOS_SELECTION* > > m_scheduledReads; - std::unordered_map< int64_t, std::unordered_map< std::string, Attribute > > m_attributeWrites; - // config options - std::string m_defaultTransform; - /** - * Call this function to get adios file id for a Writable. Will create one if does not exist - * @return returns an adios file id. - */ - int64_t GetFileHandle(Writable*); + ADIOS_READ_METHOD m_readMethod; + std::unordered_map> m_filePaths; + std::unordered_map, int64_t> m_groups; + std::unordered_map, bool> m_existsOnDisk; + std::unordered_map, int64_t> + m_openWriteFileHandles; + std::unordered_map, ADIOS_FILE *> + m_openReadFileHandles; + std::unordered_map> + m_scheduledReads; + std::unordered_map> + m_attributeWrites; + // config options + std::string m_defaultTransform; + /** + * Call this function to get adios file id for a Writable. Will create one + * if does not exist + * @return returns an adios file id. + */ + int64_t GetFileHandle(Writable *); - void initJson( json::TracingJSON ); - }; // ParallelADIOS1IOHandlerImpl -} // openPMD + void initJson(json::TracingJSON); +}; // ParallelADIOS1IOHandlerImpl +} // namespace openPMD #endif diff --git a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp index 9eeaefcce0..c56440c4fe 100644 --- a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp +++ b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandler.hpp @@ -20,47 +20,50 @@ */ #pragma once -#include "openPMD/config.hpp" +#include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/auxiliary/Export.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" -#include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/config.hpp" #include #include #include #if openPMD_HAVE_ADIOS1 -# include +#include #endif - namespace openPMD { - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl; +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl; - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandler : public AbstractIOHandler - { - friend class ParallelADIOS1IOHandlerImpl; +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandler : public AbstractIOHandler +{ + friend class ParallelADIOS1IOHandlerImpl; - public: -# if openPMD_HAVE_MPI - ParallelADIOS1IOHandler(std::string path, Access, json::TracingJSON , MPI_Comm); -# else - ParallelADIOS1IOHandler(std::string path, Access, json::TracingJSON); -# endif - ~ParallelADIOS1IOHandler() override; +public: +#if openPMD_HAVE_MPI + ParallelADIOS1IOHandler( + std::string path, Access, json::TracingJSON, MPI_Comm); +#else + ParallelADIOS1IOHandler(std::string path, Access, json::TracingJSON); +#endif + ~ParallelADIOS1IOHandler() override; - std::string backendName() const override { return "MPI_ADIOS1"; } + std::string backendName() const override + { + return "MPI_ADIOS1"; + } - std::future< void > flush() override; + std::future flush() override; #if openPMD_HAVE_ADIOS1 - void enqueue(IOTask const&) override; + void 
enqueue(IOTask const &) override; #endif - private: +private: #if openPMD_HAVE_ADIOS1 - std::queue< IOTask > m_setup; + std::queue m_setup; #endif - std::unique_ptr< ParallelADIOS1IOHandlerImpl > m_impl; - }; // ParallelADIOS1IOHandler + std::unique_ptr m_impl; +}; // ParallelADIOS1IOHandler -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp index 27ad5fae3b..e0c7504c90 100644 --- a/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp +++ b/include/openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp @@ -20,52 +20,52 @@ */ #pragma once -#include "openPMD/config.hpp" +#include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/auxiliary/Export.hpp" #include "openPMD/auxiliary/JSON_internal.hpp" -#include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -# include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" +#include "openPMD/IO/ADIOS/CommonADIOS1IOHandler.hpp" #endif #include #include #include #if openPMD_HAVE_ADIOS1 -# include -# include +#include +#include #endif - namespace openPMD { #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl - : public CommonADIOS1IOHandlerImpl< ParallelADIOS1IOHandlerImpl > - { - private: - using Base_t = CommonADIOS1IOHandlerImpl< ParallelADIOS1IOHandlerImpl >; - public: - ParallelADIOS1IOHandlerImpl(AbstractIOHandler*, json::TracingJSON, MPI_Comm); - virtual ~ParallelADIOS1IOHandlerImpl(); +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl + : public CommonADIOS1IOHandlerImpl +{ +private: + using Base_t = CommonADIOS1IOHandlerImpl; + +public: + ParallelADIOS1IOHandlerImpl( + AbstractIOHandler *, json::TracingJSON, MPI_Comm); + virtual ~ParallelADIOS1IOHandlerImpl(); - virtual void init(); + virtual void init(); - std::future< void > flush() override; + std::future flush() override; - virtual int64_t open_write(Writable *); - virtual ADIOS_FILE* open_read(std::string const & name); - int64_t initialize_group(std::string const& name); + virtual int64_t open_write(Writable *); + virtual ADIOS_FILE *open_read(std::string const &name); + int64_t initialize_group(std::string const &name); - protected: - MPI_Comm m_mpiComm; - MPI_Info m_mpiInfo; - }; // ParallelADIOS1IOHandlerImpl +protected: + MPI_Comm m_mpiComm; + MPI_Info m_mpiInfo; +}; // ParallelADIOS1IOHandlerImpl #else - class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl - { - }; // ParallelADIOS1IOHandlerImpl +class OPENPMDAPI_EXPORT ParallelADIOS1IOHandlerImpl +{}; // ParallelADIOS1IOHandlerImpl #endif -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/AbstractFilePosition.hpp b/include/openPMD/IO/AbstractFilePosition.hpp index e822b27516..166ca554c5 100644 --- a/include/openPMD/IO/AbstractFilePosition.hpp +++ b/include/openPMD/IO/AbstractFilePosition.hpp @@ -20,7 +20,6 @@ */ #pragma once - namespace openPMD { class AbstractFilePosition @@ -28,4 +27,4 @@ class AbstractFilePosition public: virtual ~AbstractFilePosition() = default; }; // AbstractFilePosition -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandler.hpp b/include/openPMD/IO/AbstractIOHandler.hpp index 9dae2ce97c..dafa896a76 100644 --- a/include/openPMD/IO/AbstractIOHandler.hpp +++ b/include/openPMD/IO/AbstractIOHandler.hpp @@ -20,13 +20,13 @@ */ #pragma once -#include "openPMD/config.hpp" #include "openPMD/IO/Access.hpp" #include "openPMD/IO/Format.hpp" #include 
"openPMD/IO/IOTask.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_MPI -# include +#include #endif #include @@ -35,25 +35,26 @@ #include #include - namespace openPMD { class no_such_file_error : public std::runtime_error { public: - no_such_file_error(std::string const& what_arg) - : std::runtime_error(what_arg) - { } - virtual ~no_such_file_error() { } + no_such_file_error(std::string const &what_arg) + : std::runtime_error(what_arg) + {} + virtual ~no_such_file_error() + {} }; class unsupported_data_error : public std::runtime_error { public: - unsupported_data_error(std::string const& what_arg) - : std::runtime_error(what_arg) - { } - virtual ~unsupported_data_error() { } + unsupported_data_error(std::string const &what_arg) + : std::runtime_error(what_arg) + {} + virtual ~unsupported_data_error() + {} }; /** @@ -86,7 +87,6 @@ enum class FlushLevel : unsigned char SkeletonOnly }; - /** Interface for communicating between logical and physically persistent data. * * Input and output operations are channeled through a task queue that is @@ -100,32 +100,30 @@ class AbstractIOHandler public: #if openPMD_HAVE_MPI AbstractIOHandler(std::string path, Access at, MPI_Comm) - : directory{std::move(path)}, - m_backendAccess{at}, - m_frontendAccess{at} - { } + : directory{std::move(path)}, m_backendAccess{at}, m_frontendAccess{at} + {} #endif AbstractIOHandler(std::string path, Access at) - : directory{std::move(path)}, - m_backendAccess{at}, - m_frontendAccess{at} - { } + : directory{std::move(path)}, m_backendAccess{at}, m_frontendAccess{at} + {} virtual ~AbstractIOHandler() = default; /** Add provided task to queue according to FIFO. * - * @param iotask Task to be executed after all previously enqueued IOTasks complete. + * @param iotask Task to be executed after all previously enqueued + * IOTasks complete. */ - virtual void enqueue(IOTask const& iotask) + virtual void enqueue(IOTask const &iotask) { m_work.push(iotask); } /** Process operations in queue according to FIFO. * - * @return Future indicating the completion state of the operation for backends that decide to implement this operation asynchronously. + * @return Future indicating the completion state of the operation for + * backends that decide to implement this operation asynchronously. */ - virtual std::future< void > flush() = 0; + virtual std::future flush() = 0; /** The currently used backend */ virtual std::string backendName() const = 0; @@ -133,7 +131,7 @@ class AbstractIOHandler std::string const directory; Access const m_backendAccess; Access const m_frontendAccess; - std::queue< IOTask > m_work; + std::queue m_work; FlushLevel m_flushLevel = FlushLevel::InternalFlush; }; // AbstractIOHandler diff --git a/include/openPMD/IO/AbstractIOHandlerHelper.hpp b/include/openPMD/IO/AbstractIOHandlerHelper.hpp index 42b03be068..e6775a4757 100644 --- a/include/openPMD/IO/AbstractIOHandlerHelper.hpp +++ b/include/openPMD/IO/AbstractIOHandlerHelper.hpp @@ -20,34 +20,35 @@ */ #pragma once - -#include "openPMD/config.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/config.hpp" namespace openPMD { #if openPMD_HAVE_MPI - /** Construct an appropriate specific IOHandler for the desired IO mode that may be MPI-aware. - * - * @param path Path to root folder for all operations associated with the desired handler. - * @param access Access mode describing desired operations and permissions of the desired handler. - * @param format Format describing the IO backend of the desired handler. 
- * @param comm MPI communicator used for IO. - * @param options JSON-formatted option string, to be interpreted by - * the backend. - * @tparam JSON Substitute for nlohmann::json. Templated to avoid - including nlohmann::json in a .hpp file. - * @return Smart pointer to created IOHandler. - */ -template< typename JSON > -std::shared_ptr< AbstractIOHandler > -createIOHandler( +/** Construct an appropriate specific IOHandler for the desired IO mode that may + be MPI-aware. + * + * @param path Path to root folder for all operations associated with + the desired handler. + * @param access Access mode describing desired operations and + permissions of the desired handler. + * @param format Format describing the IO backend of the desired handler. + * @param comm MPI communicator used for IO. + * @param options JSON-formatted option string, to be interpreted by + * the backend. + * @tparam JSON Substitute for nlohmann::json. Templated to avoid + including nlohmann::json in a .hpp file. + * @return Smart pointer to created IOHandler. + */ +template +std::shared_ptr createIOHandler( std::string path, Access access, Format format, MPI_Comm comm, - JSON options ); + JSON options); #endif /** Construct an appropriate specific IOHandler for the desired IO mode. @@ -63,18 +64,11 @@ createIOHandler( including nlohmann::json in a .hpp file. * @return Smart pointer to created IOHandler. */ -template< typename JSON > -std::shared_ptr< AbstractIOHandler > -createIOHandler( - std::string path, - Access access, - Format format, - JSON options = JSON() ); +template +std::shared_ptr createIOHandler( + std::string path, Access access, Format format, JSON options = JSON()); // version without configuration to use in AuxiliaryTest -std::shared_ptr< AbstractIOHandler > -createIOHandler( - std::string path, - Access access, - Format format ); +std::shared_ptr +createIOHandler(std::string path, Access access, Format format); } // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandlerImpl.hpp b/include/openPMD/IO/AbstractIOHandlerImpl.hpp index 619f0ea857..c347ad751d 100644 --- a/include/openPMD/IO/AbstractIOHandlerImpl.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImpl.hpp @@ -27,7 +27,6 @@ #include #include - namespace openPMD { // class AbstractIOHandler; @@ -36,359 +35,494 @@ class Writable; class AbstractIOHandlerImpl { public: - AbstractIOHandlerImpl(AbstractIOHandler *handler) - : m_handler{handler} - { } + AbstractIOHandlerImpl(AbstractIOHandler *handler) : m_handler{handler} + {} virtual ~AbstractIOHandlerImpl() = default; - virtual std::future< void > flush() + virtual std::future flush() { using namespace auxiliary; - while( !(*m_handler).m_work.empty() ) + while (!(*m_handler).m_work.empty()) { - IOTask& i = (*m_handler).m_work.front(); + IOTask &i = (*m_handler).m_work.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::CREATE_FILE: - createFile(i.writable, deref_dynamic_cast< Parameter< Operation::CREATE_FILE > >(i.parameter.get())); - break; - case O::CREATE_PATH: - createPath(i.writable, deref_dynamic_cast< Parameter< O::CREATE_PATH > >(i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset(i.writable, deref_dynamic_cast< Parameter< O::CREATE_DATASET > >(i.parameter.get())); - break; - case O::EXTEND_DATASET: - extendDataset(i.writable, deref_dynamic_cast< Parameter< O::EXTEND_DATASET > >(i.parameter.get())); - break; - case O::OPEN_FILE: - openFile(i.writable, deref_dynamic_cast< Parameter< O::OPEN_FILE > >(i.parameter.get())); - 
break; - case O::CLOSE_FILE: - closeFile(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_FILE > >(i.parameter.get())); - break; - case O::OPEN_PATH: - openPath(i.writable, deref_dynamic_cast< Parameter< O::OPEN_PATH > >(i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_PATH > >(i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset(i.writable, deref_dynamic_cast< Parameter< O::OPEN_DATASET > >(i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile(i.writable, deref_dynamic_cast< Parameter< O::DELETE_FILE > >(i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath(i.writable, deref_dynamic_cast< Parameter< O::DELETE_PATH > >(i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset(i.writable, deref_dynamic_cast< Parameter< O::DELETE_DATASET > >(i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute(i.writable, deref_dynamic_cast< Parameter< O::DELETE_ATT > >(i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset(i.writable, deref_dynamic_cast< Parameter< O::WRITE_DATASET > >(i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute(i.writable, deref_dynamic_cast< Parameter< O::WRITE_ATT > >(i.parameter.get())); - break; - case O::READ_DATASET: - readDataset(i.writable, deref_dynamic_cast< Parameter< O::READ_DATASET > >(i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView(i.writable, deref_dynamic_cast< Parameter< O::GET_BUFFER_VIEW > >(i.parameter.get())); - break; - case O::READ_ATT: - readAttribute(i.writable, deref_dynamic_cast< Parameter< O::READ_ATT > >(i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths(i.writable, deref_dynamic_cast< Parameter< O::LIST_PATHS > >(i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets(i.writable, deref_dynamic_cast< Parameter< O::LIST_DATASETS > >(i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes(i.writable, deref_dynamic_cast< Parameter< O::LIST_ATTS > >(i.parameter.get())); - break; - case O::ADVANCE: - advance(i.writable, deref_dynamic_cast< Parameter< O::ADVANCE > >(i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks(i.writable, deref_dynamic_cast< Parameter< O::AVAILABLE_CHUNKS > >(i.parameter.get())); - break; + case O::CREATE_FILE: + createFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CREATE_PATH: + createPath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CREATE_DATASET: + createDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::EXTEND_DATASET: + extendDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_FILE: + openFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CLOSE_FILE: + closeFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_PATH: + openPath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CLOSE_PATH: + closePath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_DATASET: + openDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_FILE: + deleteFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_PATH: + deletePath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_DATASET: + deleteDataset( + 
i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_ATT: + deleteAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::WRITE_DATASET: + writeDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::WRITE_ATT: + writeAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::READ_DATASET: + readDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::GET_BUFFER_VIEW: + getBufferView( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::READ_ATT: + readAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_PATHS: + listPaths( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_DATASETS: + listDatasets( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_ATTS: + listAttributes( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::ADVANCE: + advance( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::AVAILABLE_CHUNKS: + availableChunks( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; } - } catch (...) + } + catch (...) { - std::cerr - << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString( i.operation ) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." - << std::endl; + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." + << std::endl; (*m_handler).m_work.pop(); throw; } (*m_handler).m_work.pop(); } - return std::future< void >(); + return std::future(); } - /** - * Close the file corresponding with the writable and release file handles. - * The operation should succeed in any access mode. - */ - virtual void - closeFile( Writable *, Parameter< Operation::CLOSE_FILE > const & ) = 0; + /** + * Close the file corresponding with the writable and release file handles. + * The operation should succeed in any access mode. + */ + virtual void + closeFile(Writable *, Parameter const &) = 0; - /** Advance the file/stream that this writable belongs to. - * - * If the backend is based around usage of IO steps (especially streaming - * backends), open or close an IO step. This is modeled closely after the - * step concept in ADIOS2. - * - * This task is used to implement streaming-aware semantics in the openPMD - * API by splitting data into packets that are written to and read from - * transport. - * - * IO actions up to the point of closing a step must be performed now. - * - * The advance mode is determined by parameters.mode. - * The return status code shall be stored as parameters.status. - */ - virtual void - advance( Writable *, Parameter< Operation::ADVANCE > & ) - {} + /** Advance the file/stream that this writable belongs to. + * + * If the backend is based around usage of IO steps (especially streaming + * backends), open or close an IO step. This is modeled closely after the + * step concept in ADIOS2. + * + * This task is used to implement streaming-aware semantics in the openPMD + * API by splitting data into packets that are written to and read from + * transport. + * + * IO actions up to the point of closing a step must be performed now. + * + * The advance mode is determined by parameters.mode. 
+ * The return status code shall be stored as parameters.status. + */ + virtual void advance(Writable *, Parameter &) + {} - /** Close an openPMD group. - * - * This is an optimization-enabling task and may be ignored by backends. - * Indicates that the group will not be accessed any further. - * Especially in step-based IO mode (e.g. streaming): - * Indicates that the group corresponding with the writable needs not be held - * in a parseable state for this and upcoming IO steps, allowing for deletion - * of metadata to be sent/stored (attributes, datasets, ..). - * Should fail if the writable is not written. - * Should fail if m_handler->accessType is AccessType::READ_ONLY. - * - */ - virtual void - closePath( Writable *, Parameter< Operation::CLOSE_PATH > const & ) - {} + /** Close an openPMD group. + * + * This is an optimization-enabling task and may be ignored by backends. + * Indicates that the group will not be accessed any further. + * Especially in step-based IO mode (e.g. streaming): + * Indicates that the group corresponding with the writable needs not be + * held in a parseable state for this and upcoming IO steps, allowing for + * deletion of metadata to be sent/stored (attributes, datasets, ..). Should + * fail if the writable is not written. Should fail if m_handler->accessType + * is AccessType::READ_ONLY. + * + */ + virtual void closePath(Writable *, Parameter const &) + {} - /** Report chunks that are available for loading from the dataset represented - * by this writable. - * - * The resulting chunks should be stored into parameters.chunks. - * - */ - virtual void - availableChunks( Writable *, Parameter< Operation::AVAILABLE_CHUNKS > & ) = 0; + /** Report chunks that are available for loading from the dataset + * represented by this writable. + * + * The resulting chunks should be stored into parameters.chunks. + * + */ + virtual void + availableChunks(Writable *, Parameter &) = 0; - /** Create a new file in physical storage, possibly overriding an existing file. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The new file should be located in m_handler->directory. - * The new file should have the filename parameters.name. - * The filename should include the correct corresponding filename extension. - * Any existing file should be overwritten if m_handler->m_frontendAccess is Access::CREATE. - * The Writables file position should correspond to the root group "/" of the hierarchy. - * The Writable should be marked written when the operation completes successfully. - */ - virtual void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) = 0; - /** Create all necessary groups for a path, possibly recursively. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The path parameters.path may contain multiple levels (e.g. first/second/third/). - * The Writables file position should correspond to the complete newly created path (i.e. first/second/third/ should be assigned to the Writables file position). - * The Writable should be marked written when the operation completes successfully. - */ - virtual void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) = 0; - /** Create a new dataset of given type, extent and storage properties. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The path may contain multiple levels (e.g. group/dataset). - * The new dataset should have the name parameters.name. 
This name should not start or end with a slash ("/"). - * The new dataset should be of datatype parameters.dtype. - * The new dataset should have an extent of parameters.extent. - * If possible, the new dataset should be extensible. - * If possible, the new dataset should be divided into chunks with size parameters.chunkSize. - * If possible, the new dataset should be compressed according to parameters.compression. This may be format-specific. - * If possible, the new dataset should be transformed accoring to parameters.transform. This may be format-specific. - * The Writables file position should correspond to the newly created dataset. - * The Writable should be marked written when the operation completes successfully. - */ - virtual void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) = 0; - /** Increase the extent of an existing dataset. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should fail if the dataset does not yet exist. - * The dataset should have the name parameters.name. This name should not start or end with a slash ("/"). - * The operation should fail if the new extent is not strictly large in every dimension. - * The dataset should have an extent of parameters.extent. - */ - virtual void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) = 0; - /** Open an existing file assuming it conforms to openPMD. - * - * The operation should fail if m_handler->directory is not accessible. - * The opened file should have filename parameters.name and include the correct corresponding filename extension. - * The operation should not open files more than once. - * If possible, the file should be opened with read-only permissions if m_handler->m_frontendAccess is Access::READ_ONLY. - * The Writables file position should correspond to the root group "/" of the hierarchy in the opened file. - * The Writable should be marked written when the operation completes successfully. - */ - virtual void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) = 0; - /** Open all contained groups in a path, possibly recursively. - * - * The operation should overwrite existing file positions, even when the Writable was already marked written. - * The path parameters.path may contain multiple levels (e.g. first/second/third/). This path should be relative (i.e. it should not start with a slash "/"). - * The number of levels may be zero, i.e. parameters.path may be an empty string. - * The Writables file position should correspond to the complete opened path (i.e. first/second/third/ should be assigned to the Writables file position). - * The Writable should be marked written when the operation completes successfully. - */ - virtual void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) = 0; - /** Open an existing dataset and determine its datatype and extent. - * - * The opened dataset should be located in a group below the group of the Writables parent writable->parent. - * The opened datasets name should be parameters.name. This name should not start or end with a slash ("/"). - * The opened datasets datatype should be stored in *(parameters.dtype). - * The opened datasets extent should be stored in *(parameters.extent). - * The Writables file position should correspond to the opened dataset. - * The Writable should be marked written when the operation completes successfully. 
- */ - virtual void openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) = 0; - /** Delete an existing file from physical storage. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * All handles that correspond to the file should be closed before deletion. - * The file to delete should have the filename parameters.name. - * The filename should include the correct corresponding filename extension. - * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr). - * The Writable should be marked not written when the operation completes successfully. - */ - virtual void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) = 0; - /** Delete all objects within an existing path. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * The path parameters.path may contain multiple levels (e.g. first/second/third/). This path should be relative (i.e. it should not start with a slash "/"). It may also contain the current group ".". - * All groups and datasets starting from the path should not be accessible in physical storage after the operation completes successfully. - * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr). - * The Writable should be marked not written when the operation completes successfully. - */ - virtual void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) = 0; - /** Delete an existing dataset. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * The dataset should have the name parameters.name. This name should not start or end with a slash ("/"). It may also contain the current dataset ".". - * The dataset should not be accessible in physical storage after the operation completes successfully. - * The Writables file position should be set to an invalid position (i.e. the pointer should be a nullptr). - * The Writable should be marked not written when the operation completes successfully. - */ - virtual void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) = 0; - /** Delete an existing attribute. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The operation should pass if the Writable was not marked written. - * The attribute should be associated with the Writable and have the name parameters.name before deletion. - * The attribute should not be accessible in physical storage after the operation completes successfully. - */ - virtual void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) = 0; - /** Write a chunk of data into an existing dataset. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The dataset should be associated with the Writable. - * The operation should fail if the dataset does not exist. - * The operation should fail if the chunk extent parameters.extent is not smaller or equals in every dimension. - * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset. - * The dataset should match the dataype parameters.dtype. - * The data parameters.data is a cast-to-void pointer to a flattened version of the chunk data. 
It should be re-cast to the provided datatype. The chunk is stored row-major. - * The region of the chunk should be written to physical storage after the operation completes successfully. - */ - virtual void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) = 0; + /** Create a new file in physical storage, possibly overriding an existing + * file. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The new file should be located in + * m_handler->directory. The new file should have the filename + * parameters.name. The filename should include the correct corresponding + * filename extension. Any existing file should be overwritten if + * m_handler->m_frontendAccess is Access::CREATE. The Writables file + * position should correspond to the root group "/" of the hierarchy. The + * Writable should be marked written when the operation completes + * successfully. + */ + virtual void + createFile(Writable *, Parameter const &) = 0; + /** Create all necessary groups for a path, possibly recursively. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The path parameters.path may contain multiple levels + * (e.g. first/second/third/). The Writables file position should correspond + * to the complete newly created path (i.e. first/second/third/ should be + * assigned to the Writables file position). The Writable should be marked + * written when the operation completes successfully. + */ + virtual void + createPath(Writable *, Parameter const &) = 0; + /** Create a new dataset of given type, extent and storage properties. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The path may contain multiple levels (e.g. + * group/dataset). The new dataset should have the name parameters.name. + * This name should not start or end with a slash ("/"). The new dataset + * should be of datatype parameters.dtype. The new dataset should have an + * extent of parameters.extent. If possible, the new dataset should be + * extensible. If possible, the new dataset should be divided into chunks + * with size parameters.chunkSize. If possible, the new dataset should be + * compressed according to parameters.compression. This may be + * format-specific. If possible, the new dataset should be transformed + * accoring to parameters.transform. This may be format-specific. The + * Writables file position should correspond to the newly created dataset. + * The Writable should be marked written when the operation completes + * successfully. + */ + virtual void + createDataset(Writable *, Parameter const &) = 0; + /** Increase the extent of an existing dataset. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should fail if the dataset does not yet + * exist. The dataset should have the name parameters.name. This name should + * not start or end with a slash ("/"). The operation should fail if the new + * extent is not strictly large in every dimension. The dataset should have + * an extent of parameters.extent. + */ + virtual void + extendDataset(Writable *, Parameter const &) = 0; + /** Open an existing file assuming it conforms to openPMD. + * + * The operation should fail if m_handler->directory is not accessible. + * The opened file should have filename parameters.name and include the + * correct corresponding filename extension. The operation should not open + * files more than once. 
If possible, the file should be opened with + * read-only permissions if m_handler->m_frontendAccess is + * Access::READ_ONLY. The Writables file position should correspond to the + * root group "/" of the hierarchy in the opened file. The Writable should + * be marked written when the operation completes successfully. + */ + virtual void + openFile(Writable *, Parameter const &) = 0; + /** Open all contained groups in a path, possibly recursively. + * + * The operation should overwrite existing file positions, even when the + * Writable was already marked written. The path parameters.path may contain + * multiple levels (e.g. first/second/third/). This path should be relative + * (i.e. it should not start with a slash "/"). The number of levels may be + * zero, i.e. parameters.path may be an empty string. The Writables file + * position should correspond to the complete opened path (i.e. + * first/second/third/ should be assigned to the Writables file position). + * The Writable should be marked written when the operation completes + * successfully. + */ + virtual void + openPath(Writable *, Parameter const &) = 0; + /** Open an existing dataset and determine its datatype and extent. + * + * The opened dataset should be located in a group below the group of the + * Writables parent writable->parent. The opened datasets name should be + * parameters.name. This name should not start or end with a slash ("/"). + * The opened datasets datatype should be stored in *(parameters.dtype). + * The opened datasets extent should be stored in *(parameters.extent). + * The Writables file position should correspond to the opened dataset. + * The Writable should be marked written when the operation completes + * successfully. + */ + virtual void + openDataset(Writable *, Parameter &) = 0; + /** Delete an existing file from physical storage. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. All handles that correspond to the file should be closed + * before deletion. The file to delete should have the filename + * parameters.name. The filename should include the correct corresponding + * filename extension. The Writables file position should be set to an + * invalid position (i.e. the pointer should be a nullptr). The Writable + * should be marked not written when the operation completes successfully. + */ + virtual void + deleteFile(Writable *, Parameter const &) = 0; + /** Delete all objects within an existing path. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. The path parameters.path may contain multiple levels + * (e.g. first/second/third/). This path should be relative (i.e. it should + * not start with a slash "/"). It may also contain the current group ".". + * All groups and datasets starting from the path should not be accessible + * in physical storage after the operation completes successfully. The + * Writables file position should be set to an invalid position (i.e. the + * pointer should be a nullptr). The Writable should be marked not written + * when the operation completes successfully. + */ + virtual void + deletePath(Writable *, Parameter const &) = 0; + /** Delete an existing dataset. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. 
The dataset should have the name parameters.name. This + * name should not start or end with a slash ("/"). It may also contain the + * current dataset ".". The dataset should not be accessible in physical + * storage after the operation completes successfully. The Writables file + * position should be set to an invalid position (i.e. the pointer should be + * a nullptr). The Writable should be marked not written when the operation + * completes successfully. + */ + virtual void + deleteDataset(Writable *, Parameter const &) = 0; + /** Delete an existing attribute. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The operation should pass if the Writable was not + * marked written. The attribute should be associated with the Writable and + * have the name parameters.name before deletion. The attribute should not + * be accessible in physical storage after the operation completes + * successfully. + */ + virtual void + deleteAttribute(Writable *, Parameter const &) = 0; + /** Write a chunk of data into an existing dataset. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The dataset should be associated with the Writable. + * The operation should fail if the dataset does not exist. + * The operation should fail if the chunk extent parameters.extent is not + * smaller or equals in every dimension. The operation should fail if chunk + * positions parameters.offset+parameters.extent do not reside inside the + * dataset. The dataset should match the dataype parameters.dtype. The data + * parameters.data is a cast-to-void pointer to a flattened version of the + * chunk data. It should be re-cast to the provided datatype. The chunk is + * stored row-major. The region of the chunk should be written to physical + * storage after the operation completes successfully. + */ + virtual void + writeDataset(Writable *, Parameter const &) = 0; - /** Get a view into a dataset buffer that can be filled by a user. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The dataset should be associated with the Writable. - * The operation should fail if the dataset does not exist. - * The operation should fail if the chunk extent parameters.extent is not smaller or equals in every dimension. - * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset. - * The dataset should match the dataype parameters.dtype. - * The buffer should be stored as a cast-to-char pointer to a flattened version of the backend buffer in parameters.out->ptr. The chunk is stored row-major. - * The buffer's content should be written to storage not before the next call to AbstractIOHandler::flush where AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. - * The precise time of data consumption is defined by the backend: - * * Data written to the returned buffer should be consumed not earlier than the next call to AbstractIOHandler::flush where AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. - * * Data should be consumed not later than the next Operation::ADVANCE task where parameter.mode == AdvanceMode::ENDSTEP. - * - * This IOTask is optional and should either (1) not be implemented by a backend at all or (2) be implemented as indicated above and set parameters.out->backendManagedBuffer = true. 
- */ - virtual void getBufferView(Writable*, Parameter< Operation::GET_BUFFER_VIEW >& parameters) - { + /** Get a view into a dataset buffer that can be filled by a user. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The dataset should be associated with the Writable. + * The operation should fail if the dataset does not exist. + * The operation should fail if the chunk extent parameters.extent is not + * smaller or equals in every dimension. The operation should fail if chunk + * positions parameters.offset+parameters.extent do not reside inside the + * dataset. The dataset should match the dataype parameters.dtype. The + * buffer should be stored as a cast-to-char pointer to a flattened version + * of the backend buffer in parameters.out->ptr. The chunk is stored + * row-major. The buffer's content should be written to storage not before + * the next call to AbstractIOHandler::flush where + * AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. The precise + * time of data consumption is defined by the backend: + * * Data written to the returned buffer should be consumed not earlier than + * the next call to AbstractIOHandler::flush where + * AbstractIOHandler::m_flushLevel == FlushLevel::InternalFlush. + * * Data should be consumed not later than the next Operation::ADVANCE task + * where parameter.mode == AdvanceMode::ENDSTEP. + * + * This IOTask is optional and should either (1) not be implemented by a + * backend at all or (2) be implemented as indicated above and set + * parameters.out->backendManagedBuffer = true. + */ + virtual void + getBufferView(Writable *, Parameter ¶meters) + { // default implementation: operation unsupported by backend parameters.out->backendManagedBuffer = false; - } - /** Create a single attribute and fill the value, possibly overwriting an existing attribute. - * - * The operation should fail if m_handler->m_frontendAccess is Access::READ_ONLY. - * The attribute should have the name parameters.name. This name should not contain a slash ("/"). - * The attribute should be of datatype parameters.dtype. - * Any existing attribute with the same name should be overwritten. If possible, only the value should be changed if the datatype stays the same. - * The attribute should be written to physical storage after the operation completes successfully. - * All datatypes of Datatype should be supported in a type-safe way. - */ - virtual void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) = 0; - /** Read a chunk of data from an existing dataset. - * - * The dataset should be associated with the Writable. - * The operation should fail if the dataset does not exist. - * The operation should fail if the chunk extent parameters.extent is not smaller or equals in every dimension. - * The operation should fail if chunk positions parameters.offset+parameters.extent do not reside inside the dataset. - * The dataset should match the dataype parameters.dtype. - * The data parameters.data should be a cast-to-void pointer to a flattened version of the chunk data. The chunk should be stored row-major. - * The region of the chunk should be written to the location indicated by the pointer after the operation completes successfully. - */ - virtual void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) = 0; - /** Read the value of an existing attribute. - * - * The operation should fail if the Writable was not marked written. - * The operation should fail if the attribute does not exist. 
- * The attribute should be associated with the Writable and have the name parameters.name. This name should not contain a slash ("/"). - * The attribute datatype should be stored in the location indicated by the pointer parameters.dtype. - * The attribute value should be stored as a generic Variant::resource in the location indicated by the pointer parameters.resource. - * All datatypes of Datatype should be supported in a type-safe way. - */ - virtual void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) = 0; - /** List all paths/sub-groups inside a group, non-recursively. - * - * The operation should fail if the Writable was not marked written. - * The operation should fail if the Writable is not a group. - * The list of group names should be stored in the location indicated by the pointer parameters.paths. - */ - virtual void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) = 0; - /** List all datasets inside a group, non-recursively. - * - * The operation should fail if the Writable was not marked written. - * The operation should fail if the Writable is not a group. - * The list of dataset names should be stored in the location indicated by the pointer parameters.datasets. - */ - virtual void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) = 0; - /** List all attributes associated with an object. - * - * The operation should fail if the Writable was not marked written. - * The attribute should be associated with the Writable. - * The list of attribute names should be stored in the location indicated by the pointer parameters.attributes. - */ - virtual void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) = 0; + } + /** Create a single attribute and fill the value, possibly overwriting an + * existing attribute. + * + * The operation should fail if m_handler->m_frontendAccess is + * Access::READ_ONLY. The attribute should have the name parameters.name. + * This name should not contain a slash ("/"). The attribute should be of + * datatype parameters.dtype. Any existing attribute with the same name + * should be overwritten. If possible, only the value should be changed if + * the datatype stays the same. The attribute should be written to physical + * storage after the operation completes successfully. All datatypes of + * Datatype should be supported in a type-safe way. + */ + virtual void + writeAttribute(Writable *, Parameter const &) = 0; + /** Read a chunk of data from an existing dataset. + * + * The dataset should be associated with the Writable. + * The operation should fail if the dataset does not exist. + * The operation should fail if the chunk extent parameters.extent is not + * smaller or equals in every dimension. The operation should fail if chunk + * positions parameters.offset+parameters.extent do not reside inside the + * dataset. The dataset should match the dataype parameters.dtype. The data + * parameters.data should be a cast-to-void pointer to a flattened version + * of the chunk data. The chunk should be stored row-major. The region of + * the chunk should be written to the location indicated by the pointer + * after the operation completes successfully. + */ + virtual void + readDataset(Writable *, Parameter &) = 0; + /** Read the value of an existing attribute. + * + * The operation should fail if the Writable was not marked written. + * The operation should fail if the attribute does not exist. + * The attribute should be associated with the Writable and have the name + * parameters.name. 
This name should not contain a slash ("/"). The + * attribute datatype should be stored in the location indicated by the + * pointer parameters.dtype. The attribute value should be stored as a + * generic Variant::resource in the location indicated by the pointer + * parameters.resource. All datatypes of Datatype should be supported in a + * type-safe way. + */ + virtual void + readAttribute(Writable *, Parameter &) = 0; + /** List all paths/sub-groups inside a group, non-recursively. + * + * The operation should fail if the Writable was not marked written. + * The operation should fail if the Writable is not a group. + * The list of group names should be stored in the location indicated by the + * pointer parameters.paths. + */ + virtual void listPaths(Writable *, Parameter &) = 0; + /** List all datasets inside a group, non-recursively. + * + * The operation should fail if the Writable was not marked written. + * The operation should fail if the Writable is not a group. + * The list of dataset names should be stored in the location indicated by + * the pointer parameters.datasets. + */ + virtual void + listDatasets(Writable *, Parameter &) = 0; + /** List all attributes associated with an object. + * + * The operation should fail if the Writable was not marked written. + * The attribute should be associated with the Writable. + * The list of attribute names should be stored in the location indicated by + * the pointer parameters.attributes. + */ + virtual void + listAttributes(Writable *, Parameter &) = 0; - AbstractIOHandler* m_handler; -}; //AbstractIOHandlerImpl -} // openPMD + AbstractIOHandler *m_handler; +}; // AbstractIOHandlerImpl +} // namespace openPMD diff --git a/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp b/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp index a7ddae605b..efa8b238ae 100644 --- a/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp +++ b/include/openPMD/IO/AbstractIOHandlerImplCommon.hpp @@ -21,7 +21,6 @@ #pragma once - #include "openPMD/IO/AbstractFilePosition.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/AbstractIOHandlerImpl.hpp" @@ -32,26 +31,24 @@ #include #include - - namespace openPMD { -template < typename FilePositionType = AbstractFilePosition > +template class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl { // friend struct detail::BufferedActions; public: - explicit AbstractIOHandlerImplCommon( AbstractIOHandler * handler ); + explicit AbstractIOHandlerImplCommon(AbstractIOHandler *handler); - ~AbstractIOHandlerImplCommon( ) override; + ~AbstractIOHandlerImplCommon() override; protected: /** * map each Writable to its associated file contains only the filename, * without the OS path */ - std::unordered_map< Writable *, InvalidatableFile > m_files; - std::unordered_set< InvalidatableFile > m_dirty; + std::unordered_map m_files; + std::unordered_set m_dirty; enum PossiblyExisting { @@ -60,33 +57,35 @@ class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl PE_NewlyCreated, }; - std::tuple< InvalidatableFile, - std::unordered_map< Writable *, InvalidatableFile >::iterator, - bool - > getPossiblyExisting( std::string file ); + std::tuple< + InvalidatableFile, + std::unordered_map::iterator, + bool> + getPossiblyExisting(std::string file); - void associateWithFile( Writable * writable, InvalidatableFile file ); + void associateWithFile(Writable *writable, InvalidatableFile file); /** * * @return Full OS path of the file. 
*/ - std::string fullPath( InvalidatableFile ); + std::string fullPath(InvalidatableFile); - std::string fullPath( std::string ); + std::string fullPath(std::string); /** * Get the writable's containing file. * @param writable The writable whose containing file to figure out. * @param preferParentFile If true, the file is set to the parent's file if * present. Otherwise, the parent file is only considered if no own file - * is defined. This is usually needed when switching between iterations when opening paths. + * is defined. This is usually needed when switching between iterations + * when opening paths. * @return The containing file of the writable. If its parent is associated * with another file, update the writable to match its parent and return * the refreshed file. */ InvalidatableFile - refreshFileFromParent( Writable * writable, bool preferParentFile ); + refreshFileFromParent(Writable *writable, bool preferParentFile); /** * Figure out the file position of the writable. @@ -95,8 +94,8 @@ class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl * @param write Whether to refresh the writable's file position. * @return The current file position. */ - std::shared_ptr< FilePositionType > - setAndGetFilePosition( Writable * writable, bool write = true ); + std::shared_ptr + setAndGetFilePosition(Writable *writable, bool write = true); /** * Figure out the file position of the writable and extend it. @@ -104,54 +103,53 @@ class AbstractIOHandlerImplCommon : public AbstractIOHandlerImpl * @param extend The extension string. * @return The current file position. */ - virtual std::shared_ptr< FilePositionType > - setAndGetFilePosition( Writable * writable, std::string extend ); + virtual std::shared_ptr + setAndGetFilePosition(Writable *writable, std::string extend); /** * @return A string representation of the file position. */ virtual std::string - filePositionToString( std::shared_ptr< FilePositionType > ) = 0; + filePositionToString(std::shared_ptr) = 0; /** * @return A new file position that is extended with the given string. 
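The refreshFileFromParent() documentation above encodes a small decision tree: prefer the parent's file when asked to, otherwise use the writable's own association, otherwise fall back to the parent, and fail for an unassociated root. A standalone sketch of that rule follows, with simplified stand-in types and hypothetical names (Node, resolveFile); the real implementation additionally caches the parent's file on the child via associateWithFile().

#include <iostream>
#include <map>
#include <stdexcept>
#include <string>

struct Node
{
    Node *parent = nullptr;
};

std::string resolveFile(
    std::map<Node *, std::string> const &files, Node *node, bool preferParentFile)
{
    if (preferParentFile && node->parent)
        return files.at(node->parent); // e.g. when switching iterations
    auto own = files.find(node);
    if (own != files.end())
        return own->second; // the writable already has its own file
    if (node->parent)
        return files.at(node->parent); // inherit from the parent
    throw std::runtime_error("Root object must be opened explicitly.");
}

int main()
{
    Node root, child{&root};
    std::map<Node *, std::string> files{{&root, "series.h5"}};
    std::cout << resolveFile(files, &child, false) << std::endl; // series.h5
}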
*/ - virtual std::shared_ptr< FilePositionType > - extendFilePosition( std::shared_ptr< FilePositionType > const &, - std::string ) = 0; + virtual std::shared_ptr extendFilePosition( + std::shared_ptr const &, std::string) = 0; }; -template < typename FilePositionType > -AbstractIOHandlerImplCommon< FilePositionType >::AbstractIOHandlerImplCommon( - AbstractIOHandler * handler ) -: AbstractIOHandlerImpl{handler} -{ -} - - -template < typename FilePositionType > -AbstractIOHandlerImplCommon< - FilePositionType >::~AbstractIOHandlerImplCommon( ) = default; - - -template < typename FilePositionType > -std::tuple< InvalidatableFile, - std::unordered_map< Writable *, InvalidatableFile >::iterator, - bool > -AbstractIOHandlerImplCommon< FilePositionType >::getPossiblyExisting( - std::string file ) +template +AbstractIOHandlerImplCommon::AbstractIOHandlerImplCommon( + AbstractIOHandler *handler) + : AbstractIOHandlerImpl{handler} +{} + +template +AbstractIOHandlerImplCommon::~AbstractIOHandlerImplCommon() = + default; + +template +std::tuple< + InvalidatableFile, + std::unordered_map::iterator, + bool> +AbstractIOHandlerImplCommon::getPossiblyExisting( + std::string file) { auto it = std::find_if( - m_files.begin( ), m_files.end( ), - [file]( std::unordered_map< - Writable *, InvalidatableFile >::value_type const & entry ) { - return *entry.second == file && entry.second.valid( ); - } ); + m_files.begin(), + m_files.end(), + [file]( + std::unordered_map::value_type const + &entry) { + return *entry.second == file && entry.second.valid(); + }); bool newlyCreated; InvalidatableFile name; - if ( it == m_files.end( ) ) + if (it == m_files.end()) { name = file; newlyCreated = true; @@ -163,33 +161,30 @@ AbstractIOHandlerImplCommon< FilePositionType >::getPossiblyExisting( } return std::tuple< InvalidatableFile, - std::unordered_map< Writable *, InvalidatableFile >::iterator, bool >( - std::move( name ), it, newlyCreated ); + std::unordered_map::iterator, + bool>(std::move(name), it, newlyCreated); } - -template < typename FilePositionType > -void AbstractIOHandlerImplCommon< FilePositionType >::associateWithFile( - Writable * writable, InvalidatableFile file ) +template +void AbstractIOHandlerImplCommon::associateWithFile( + Writable *writable, InvalidatableFile file) { // make sure to overwrite - m_files[writable] = std::move( file ); + m_files[writable] = std::move(file); } - -template < typename FilePositionType > -std::string AbstractIOHandlerImplCommon< FilePositionType >::fullPath( - InvalidatableFile fileName ) +template +std::string AbstractIOHandlerImplCommon::fullPath( + InvalidatableFile fileName) { - return fullPath( *fileName ); + return fullPath(*fileName); } - -template < typename FilePositionType > -std::string AbstractIOHandlerImplCommon< FilePositionType >::fullPath( - std::string fileName ) +template +std::string +AbstractIOHandlerImplCommon::fullPath(std::string fileName) { - if ( auxiliary::ends_with( m_handler->directory, "/" ) ) + if (auxiliary::ends_with(m_handler->directory, "/")) { return m_handler->directory + fileName; } @@ -199,77 +194,76 @@ std::string AbstractIOHandlerImplCommon< FilePositionType >::fullPath( } } -template< typename FilePositionType > +template InvalidatableFile -AbstractIOHandlerImplCommon< FilePositionType >::refreshFileFromParent( - Writable * writable, bool preferParentFile ) +AbstractIOHandlerImplCommon::refreshFileFromParent( + Writable *writable, bool preferParentFile) { - auto getFileFromParent = [ writable, this ]() { - auto file = m_files.find( 
writable->parent )->second; - associateWithFile( writable, file ); + auto getFileFromParent = [writable, this]() { + auto file = m_files.find(writable->parent)->second; + associateWithFile(writable, file); return file; }; - if( preferParentFile && writable->parent ) + if (preferParentFile && writable->parent) { return getFileFromParent(); } else { - auto it = m_files.find( writable ); - if( it != m_files.end() ) + auto it = m_files.find(writable); + if (it != m_files.end()) { - return m_files.find( writable )->second; + return m_files.find(writable)->second; } - else if( writable->parent ) + else if (writable->parent) { return getFileFromParent(); } else { throw std::runtime_error( - "Internal error: Root object must be opened explicitly." ); + "Internal error: Root object must be opened explicitly."); } } } -template< typename FilePositionType > -std::shared_ptr< FilePositionType > -AbstractIOHandlerImplCommon< FilePositionType >::setAndGetFilePosition( - Writable * writable, bool write ) +template +std::shared_ptr +AbstractIOHandlerImplCommon::setAndGetFilePosition( + Writable *writable, bool write) { - std::shared_ptr< AbstractFilePosition > res; + std::shared_ptr res; - if ( writable->abstractFilePosition ) + if (writable->abstractFilePosition) { res = writable->abstractFilePosition; } - else if ( writable->parent ) + else if (writable->parent) { res = writable->parent->abstractFilePosition; } else { // we are root - res = std::make_shared< FilePositionType >( ); + res = std::make_shared(); } - if ( write ) + if (write) { writable->abstractFilePosition = res; } - return std::dynamic_pointer_cast< FilePositionType >( res ); + return std::dynamic_pointer_cast(res); } - -template < typename FilePositionType > -std::shared_ptr< FilePositionType > -AbstractIOHandlerImplCommon< FilePositionType >::setAndGetFilePosition( - Writable * writable, std::string extend ) +template +std::shared_ptr +AbstractIOHandlerImplCommon::setAndGetFilePosition( + Writable *writable, std::string extend) { - if ( !auxiliary::starts_with( extend, '/' ) ) + if (!auxiliary::starts_with(extend, '/')) { extend = "/" + extend; } - auto oldPos = setAndGetFilePosition( writable, false ); - auto res = extendFilePosition( oldPos, extend ); + auto oldPos = setAndGetFilePosition(writable, false); + auto res = extendFilePosition(oldPos, extend); writable->abstractFilePosition = res; return res; diff --git a/include/openPMD/IO/Access.hpp b/include/openPMD/IO/Access.hpp index 272ef80e80..93ba662241 100644 --- a/include/openPMD/IO/Access.hpp +++ b/include/openPMD/IO/Access.hpp @@ -20,24 +20,25 @@ */ #pragma once - namespace openPMD { - /** File access mode to use during IO. - */ - enum class Access - { - READ_ONLY, //!< open series as read-only, fails if series is not found - READ_WRITE, //!< open existing series as writable - CREATE //!< create new series and truncate existing (files) - }; // Access - +/** File access mode to use during IO. 
+ */ +enum class Access +{ + READ_ONLY, //!< open series as read-only, fails if series is not found + READ_WRITE, //!< open existing series as writable + CREATE //!< create new series and truncate existing (files) +}; // Access - // deprecated name (used prior to 0.12.0) - // note: "using old [[deprecated(msg)]] = new;" is still badly supported, thus using typedef - // https://en.cppreference.com/w/cpp/language/attributes/deprecated - // - NVCC < 11.0.167 works but noisy "warning: attribute does not apply to any entity" - // Nvidia bug report: 2991260 - // - Intel C++ 19.1.0.20200306 bug report: 04651484 - [[deprecated("AccessType is deprecated, use Access instead.")]] typedef Access AccessType; +// deprecated name (used prior to 0.12.0) +// note: "using old [[deprecated(msg)]] = new;" is still badly supported, thus +// using typedef +// https://en.cppreference.com/w/cpp/language/attributes/deprecated +// - NVCC < 11.0.167 works but noisy "warning: attribute does not apply to any +// entity" +// Nvidia bug report: 2991260 +// - Intel C++ 19.1.0.20200306 bug report: 04651484 +[[deprecated("AccessType is deprecated, use Access instead.")]] typedef Access + AccessType; } // namespace openPMD diff --git a/include/openPMD/IO/DummyIOHandler.hpp b/include/openPMD/IO/DummyIOHandler.hpp index 50ad87cdd5..8a84bb0919 100644 --- a/include/openPMD/IO/DummyIOHandler.hpp +++ b/include/openPMD/IO/DummyIOHandler.hpp @@ -24,25 +24,26 @@ #include "openPMD/IO/Access.hpp" #include "openPMD/IO/IOTask.hpp" -#include #include - +#include namespace openPMD { - /** Dummy handler without any IO operations. - */ - class DummyIOHandler : public AbstractIOHandler - { - public: - DummyIOHandler(std::string, Access); - ~DummyIOHandler() override = default; +/** Dummy handler without any IO operations. + */ +class DummyIOHandler : public AbstractIOHandler +{ +public: + DummyIOHandler(std::string, Access); + ~DummyIOHandler() override = default; - /** No-op consistent with the IOHandler interface to enable library use without IO. - */ - void enqueue(IOTask const&) override; - /** No-op consistent with the IOHandler interface to enable library use without IO. - */ - std::future< void > flush() override; - }; // DummyIOHandler + /** No-op consistent with the IOHandler interface to enable library use + * without IO. + */ + void enqueue(IOTask const &) override; + /** No-op consistent with the IOHandler interface to enable library use + * without IO. + */ + std::future flush() override; +}; // DummyIOHandler } // namespace openPMD diff --git a/include/openPMD/IO/Format.hpp b/include/openPMD/IO/Format.hpp index 7b277b72b1..993f7bae90 100644 --- a/include/openPMD/IO/Format.hpp +++ b/include/openPMD/IO/Format.hpp @@ -22,33 +22,32 @@ #include - namespace openPMD { - /** File format to use during IO. - */ - enum class Format - { - HDF5, - ADIOS1, - ADIOS2, - ADIOS2_SST, - ADIOS2_SSC, - JSON, - DUMMY - }; +/** File format to use during IO. + */ +enum class Format +{ + HDF5, + ADIOS1, + ADIOS2, + ADIOS2_SST, + ADIOS2_SSC, + JSON, + DUMMY +}; - /** Determine the storage format of a Series from the used filename extension. - * - * @param filename string containing the filename. - * @return Format that best fits the filename extension. - */ - Format determineFormat(std::string const& filename); +/** Determine the storage format of a Series from the used filename extension. + * + * @param filename string containing the filename. + * @return Format that best fits the filename extension. 
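The deprecation note above (a typedef instead of a deprecated alias declaration, chosen for compiler compatibility) can be tried in isolation. A minimal sketch with hypothetical names:

#include <iostream>

enum class NewName
{
    A,
    B
};

// same idiom as above: attach [[deprecated]] to a typedef so old code keeps
// compiling while every use of the old name triggers a warning
[[deprecated("OldName is deprecated, use NewName instead.")]] typedef NewName
    OldName;

int main()
{
    OldName v = NewName::A; // expect a deprecation warning here
    std::cout << static_cast<int>(v) << std::endl;
}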
+ */ +Format determineFormat(std::string const &filename); - /** Determine the default filename suffix for a given storage format. - * - * @param f File format to determine suffix for. - * @return String containing the default filename suffix - */ - std::string suffix(Format f); -} // openPMD +/** Determine the default filename suffix for a given storage format. + * + * @param f File format to determine suffix for. + * @return String containing the default filename suffix + */ +std::string suffix(Format f); +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp b/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp index 4785cf4f66..da7ff2f68f 100644 --- a/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp +++ b/include/openPMD/IO/HDF5/HDF5Auxiliary.hpp @@ -20,9 +20,9 @@ */ #pragma once -#include "openPMD/config.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" +#include "openPMD/config.hpp" #include @@ -31,42 +31,36 @@ #include #include - namespace openPMD { - struct GetH5DataType - { - std::unordered_map< std::string, hid_t > m_userTypes; +struct GetH5DataType +{ + std::unordered_map<std::string, hid_t> m_userTypes; - GetH5DataType( std::unordered_map< std::string, hid_t > userTypes ) - : m_userTypes{ std::move(userTypes) } - { - } + GetH5DataType(std::unordered_map<std::string, hid_t> userTypes) + : m_userTypes{std::move(userTypes)} + {} - hid_t - operator()(Attribute const &att); - }; + hid_t operator()(Attribute const &att); +}; - hid_t - getH5DataSpace(Attribute const& att); +hid_t getH5DataSpace(Attribute const &att); - std::string - concrete_h5_file_position(Writable* w); +std::string concrete_h5_file_position(Writable *w); - /** Computes the chunk dimensions for a dataset. - * - * Chunk dimensions are selected to create chunks sizes between - * 64KByte and 4MB. Smaller chunk sizes are inefficient due to overhead, - * larger chunks do not map well to file system blocks and striding. - * - * Chunk dimensions are less or equal to dataset dimensions and do - * not need to be a factor of the respective dataset dimension. - * - * @param[in] dims dimensions of dataset to get chunk dims for - * @param[in] typeSize size of each element in bytes - * @return array for resulting chunk dimensions - */ - std::vector< hsize_t > - getOptimalChunkDims( std::vector< hsize_t > const dims, - size_t const typeSize ); +/** Computes the chunk dimensions for a dataset. + * + * Chunk dimensions are selected to create chunk sizes between + * 64KByte and 4MB. Smaller chunk sizes are inefficient due to overhead, + * larger chunks do not map well to file system blocks and striding. + * + * Chunk dimensions are less than or equal to dataset dimensions and do + * not need to be a factor of the respective dataset dimension.
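The constraints documented for getOptimalChunkDims() can be illustrated with a naive heuristic. The sketch below only enforces the 4 MiB upper bound by halving the slowest-varying dimension; it is an illustration under those stated assumptions, not the library's actual algorithm (which lives in the corresponding implementation file and is not part of this diff).

#include <cstddef>
#include <iostream>
#include <vector>

std::vector<std::size_t>
naiveChunkDims(std::vector<std::size_t> dims, std::size_t typeSize)
{
    constexpr std::size_t maxBytes = 4u * 1024u * 1024u; // 4 MiB upper bound
    std::vector<std::size_t> chunk = dims; // start from the full dataset extent
    auto bytes = [&]() {
        std::size_t b = typeSize;
        for (auto c : chunk)
            b *= c;
        return b;
    };
    // shrink the slowest-varying dimension first until the chunk fits;
    // chunk dims never exceed the dataset dims and need not divide them
    for (std::size_t d = 0; d < chunk.size() && bytes() > maxBytes;)
    {
        if (chunk[d] > 1)
            chunk[d] = (chunk[d] + 1) / 2;
        else
            ++d;
    }
    return chunk;
}

int main()
{
    for (auto c : naiveChunkDims({1024, 1024, 1024}, sizeof(double)))
        std::cout << c << " "; // prints the chosen chunk dimensions
    std::cout << std::endl;
}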
+ * + * @param[in] dims dimensions of dataset to get chunk dims for + * @param[in] typeSize size of each element in bytes + * @return array for resulting chunk dimensions + */ +std::vector +getOptimalChunkDims(std::vector const dims, size_t const typeSize); } // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5FilePosition.hpp b/include/openPMD/IO/HDF5/HDF5FilePosition.hpp index 3537e3252e..eb615907b7 100644 --- a/include/openPMD/IO/HDF5/HDF5FilePosition.hpp +++ b/include/openPMD/IO/HDF5/HDF5FilePosition.hpp @@ -22,15 +22,13 @@ #include "openPMD/IO/AbstractFilePosition.hpp" - namespace openPMD { struct HDF5FilePosition : public AbstractFilePosition { - HDF5FilePosition(std::string const& s) - : location{s} - { } + HDF5FilePosition(std::string const &s) : location{s} + {} std::string location; -}; //HDF5FilePosition -} // openPMD +}; // HDF5FilePosition +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp index 77dbf26c37..9c2433a7da 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandler.hpp @@ -20,14 +20,13 @@ */ #pragma once -#include "openPMD/auxiliary/JSON_internal.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/JSON_internal.hpp" #include #include #include - namespace openPMD { class HDF5IOHandlerImpl; @@ -38,11 +37,14 @@ class HDF5IOHandler : public AbstractIOHandler HDF5IOHandler(std::string path, Access, json::TracingJSON config); ~HDF5IOHandler() override; - std::string backendName() const override { return "HDF5"; } + std::string backendName() const override + { + return "HDF5"; + } - std::future< void > flush() override; + std::future flush() override; private: - std::unique_ptr< HDF5IOHandlerImpl > m_impl; + std::unique_ptr m_impl; }; // HDF5IOHandler -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp index 92d39ceabb..adee717ff0 100644 --- a/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp +++ b/include/openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp @@ -22,78 +22,89 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_HDF5 -# include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/IO/AbstractIOHandlerImpl.hpp" -# include "openPMD/auxiliary/JSON_internal.hpp" +#include "openPMD/auxiliary/JSON_internal.hpp" -# include -# include -# include -# include +#include +#include +#include +#include #endif - namespace openPMD { #if openPMD_HAVE_HDF5 - class HDF5IOHandlerImpl : public AbstractIOHandlerImpl - { - public: - HDF5IOHandlerImpl(AbstractIOHandler*, json::TracingJSON config); - ~HDF5IOHandlerImpl() override; +class HDF5IOHandlerImpl : public AbstractIOHandlerImpl +{ +public: + HDF5IOHandlerImpl(AbstractIOHandler *, json::TracingJSON config); + ~HDF5IOHandlerImpl() override; - void createFile(Writable*, Parameter< Operation::CREATE_FILE > const&) override; - void createPath(Writable*, Parameter< Operation::CREATE_PATH > const&) override; - void createDataset(Writable*, Parameter< Operation::CREATE_DATASET > const&) override; - void extendDataset(Writable*, Parameter< Operation::EXTEND_DATASET > const&) override; - void availableChunks(Writable *, Parameter< Operation::AVAILABLE_CHUNKS > &) override; - void openFile(Writable*, Parameter< Operation::OPEN_FILE > const&) override; - void closeFile(Writable*, Parameter< Operation::CLOSE_FILE > const&) override; - void openPath(Writable*, Parameter< Operation::OPEN_PATH > const&) override; - void 
openDataset(Writable*, Parameter< Operation::OPEN_DATASET > &) override; - void deleteFile(Writable*, Parameter< Operation::DELETE_FILE > const&) override; - void deletePath(Writable*, Parameter< Operation::DELETE_PATH > const&) override; - void deleteDataset(Writable*, Parameter< Operation::DELETE_DATASET > const&) override; - void deleteAttribute(Writable*, Parameter< Operation::DELETE_ATT > const&) override; - void writeDataset(Writable*, Parameter< Operation::WRITE_DATASET > const&) override; - void writeAttribute(Writable*, Parameter< Operation::WRITE_ATT > const&) override; - void readDataset(Writable*, Parameter< Operation::READ_DATASET > &) override; - void readAttribute(Writable*, Parameter< Operation::READ_ATT > &) override; - void listPaths(Writable*, Parameter< Operation::LIST_PATHS > &) override; - void listDatasets(Writable*, Parameter< Operation::LIST_DATASETS > &) override; - void listAttributes(Writable*, Parameter< Operation::LIST_ATTS > &) override; + void + createFile(Writable *, Parameter const &) override; + void + createPath(Writable *, Parameter const &) override; + void createDataset( + Writable *, Parameter const &) override; + void extendDataset( + Writable *, Parameter const &) override; + void availableChunks( + Writable *, Parameter &) override; + void openFile(Writable *, Parameter const &) override; + void + closeFile(Writable *, Parameter const &) override; + void openPath(Writable *, Parameter const &) override; + void openDataset(Writable *, Parameter &) override; + void + deleteFile(Writable *, Parameter const &) override; + void + deletePath(Writable *, Parameter const &) override; + void deleteDataset( + Writable *, Parameter const &) override; + void deleteAttribute( + Writable *, Parameter const &) override; + void writeDataset( + Writable *, Parameter const &) override; + void writeAttribute( + Writable *, Parameter const &) override; + void readDataset(Writable *, Parameter &) override; + void readAttribute(Writable *, Parameter &) override; + void listPaths(Writable *, Parameter &) override; + void + listDatasets(Writable *, Parameter &) override; + void listAttributes(Writable *, Parameter &) override; - std::unordered_map< Writable*, std::string > m_fileNames; - std::unordered_map< std::string, hid_t > m_fileNamesWithID; + std::unordered_map m_fileNames; + std::unordered_map m_fileNamesWithID; - std::unordered_set< hid_t > m_openFileIDs; + std::unordered_set m_openFileIDs; - hid_t m_datasetTransferProperty; - hid_t m_fileAccessProperty; - hid_t m_fileCreateProperty; + hid_t m_datasetTransferProperty; + hid_t m_fileAccessProperty; + hid_t m_fileCreateProperty; - hbool_t m_hdf5_collective_metadata = 1; + hbool_t m_hdf5_collective_metadata = 1; - // h5py compatible types for bool and complex - hid_t m_H5T_BOOL_ENUM; - hid_t m_H5T_CFLOAT; - hid_t m_H5T_CDOUBLE; - hid_t m_H5T_CLONG_DOUBLE; + // h5py compatible types for bool and complex + hid_t m_H5T_BOOL_ENUM; + hid_t m_H5T_CFLOAT; + hid_t m_H5T_CDOUBLE; + hid_t m_H5T_CLONG_DOUBLE; - private: - json::TracingJSON m_config; - std::string m_chunks = "auto"; - struct File - { - std::string name; - hid_t id; - }; - std::optional< File > getFile( Writable * ); - }; // HDF5IOHandlerImpl -#else - class HDF5IOHandlerImpl +private: + json::TracingJSON m_config; + std::string m_chunks = "auto"; + struct File { - }; // HDF5IOHandlerImpl + std::string name; + hid_t id; + }; + std::optional getFile(Writable *); +}; // HDF5IOHandlerImpl +#else +class HDF5IOHandlerImpl +{}; // HDF5IOHandlerImpl #endif -} // openPMD 
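The m_H5T_BOOL_ENUM member above exists because HDF5 has no native bool; the comment notes that the type is kept h5py-compatible. As a standalone sketch only (not code from this change set, and assuming the usual h5py convention of an enum with members FALSE=0 and TRUE=1), such a type can be built with the plain HDF5 C API like this; the construction actually used by the library lives in the HDF5 backend sources, not in this header.

#include <hdf5.h>

#include <cstdint>
#include <iostream>

int main()
{
    // two-valued enum over an 8-bit integer, assumed to match h5py's layout
    hid_t boolType = H5Tenum_create(H5T_NATIVE_INT8);
    std::int8_t val = 0;
    H5Tenum_insert(boolType, "FALSE", &val);
    val = 1;
    H5Tenum_insert(boolType, "TRUE", &val);

    std::cout << "enum members: " << H5Tget_nmembers(boolType) << std::endl;
    H5Tclose(boolType);
    return 0;
}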
+} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp index 71e1a13a25..512e3edbb2 100644 --- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp +++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandler.hpp @@ -20,35 +20,37 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/JSON_internal.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/JSON_internal.hpp" +#include "openPMD/config.hpp" #include #include #include - namespace openPMD { - class ParallelHDF5IOHandlerImpl; +class ParallelHDF5IOHandlerImpl; - class ParallelHDF5IOHandler : public AbstractIOHandler +class ParallelHDF5IOHandler : public AbstractIOHandler +{ +public: +#if openPMD_HAVE_MPI + ParallelHDF5IOHandler( + std::string path, Access, MPI_Comm, json::TracingJSON config); +#else + ParallelHDF5IOHandler(std::string path, Access, json::TracingJSON config); +#endif + ~ParallelHDF5IOHandler() override; + + std::string backendName() const override { - public: - #if openPMD_HAVE_MPI - ParallelHDF5IOHandler( - std::string path, Access, MPI_Comm, json::TracingJSON config); - #else - ParallelHDF5IOHandler(std::string path, Access, json::TracingJSON config); - #endif - ~ParallelHDF5IOHandler() override; - - std::string backendName() const override { return "MPI_HDF5"; } + return "MPI_HDF5"; + } - std::future< void > flush() override; + std::future flush() override; - private: - std::unique_ptr< ParallelHDF5IOHandlerImpl > m_impl; - }; // ParallelHDF5IOHandler -} // openPMD +private: + std::unique_ptr m_impl; +}; // ParallelHDF5IOHandler +} // namespace openPMD diff --git a/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp b/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp index 843280fc55..e1190b3d71 100644 --- a/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp +++ b/include/openPMD/IO/HDF5/ParallelHDF5IOHandlerImpl.hpp @@ -20,34 +20,32 @@ */ #pragma once -#include "openPMD/config.hpp" #include "openPMD/IO/AbstractIOHandlerImpl.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_MPI -# include -# if openPMD_HAVE_HDF5 -# include "openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp" -# include "openPMD/auxiliary/JSON_internal.hpp" -# endif +#include +#if openPMD_HAVE_HDF5 +#include "openPMD/IO/HDF5/HDF5IOHandlerImpl.hpp" +#include "openPMD/auxiliary/JSON_internal.hpp" +#endif #endif - namespace openPMD { #if openPMD_HAVE_HDF5 && openPMD_HAVE_MPI - class ParallelHDF5IOHandlerImpl : public HDF5IOHandlerImpl - { - public: - ParallelHDF5IOHandlerImpl( - AbstractIOHandler*, MPI_Comm, json::TracingJSON config); - ~ParallelHDF5IOHandlerImpl() override; +class ParallelHDF5IOHandlerImpl : public HDF5IOHandlerImpl +{ +public: + ParallelHDF5IOHandlerImpl( + AbstractIOHandler *, MPI_Comm, json::TracingJSON config); + ~ParallelHDF5IOHandlerImpl() override; - MPI_Comm m_mpiComm; - MPI_Info m_mpiInfo; - }; // ParallelHDF5IOHandlerImpl + MPI_Comm m_mpiComm; + MPI_Info m_mpiInfo; +}; // ParallelHDF5IOHandlerImpl #else - class ParallelHDF5IOHandlerImpl - { - }; // ParallelHDF5IOHandlerImpl +class ParallelHDF5IOHandlerImpl +{}; // ParallelHDF5IOHandlerImpl #endif -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/IO/IOTask.hpp b/include/openPMD/IO/IOTask.hpp index 8c5a0c40e8..15177366f6 100644 --- a/include/openPMD/IO/IOTask.hpp +++ b/include/openPMD/IO/IOTask.hpp @@ -20,61 +20,44 @@ */ #pragma once -#include "openPMD/auxiliary/Export.hpp" -#include "openPMD/auxiliary/Variant.hpp" -#include 
"openPMD/backend/Attribute.hpp" #include "openPMD/ChunkInfo.hpp" #include "openPMD/Dataset.hpp" #include "openPMD/IterationEncoding.hpp" #include "openPMD/Streaming.hpp" +#include "openPMD/auxiliary/Export.hpp" +#include "openPMD/auxiliary/Variant.hpp" +#include "openPMD/backend/Attribute.hpp" -#include #include +#include #include #include #include - namespace openPMD { class Attributable; class Writable; -Writable* -getWritable(Attributable*); +Writable *getWritable(Attributable *); /** Type of IO operation between logical and persistent data. */ -OPENPMDAPI_EXPORT_ENUM_CLASS(Operation) -{ - CREATE_FILE, - OPEN_FILE, - CLOSE_FILE, - DELETE_FILE, - - CREATE_PATH, - CLOSE_PATH, - OPEN_PATH, - DELETE_PATH, +OPENPMDAPI_EXPORT_ENUM_CLASS(Operation){ + CREATE_FILE, OPEN_FILE, CLOSE_FILE, DELETE_FILE, + + CREATE_PATH, CLOSE_PATH, OPEN_PATH, DELETE_PATH, LIST_PATHS, - CREATE_DATASET, - EXTEND_DATASET, - OPEN_DATASET, - DELETE_DATASET, - WRITE_DATASET, - READ_DATASET, - LIST_DATASETS, - GET_BUFFER_VIEW, + CREATE_DATASET, EXTEND_DATASET, OPEN_DATASET, DELETE_DATASET, + WRITE_DATASET, READ_DATASET, LIST_DATASETS, GET_BUFFER_VIEW, - DELETE_ATT, - WRITE_ATT, - READ_ATT, - LIST_ATTS, + DELETE_ATT, WRITE_ATT, READ_ATT, LIST_ATTS, ADVANCE, AVAILABLE_CHUNKS //!< Query chunks that can be loaded in a dataset -}; // note: if you change the enum members here, please update docs/source/dev/design.rst +}; // note: if you change the enum members here, please update + // docs/source/dev/design.rst namespace internal { @@ -82,22 +65,23 @@ namespace internal * The returned strings are compile-time constants, so no worries about * pointer validity. */ - std::string operationAsString( Operation ); -} + std::string operationAsString(Operation); +} // namespace internal struct OPENPMDAPI_EXPORT AbstractParameter { virtual ~AbstractParameter() = default; AbstractParameter() = default; - //AbstractParameter(AbstractParameter&&) = default; + // AbstractParameter(AbstractParameter&&) = default; // avoid object slicing - AbstractParameter(const AbstractParameter&) = delete; - AbstractParameter& operator=(const AbstractParameter&) = delete; - virtual std::unique_ptr< AbstractParameter > clone() const = 0; + AbstractParameter(const AbstractParameter &) = delete; + AbstractParameter &operator=(const AbstractParameter &) = delete; + virtual std::unique_ptr clone() const = 0; }; -/** @brief Typesafe description of all required arguments for a specified Operation. +/** @brief Typesafe description of all required arguments for a specified + * Operation. * * @note Input operations (i.e. ones that transfer data from persistent files * to logical representations in openPMD-api) use shared pointers to @@ -105,7 +89,7 @@ struct OPENPMDAPI_EXPORT AbstractParameter * valid after the Operation has completed. * @tparam Operation Type of Operation to be executed. 
*/ -template< Operation > +template struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = delete; @@ -113,36 +97,38 @@ struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter Parameter(Parameter &&) = delete; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : - AbstractParameter(), name(p.name), encoding(p.encoding) {} + Parameter(Parameter const &p) + : AbstractParameter(), name(p.name), encoding(p.encoding) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CREATE_FILE >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; IterationEncoding encoding = IterationEncoding::groupBased; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : - AbstractParameter(), name(p.name), encoding(p.encoding) {} + Parameter(Parameter const &p) + : AbstractParameter(), name(p.name), encoding(p.encoding) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::OPEN_FILE >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; @@ -154,136 +140,144 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_FILE > : public AbstractPara IterationEncoding encoding = IterationEncoding::groupBased; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CLOSE_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter( Parameter const & ) : AbstractParameter() {} + Parameter(Parameter const &) : AbstractParameter() + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CLOSE_FILE >( *this ) ); + return std::unique_ptr( + new Parameter(*this)); } }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_FILE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), name(p.name) {} + Parameter(Parameter const &p) : AbstractParameter(), name(p.name) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_FILE >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), path(p.path) {} + Parameter(Parameter const &p) : AbstractParameter(), path(p.path) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new 
Parameter< Operation::CREATE_PATH >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string path = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CLOSE_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter( Parameter const & ) : AbstractParameter() - { - } + Parameter(Parameter const &) : AbstractParameter() + {} - Parameter & - operator=( Parameter const & ) + Parameter &operator=(Parameter const &) { return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CLOSE_PATH >( *this ) ); + return std::unique_ptr( + new Parameter(*this)); } }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), path(p.path) {} + Parameter(Parameter const &p) : AbstractParameter(), path(p.path) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::OPEN_PATH >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string path = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_PATH > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), path(p.path) {} + Parameter(Parameter const &p) : AbstractParameter(), path(p.path) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_PATH >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string path = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::LIST_PATHS > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), paths(p.paths) {} + Parameter(Parameter const &p) : AbstractParameter(), paths(p.paths) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::LIST_PATHS >(*this)); + return std::unique_ptr( + new Parameter(*this)); } - std::shared_ptr< std::vector< std::string > > paths - = std::make_shared< std::vector< std::string > >(); + std::shared_ptr> paths = + std::make_shared>(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), extent(p.extent), dtype(p.dtype), - options(p.options) {} + Parameter(Parameter const &p) + : AbstractParameter() + , name(p.name) + , extent(p.extent) + , dtype(p.dtype) + , options(p.options) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::CREATE_DATASET >(*this)); + return 
std::unique_ptr( + new Parameter(*this)); } std::string name = ""; @@ -297,75 +291,82 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::CREATE_DATASET > : public Abstrac * This function is useful for the createDataset() methods in, * IOHandlerImpl's, so putting that here is the simplest way to make it * available for them. */ - template< typename TracingJSON > + template static void warnUnusedParameters( TracingJSON &, - std::string const & currentBackendName, - std::string const & warningMessage ); + std::string const ¤tBackendName, + std::string const &warningMessage); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::EXTEND_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), extent(p.extent) {} + Parameter(Parameter const &p) : AbstractParameter(), extent(p.extent) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::EXTEND_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } Extent extent = {}; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::OPEN_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), dtype(p.dtype), extent(p.extent) {} + Parameter(Parameter const &p) + : AbstractParameter(), name(p.name), dtype(p.dtype), extent(p.extent) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::OPEN_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; - std::shared_ptr< Datatype > dtype - = std::make_shared< Datatype >(); - std::shared_ptr< Extent > extent - = std::make_shared< Extent >(); + std::shared_ptr dtype = std::make_shared(); + std::shared_ptr extent = std::make_shared(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), name(p.name) {} + Parameter(Parameter const &p) : AbstractParameter(), name(p.name) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - extent(p.extent), offset(p.offset), dtype(p.dtype), - data(p.data) {} + Parameter(Parameter const &p) + : AbstractParameter() + , extent(p.extent) + , offset(p.offset) + , dtype(p.dtype) + , data(p.data) + {} - Parameter& operator=(const Parameter& p) { + Parameter &operator=(const Parameter &p) + { this->extent = p.extent; this->offset = p.offset; this->dtype = p.dtype; @@ -373,28 +374,33 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_DATASET > : public Abstract return 
*this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::WRITE_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } Extent extent = {}; Offset offset = {}; Datatype dtype = Datatype::UNDEFINED; - std::shared_ptr< void const > data = nullptr; + std::shared_ptr data = nullptr; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::READ_DATASET > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - extent(p.extent), offset(p.offset), dtype(p.dtype), - data(p.data) {} + Parameter(Parameter const &p) + : AbstractParameter() + , extent(p.extent) + , offset(p.offset) + , dtype(p.dtype) + , data(p.data) + {} - Parameter& operator=(const Parameter &p) { + Parameter &operator=(const Parameter &p) + { this->extent = p.extent; this->offset = p.offset; this->dtype = p.dtype; @@ -402,46 +408,50 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::READ_DATASET > : public AbstractP return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::READ_DATASET >(*this)); + return std::unique_ptr( + new Parameter(*this)); } Extent extent = {}; Offset offset = {}; Datatype dtype = Datatype::UNDEFINED; - std::shared_ptr< void > data = nullptr; + std::shared_ptr data = nullptr; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::LIST_DATASETS > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - datasets(p.datasets) {} + Parameter(Parameter const &p) : AbstractParameter(), datasets(p.datasets) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::LIST_DATASETS >(*this)); + return std::unique_ptr( + new Parameter(*this)); } - std::shared_ptr< std::vector< std::string > > datasets - = std::make_shared< std::vector< std::string > >(); + std::shared_ptr> datasets = + std::make_shared>(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::GET_BUFFER_VIEW > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - offset(p.offset), extent(p.extent), dtype(p.dtype), update(p.update), - out(p.out) + Parameter(Parameter const &p) + : AbstractParameter() + , offset(p.offset) + , extent(p.extent) + , dtype(p.dtype) + , update(p.update) + , out(p.out) {} - Parameter & operator=(Parameter const & p) + Parameter &operator=(Parameter const &p) { offset = p.offset; extent = p.extent; @@ -451,11 +461,10 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::GET_BUFFER_VIEW > : public Abstra return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::GET_BUFFER_VIEW >(*this)); + return std::unique_ptr( + new Parameter(*this)); } // in parameters @@ -470,37 +479,42 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::GET_BUFFER_VIEW > : public Abstra unsigned viewIndex = 
0; void *ptr = nullptr; }; - std::shared_ptr< OutParameters > out = std::make_shared< OutParameters >(); + std::shared_ptr out = std::make_shared(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::DELETE_ATT > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), name(p.name) {} + Parameter(Parameter const &p) : AbstractParameter(), name(p.name) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::DELETE_ATT >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_ATT > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), dtype(p.dtype), resource(p.resource) {} + Parameter(Parameter const &p) + : AbstractParameter() + , name(p.name) + , dtype(p.dtype) + , resource(p.resource) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::WRITE_ATT >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; @@ -508,100 +522,101 @@ struct OPENPMDAPI_EXPORT Parameter< Operation::WRITE_ATT > : public AbstractPara Attribute::resource resource; }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::READ_ATT > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - name(p.name), dtype(p.dtype), resource(p.resource) {} + Parameter(Parameter const &p) + : AbstractParameter() + , name(p.name) + , dtype(p.dtype) + , resource(p.resource) + {} - Parameter& operator=(const Parameter &p) { + Parameter &operator=(const Parameter &p) + { this->name = p.name; this->dtype = p.dtype; this->resource = p.resource; return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::READ_ATT >(*this)); + return std::unique_ptr( + new Parameter(*this)); } std::string name = ""; - std::shared_ptr< Datatype > dtype - = std::make_shared< Datatype >(); - std::shared_ptr< Attribute::resource > resource - = std::make_shared< Attribute::resource >(); + std::shared_ptr dtype = std::make_shared(); + std::shared_ptr resource = + std::make_shared(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::LIST_ATTS > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter(Parameter const & p) : AbstractParameter(), - attributes(p.attributes) {} + Parameter(Parameter const &p) + : AbstractParameter(), attributes(p.attributes) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::LIST_ATTS >(*this)); + return std::unique_ptr( + new Parameter(*this)); } - std::shared_ptr< std::vector< std::string > > attributes - = std::make_shared< std::vector< std::string > >(); + 
std::shared_ptr> attributes = + std::make_shared>(); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::ADVANCE > : public AbstractParameter +template <> +struct OPENPMDAPI_EXPORT Parameter + : public AbstractParameter { Parameter() = default; - Parameter( Parameter const & p ) - : AbstractParameter(), mode( p.mode ), status( p.status ) - { - } + Parameter(Parameter const &p) + : AbstractParameter(), mode(p.mode), status(p.status) + {} - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::ADVANCE >( *this ) ); + return std::unique_ptr( + new Parameter(*this)); } //! input parameter AdvanceMode mode; //! output parameter - std::shared_ptr< AdvanceStatus > status = - std::make_shared< AdvanceStatus >( AdvanceStatus::OK ); + std::shared_ptr status = + std::make_shared(AdvanceStatus::OK); }; -template<> -struct OPENPMDAPI_EXPORT Parameter< Operation::AVAILABLE_CHUNKS > +template <> +struct OPENPMDAPI_EXPORT Parameter : public AbstractParameter { Parameter() = default; - Parameter( Parameter const & p ) : AbstractParameter(), chunks( p.chunks ) - { - } + Parameter(Parameter const &p) : AbstractParameter(), chunks(p.chunks) + {} - Parameter & - operator=( Parameter const & p ) + Parameter &operator=(Parameter const &p) { chunks = p.chunks; return *this; } - std::unique_ptr< AbstractParameter > - clone() const override + std::unique_ptr clone() const override { - return std::unique_ptr< AbstractParameter >( - new Parameter< Operation::AVAILABLE_CHUNKS >( *this ) ); + return std::unique_ptr( + new Parameter(*this)); } // output parameter - std::shared_ptr< ChunkTable > chunks = std::make_shared< ChunkTable >(); + std::shared_ptr chunks = std::make_shared(); }; /** @brief Self-contained description of a single IO operation. @@ -618,32 +633,28 @@ class OPENPMDAPI_EXPORT IOTask /** Constructor for self-contained description of single IO operation. * * @tparam op Type of Operation to be executed. - * @param w Writable indicating the location of the object being operated on. - * @param p Parameter object supplying all required input and/or output parameters to the operation. + * @param w Writable indicating the location of the object being + * operated on. + * @param p Parameter object supplying all required input and/or output + * parameters to the operation. 
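A common thread in the Parameter specializations above is that their output members (paths, datasets, attributes, status, chunks, ...) are shared_ptr: the IO queue stores only a clone of the parameter object, yet results written at flush time must remain visible through the caller's original handle. A minimal standalone model of that behaviour (ListPathsParam is an illustrative stand-in, not the library type):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct ListPathsParam
{
    std::shared_ptr<std::vector<std::string>> paths =
        std::make_shared<std::vector<std::string>>();
};

int main()
{
    ListPathsParam original;          // what the frontend keeps
    ListPathsParam queued = original; // what the IO queue stores (a clone)

    // later, at flush time, the backend fills the queued copy ...
    queued.paths->push_back("fields");
    queued.paths->push_back("particles");

    // ... and the frontend sees the result through its original handle
    for (auto const &p : *original.paths)
        std::cout << p << std::endl;
}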
*/ - template< Operation op > - explicit IOTask(Writable* w, - Parameter< op > const & p) - : writable{w}, - operation{op}, - parameter{p.clone()} - { } - - template< Operation op > - explicit IOTask(Attributable* a, - Parameter< op > const & p) - : writable{getWritable(a)}, - operation{op}, - parameter{p.clone()} - { } - - explicit IOTask(IOTask const & other) : - writable{other.writable}, - operation{other.operation}, - parameter{other.parameter} + template + explicit IOTask(Writable *w, Parameter const &p) + : writable{w}, operation{op}, parameter{p.clone()} + {} + + template + explicit IOTask(Attributable *a, Parameter const &p) + : writable{getWritable(a)}, operation{op}, parameter{p.clone()} + {} + + explicit IOTask(IOTask const &other) + : writable{other.writable} + , operation{other.operation} + , parameter{other.parameter} {} - IOTask& operator=(IOTask const & other) + IOTask &operator=(IOTask const &other) { writable = other.writable; operation = other.operation; @@ -651,8 +662,8 @@ class OPENPMDAPI_EXPORT IOTask return *this; } - Writable* writable; + Writable *writable; Operation operation; - std::shared_ptr< AbstractParameter > parameter; -}; // IOTask + std::shared_ptr parameter; +}; // IOTask } // namespace openPMD diff --git a/include/openPMD/IO/InvalidatableFile.hpp b/include/openPMD/IO/InvalidatableFile.hpp index 006c3829e5..31c9fd3fcc 100644 --- a/include/openPMD/IO/InvalidatableFile.hpp +++ b/include/openPMD/IO/InvalidatableFile.hpp @@ -20,77 +20,66 @@ */ #pragma once - -#include #include - +#include namespace openPMD { - /** - * Wrapper around a shared pointer to: - * * a filename - * * and a boolean indicating whether the file still exists - * The wrapper adds no extra information, but some commodity functions. - * Invariant for any context within which this class shall be used: - * For any valid filename, there is at any time at most one - * such shared pointer (wrapper) known in said context's data structures - * (counting by pointer equality) - * This means, that a file can be invalidated (i.e. deleted or overwritten) - * by simply searching for one instance of the file among all known files and - * invalidating this instance - * A new instance may hence only be created after making sure that there are - * no valid instances in the data structures. - */ - struct InvalidatableFile - { - explicit InvalidatableFile( std::string s ); - - - InvalidatableFile( ) = default; - - - struct FileState - { - explicit FileState( std::string s ); - - std::string name; - bool valid = true; - }; - - std::shared_ptr< FileState > fileState; - - - void invalidate( ); - +/** + * Wrapper around a shared pointer to: + * * a filename + * * and a boolean indicating whether the file still exists + * The wrapper adds no extra information, but some commodity functions. + * Invariant for any context within which this class shall be used: + * For any valid filename, there is at any time at most one + * such shared pointer (wrapper) known in said context's data structures + * (counting by pointer equality) + * This means, that a file can be invalidated (i.e. deleted or overwritten) + * by simply searching for one instance of the file among all known files and + * invalidating this instance + * A new instance may hence only be created after making sure that there are + * no valid instances in the data structures. 
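The invariant described above is what makes invalidation cheap: all wrappers for one filename share a single FileState, so flipping its valid flag through any known handle is observed everywhere. A standalone model with a simplified stand-in type (FileHandle is an illustrative name, not the class declared here):

#include <iostream>
#include <memory>
#include <string>

struct FileHandle
{
    struct State
    {
        std::string name;
        bool valid = true;
    };
    std::shared_ptr<State> state;

    explicit FileHandle(std::string s) : state{std::make_shared<State>()}
    {
        state->name = std::move(s);
    }
    void invalidate()
    {
        state->valid = false;
    }
    bool valid() const
    {
        return state->valid;
    }
};

int main()
{
    FileHandle original("data.json");
    FileHandle copyInSomeMap = original; // shares the same FileState

    copyInSomeMap.invalidate(); // e.g. the file was deleted or overwritten
    std::cout << std::boolalpha << original.valid() << std::endl; // false
}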
+ */ +struct InvalidatableFile +{ + explicit InvalidatableFile(std::string s); - bool valid( ) const; + InvalidatableFile() = default; + struct FileState + { + explicit FileState(std::string s); - InvalidatableFile & operator=( std::string s ); + std::string name; + bool valid = true; + }; + std::shared_ptr fileState; - bool operator==( InvalidatableFile const & f ) const; + void invalidate(); + bool valid() const; - std::string & operator*( ) const; + InvalidatableFile &operator=(std::string s); + bool operator==(InvalidatableFile const &f) const; - std::string * operator->( ) const; + std::string &operator*() const; + std::string *operator->() const; - explicit operator bool( ) const; - }; -} + explicit operator bool() const; +}; +} // namespace openPMD namespace std { - template< > - struct hash< openPMD::InvalidatableFile > - { - using argument_type = openPMD::InvalidatableFile; - using result_type = std::size_t; +template <> +struct hash +{ + using argument_type = openPMD::InvalidatableFile; + using result_type = std::size_t; - result_type operator()( argument_type const & s ) const noexcept; - }; -} + result_type operator()(argument_type const &s) const noexcept; +}; +} // namespace std diff --git a/include/openPMD/IO/JSON/JSONFilePosition.hpp b/include/openPMD/IO/JSON/JSONFilePosition.hpp index 9ae257f626..ca18ddde93 100644 --- a/include/openPMD/IO/JSON/JSONFilePosition.hpp +++ b/include/openPMD/IO/JSON/JSONFilePosition.hpp @@ -21,20 +21,18 @@ #pragma once -#include "openPMD/config.hpp" #include "openPMD/IO/AbstractFilePosition.hpp" +#include "openPMD/config.hpp" #include - namespace openPMD { - struct JSONFilePosition : - public AbstractFilePosition - { - using json = nlohmann::json; - json::json_pointer id; +struct JSONFilePosition : public AbstractFilePosition +{ + using json = nlohmann::json; + json::json_pointer id; - JSONFilePosition( json::json_pointer ptr = json::json_pointer( ) ); - }; -} // openPMD + JSONFilePosition(json::json_pointer ptr = json::json_pointer()); +}; +} // namespace openPMD diff --git a/include/openPMD/IO/JSON/JSONIOHandler.hpp b/include/openPMD/IO/JSON/JSONIOHandler.hpp index 8e017b2eab..1c1302bb55 100644 --- a/include/openPMD/IO/JSON/JSONIOHandler.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandler.hpp @@ -21,29 +21,26 @@ #pragma once - #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp" - namespace openPMD { - class JSONIOHandler : - public AbstractIOHandler - { - public: - JSONIOHandler( - std::string path, - Access at - ); +class JSONIOHandler : public AbstractIOHandler +{ +public: + JSONIOHandler(std::string path, Access at); - ~JSONIOHandler( ) override; + ~JSONIOHandler() override; - std::string backendName() const override { return "JSON"; } + std::string backendName() const override + { + return "JSON"; + } - std::future< void > flush( ) override; + std::future flush() override; - private: - JSONIOHandlerImpl m_impl; - }; -} // openPMD +private: + JSONIOHandlerImpl m_impl; +}; +} // namespace openPMD diff --git a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp index 798d0bd664..20dfd441da 100644 --- a/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp +++ b/include/openPMD/IO/JSON/JSONIOHandlerImpl.hpp @@ -21,524 +21,394 @@ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/AbstractIOHandlerImpl.hpp" #include "openPMD/IO/Access.hpp" #include 
"openPMD/IO/JSON/JSONFilePosition.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/config.hpp" #include #include #include #include +#include #include #include #include #include -#include - namespace openPMD { - // Wrapper around a shared pointer to: - // * a filename - // * and a boolean indicating whether the file still exists - // The wrapper adds no extra information, but some commodity functions. - // Invariant for JSONIOHandlerImpl: - // For any valid filename, there is at any time at most one - // such shared pointer (wrapper) in the HandlerImpl's data structures - // (counting by pointer equality) - // This means, that a file can be invalidated (i.e. deleted or overwritten) - // by simply searching for one instance of the file e.g. in m_files and - // invalidating this instance - // A new instance may hence only be created after making sure that there are - // no valid instances in the data structures. - struct File +// Wrapper around a shared pointer to: +// * a filename +// * and a boolean indicating whether the file still exists +// The wrapper adds no extra information, but some commodity functions. +// Invariant for JSONIOHandlerImpl: +// For any valid filename, there is at any time at most one +// such shared pointer (wrapper) in the HandlerImpl's data structures +// (counting by pointer equality) +// This means, that a file can be invalidated (i.e. deleted or overwritten) +// by simply searching for one instance of the file e.g. in m_files and +// invalidating this instance +// A new instance may hence only be created after making sure that there are +// no valid instances in the data structures. +struct File +{ + explicit File(std::string s) : fileState{std::make_shared(s)} + {} + + File() = default; + + struct FileState { - explicit File( std::string s ) : - fileState { std::make_shared< FileState >( s ) } + explicit FileState(std::string s) : name{std::move(s)} {} + std::string name; + bool valid = true; + }; - File( ) = default; + std::shared_ptr fileState; + void invalidate() + { + fileState->valid = false; + } - struct FileState + bool valid() const + { + return fileState->valid; + } + + File &operator=(std::string s) + { + if (fileState) { - explicit FileState( std::string s ) : - name { std::move( s ) } - {} + fileState->name = s; + } + else + { + fileState = std::make_shared(s); + } + return *this; + } + bool operator==(File const &f) const + { + return this->fileState == f.fileState; + } - std::string name; - bool valid = true; - }; + std::string &operator*() const + { + return fileState->name; + } - std::shared_ptr< FileState > fileState; + std::string *operator->() const + { + return &fileState->name; + } + explicit operator bool() const + { + return fileState.operator bool(); + } +}; +} // namespace openPMD - void invalidate( ) - { - fileState->valid = false; - } +namespace std +{ +template <> +struct hash +{ + typedef openPMD::File argument_type; + typedef std::size_t result_type; + result_type operator()(argument_type const &s) const noexcept + { + return std::hash>{}(s.fileState); + } +}; - bool valid( ) const - { - return fileState->valid; - } +// std::complex handling +template +void to_json(nlohmann::json &j, const std::complex &p) +{ + j = nlohmann::json{p.real(), p.imag()}; +} +template +void from_json(const nlohmann::json &j, std::complex &p) +{ + p.real(j.at(0)); + p.imag(j.at(1)); +} +} // namespace std - File & operator=( std::string s ) - { - if( fileState ) - { - fileState->name = s; - } - else - { - fileState = std::make_shared< 
FileState >( s ); - } - return *this; - } +namespace openPMD +{ +class JSONIOHandlerImpl : public AbstractIOHandlerImpl +{ + using json = nlohmann::json; +public: + explicit JSONIOHandlerImpl(AbstractIOHandler *); - bool operator==( - File const & f - ) const - { - return this->fileState == f.fileState; - } + ~JSONIOHandlerImpl() override; + void + createFile(Writable *, Parameter const &) override; - std::string & operator*( ) const - { - return fileState->name; - } + void + createPath(Writable *, Parameter const &) override; + void createDataset( + Writable *, Parameter const &) override; - std::string * operator->( ) const - { - return &fileState->name; - } + void extendDataset( + Writable *, Parameter const &) override; + void availableChunks( + Writable *, Parameter &) override; - explicit operator bool( ) const - { - return fileState.operator bool( ); - } - }; -} + void openFile(Writable *, Parameter const &) override; -namespace std -{ - template< > - struct hash< openPMD::File > - { - typedef openPMD::File argument_type; - typedef std::size_t result_type; + void + closeFile(Writable *, Parameter const &) override; + void openPath(Writable *, Parameter const &) override; - result_type operator()( argument_type const & s ) const noexcept - { - return std::hash< shared_ptr< openPMD::File::FileState>> {}( s.fileState ); - } - }; + void openDataset(Writable *, Parameter &) override; - // std::complex handling - template< class T > void to_json(nlohmann::json &j, const std::complex< T > &p) { - j = nlohmann::json {p.real(), p.imag()}; - } + void + deleteFile(Writable *, Parameter const &) override; - template< class T > void from_json(const nlohmann::json &j, std::complex< T > &p) { - p.real(j.at(0)); - p.imag(j.at(1)); - } -} + void + deletePath(Writable *, Parameter const &) override; -namespace openPMD -{ - class JSONIOHandlerImpl : - public AbstractIOHandlerImpl + void deleteDataset( + Writable *, Parameter const &) override; + + void deleteAttribute( + Writable *, Parameter const &) override; + + void writeDataset( + Writable *, Parameter const &) override; + + void writeAttribute( + Writable *, Parameter const &) override; + + void readDataset(Writable *, Parameter &) override; + + void readAttribute(Writable *, Parameter &) override; + + void listPaths(Writable *, Parameter &) override; + + void + listDatasets(Writable *, Parameter &) override; + + void listAttributes(Writable *, Parameter &) override; + + std::future flush() override; + +private: + using FILEHANDLE = std::fstream; + + // map each Writable to its associated file + // contains only the filename, without the OS path + std::unordered_map m_files; + + std::unordered_map> m_jsonVals; + + // files that have logically, but not physically been written to + std::unordered_set m_dirty; + + // HELPER FUNCTIONS + + // will use the IOHandler to retrieve the correct directory + // shared pointer to circumvent the fact that c++ pre 17 does + // not enforce (only allow) copy elision in return statements + std::shared_ptr getFilehandle( + File, + Access access); //, Access + // m_frontendAccess=this->m_handler->m_frontendAccess); + + // full operating system path of the given file + std::string fullPath(File); + + std::string fullPath(std::string const &); + + // from a path specification /a/b/c, remove the last + // "folder" (i.e. 
modify the string to equal /a/b)
+    static void parentDir(std::string &);
+
+    // Fileposition is assumed to have already been set,
+    // get it in string form
+    static std::string filepositionOf(Writable *w);
+
+    // Execute visitor on each pair of positions in the json value
+    // and the flattened multidimensional array.
+    // Used for writing from the data to JSON and for reading back into
+    // the array from JSON
+    template <typename T, typename Visitor>
+    static void syncMultidimensionalJson(
+        nlohmann::json &j,
+        Offset const &offset,
+        Extent const &extent,
+        Extent const &multiplicator,
+        Visitor visitor,
+        T *data,
+        size_t currentdim = 0);
+
+    // multiplicators: an array [m_0,...,m_n] s.t.
+    // data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n]
+    // (m_n = 1)
+    // essentially: m_i = \prod_{j=0}^{i-1} extent_j
+    static Extent getMultiplicators(Extent const &extent);
+
+    static nlohmann::json initializeNDArray(Extent const &extent);
+
+    static Extent getExtent(nlohmann::json &j);
+
+    // remove single '/' in the beginning and end of a string
+    static std::string removeSlashes(std::string);
+
+    template <typename KeyT>
+    static bool hasKey(nlohmann::json &, KeyT &&key);
+
+    // make sure that the given path exists in proper form in
+    // the passed json value
+    static void ensurePath(nlohmann::json *json, std::string path);
+
+    // In order not to insert the same file name into the data structures
+    // with a new pointer (e.g. when reopening), search for a possibly
+    // existing old pointer. Construct a new pointer only upon failure.
+    // The bool is true iff the pointer has been newly-created.
+    // The iterator is an iterator for m_files
+    std::tuple<File, std::unordered_map<Writable *, File>::iterator, bool>
+    getPossiblyExisting(std::string file);
+
+    // get the json value representing the whole file, possibly reading
+    // from disk
+    std::shared_ptr<nlohmann::json> obtainJsonContents(File);
+
+    // get the json value at the writable's fileposition
+    nlohmann::json &obtainJsonContents(Writable *writable);
+
+    // write to disk the json contents associated with the file
+    // remove from m_dirty if unsetDirty == true
+    void putJsonContents(File, bool unsetDirty = true);
+
+    // figure out the file position of the writable
+    // (preferring the parent's file position) and extend it
+    // by extend. return the modified file position.
+ std::shared_ptr + setAndGetFilePosition(Writable *, std::string extend); + + // figure out the file position of the writable + // (preferring the parent's file position) + // only modify the writable's fileposition when specified + std::shared_ptr + setAndGetFilePosition(Writable *, bool write = true); + + // get the writable's containing file + // if the parent is associated with another file, + // associate the writable with that file and return it + File refreshFileFromParent(Writable *writable); + + void associateWithFile(Writable *writable, File); + + // need to check the name too in order to exclude "attributes" key + static bool isGroup(nlohmann::json::const_iterator it); + + static bool isDataset(nlohmann::json const &j); + + // check whether the json reference contains a valid dataset + template + void verifyDataset(Param const ¶meters, nlohmann::json &); + + static nlohmann::json platformSpecifics(); + + struct DatasetWriter { - using json = nlohmann::json; - - public: - explicit JSONIOHandlerImpl( AbstractIOHandler * ); - - ~JSONIOHandlerImpl( ) override; - - void createFile( - Writable *, - Parameter< Operation::CREATE_FILE > const & - ) override; - - void createPath( - Writable *, - Parameter< Operation::CREATE_PATH > const & - ) override; - - void createDataset( - Writable *, - Parameter< Operation::CREATE_DATASET > const & - ) override; - - void extendDataset( - Writable *, - Parameter< Operation::EXTEND_DATASET > const & - ) override; - - void - availableChunks( - Writable *, - Parameter< Operation::AVAILABLE_CHUNKS > & - ) override; + template + static void call( + nlohmann::json &json, + const Parameter ¶meters); - void openFile( - Writable *, - Parameter< Operation::OPEN_FILE > const & - ) override; - - void closeFile( - Writable *, - Parameter< Operation::CLOSE_FILE > const & - ) override; - - void openPath( - Writable *, - Parameter< Operation::OPEN_PATH > const & - ) override; - - void openDataset( - Writable *, - Parameter< Operation::OPEN_DATASET > & - ) override; - - void deleteFile( - Writable *, - Parameter< Operation::DELETE_FILE > const & - ) override; - - void deletePath( - Writable *, - Parameter< Operation::DELETE_PATH > const & - ) override; - - void deleteDataset( - Writable *, - Parameter< Operation::DELETE_DATASET > const & - ) override; - - void deleteAttribute( - Writable *, - Parameter< Operation::DELETE_ATT > const & - ) override; - - void writeDataset( - Writable *, - Parameter< Operation::WRITE_DATASET > const & - ) override; - - void writeAttribute( - Writable *, - Parameter< Operation::WRITE_ATT > const & - ) override; - - void readDataset( - Writable *, - Parameter< Operation::READ_DATASET > & - ) override; - - void readAttribute( - Writable *, - Parameter< Operation::READ_ATT > & - ) override; - - void listPaths( - Writable *, - Parameter< Operation::LIST_PATHS > & - ) override; - - void listDatasets( - Writable *, - Parameter< Operation::LIST_DATASETS > & - ) override; - - void listAttributes( - Writable *, - Parameter< Operation::LIST_ATTS > & - ) override; - - std::future< void > flush( ) override; - - - private: - - using FILEHANDLE = std::fstream; - - // map each Writable to its associated file - // contains only the filename, without the OS path - std::unordered_map< - Writable *, - File - > m_files; - - std::unordered_map< - File, - std::shared_ptr< nlohmann::json >> m_jsonVals; - - // files that have logically, but not physically been written to - std::unordered_set< File > m_dirty; - - - // HELPER FUNCTIONS - - - // will use the 
IOHandler to retrieve the correct directory - // shared pointer to circumvent the fact that c++ pre 17 does - // not enforce (only allow) copy elision in return statements - std::shared_ptr< FILEHANDLE > getFilehandle( - File, - Access access - ); //, Access m_frontendAccess=this->m_handler->m_frontendAccess); - - // full operating system path of the given file - std::string fullPath( File ); - - std::string fullPath( std::string const & ); - - // from a path specification /a/b/c, remove the last - // "folder" (i.e. modify the string to equal /a/b) - static void parentDir( std::string & ); - - // Fileposition is assumed to have already been set, - // get it in string form - static std::string filepositionOf( Writable * w ); - - // Execute visitor on each pair of positions in the json value - // and the flattened multidimensional array. - // Used for writing from the data to JSON and for reading back into - // the array from JSON - template< - typename T, - typename Visitor - > - static void syncMultidimensionalJson( - nlohmann::json & j, - Offset const & offset, - Extent const & extent, - Extent const & multiplicator, - Visitor visitor, - T * data, - size_t currentdim = 0 - ); - - // multiplicators: an array [m_0,...,m_n] s.t. - // data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n] - // (m_n = 1) - // essentially: m_i = \prod_{j=0}^{i-1} extent_j - static Extent getMultiplicators( Extent const & extent ); - - static nlohmann::json initializeNDArray( Extent const & extent ); - - static Extent getExtent( nlohmann::json & j ); - - - // remove single '/' in the beginning and end of a string - static std::string removeSlashes( std::string ); - - template< typename KeyT > - static bool hasKey( - nlohmann::json &, - KeyT && key - ); - - // make sure that the given path exists in proper form in - // the passed json value - static void ensurePath( - nlohmann::json * json, - std::string path - ); - - // In order not to insert the same file name into the data structures - // with a new pointer (e.g. when reopening), search for a possibly - // existing old pointer. Construct a new pointer only upon failure. - // The bool is true iff the pointer has been newly-created. - // The iterator is an iterator for m_files - std::tuple< - File, - std::unordered_map< - Writable *, - File - >::iterator, - bool - > getPossiblyExisting( - std::string file - ); - - // get the json value representing the whole file, possibly reading - // from disk - std::shared_ptr< nlohmann::json > obtainJsonContents( File ); - - // get the json value at the writable's fileposition - nlohmann::json & obtainJsonContents( Writable * writable ); - - // write to disk the json contents associated with the file - // remove from m_dirty if unsetDirty == true - void putJsonContents( - File, - bool unsetDirty = true - ); - - // figure out the file position of the writable - // (preferring the parent's file position) and extend it - // by extend. return the modified file position. 
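// Illustrative, self-contained sketch (not part of this header) of the
// flattening convention that syncMultidimensionalJson() and
// getMultiplicators() document above: with the innermost dimension running
// fastest, extent {2, 3, 4} yields multiplicators {12, 4, 1}, so element
// (i, j, k) maps to flat index 12*i + 4*j + k.
#include <cstddef>
#include <cstdint>
#include <vector>

std::vector<std::uint64_t>
exampleMultiplicators(std::vector<std::uint64_t> const &extent)
{
    std::vector<std::uint64_t> m(extent.size(), 1);
    for (std::size_t i = extent.size(); i-- > 1;)
        m[i - 1] = m[i] * extent[i]; // suffix product of the extents
    return m; // e.g. {2, 3, 4} -> {12, 4, 1}
}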
- std::shared_ptr< JSONFilePosition > setAndGetFilePosition( - Writable *, - std::string extend - ); - - // figure out the file position of the writable - // (preferring the parent's file position) - // only modify the writable's fileposition when specified - std::shared_ptr< JSONFilePosition > setAndGetFilePosition( - Writable *, - bool write = true - ); - - // get the writable's containing file - // if the parent is associated with another file, - // associate the writable with that file and return it - File refreshFileFromParent( Writable * writable ); - - void associateWithFile( - Writable * writable, - File - ); - - // need to check the name too in order to exclude "attributes" key - static bool isGroup( nlohmann::json::const_iterator it ); - - static bool isDataset( nlohmann::json const & j ); - - - // check whether the json reference contains a valid dataset - template< typename Param > - void verifyDataset( - Param const & parameters, - nlohmann::json & - ); - - static nlohmann::json platformSpecifics( ); - - struct DatasetWriter - { - template< typename T > - static void call( - nlohmann::json & json, - const Parameter< Operation::WRITE_DATASET > & parameters ); + static constexpr char const *errorMsg = "JSON: writeDataset"; + }; - static constexpr char const * errorMsg = "JSON: writeDataset"; - }; + struct DatasetReader + { + template + static void call( + nlohmann::json &json, + Parameter ¶meters); - struct DatasetReader - { - template< typename T > - static void call( - nlohmann::json & json, - Parameter< Operation::READ_DATASET > & parameters ); + static constexpr char const *errorMsg = "JSON: readDataset"; + }; - static constexpr char const * errorMsg = "JSON: readDataset"; - }; + struct AttributeWriter + { + template + static void call(nlohmann::json &, Attribute::resource const &); - struct AttributeWriter - { - template< typename T > - static void call( nlohmann::json &, Attribute::resource const & ); + static constexpr char const *errorMsg = "JSON: writeAttribute"; + }; - static constexpr char const * errorMsg = "JSON: writeAttribute"; - }; + struct AttributeReader + { + template + static void call(nlohmann::json &, Parameter &); - struct AttributeReader - { - template< typename T > - static void call( - nlohmann::json &, - Parameter< Operation::READ_ATT > & - ); + static constexpr char const *errorMsg = "JSON: writeAttribute"; + }; - static constexpr char const * errorMsg = "JSON: writeAttribute"; - }; + template + struct CppToJSON + { + nlohmann::json operator()(T const &); + }; - template< typename T > - struct CppToJSON - { - nlohmann::json operator()( T const & ); - }; + template + struct CppToJSON> + { + nlohmann::json operator()(std::vector const &); + }; - template< typename T > - struct CppToJSON< std::vector< T>> - { - nlohmann::json operator()( std::vector< T > const & ); - }; - - template< typename T, int n > - struct CppToJSON< - std::array< - T, - n>> - { - nlohmann::json operator()( - std::array< - T, - n - > const & - ); - }; - - template< - typename T, - typename Enable = T - > - struct JsonToCpp - { - T operator()( nlohmann::json const & ); - }; + template + struct CppToJSON> + { + nlohmann::json operator()(std::array const &); + }; - template< typename T > - struct JsonToCpp< std::vector< T > > - { - std::vector< T > operator()( nlohmann::json const & ); - }; - - template< typename T, int n > - struct JsonToCpp< - std::array< - T, - n - > - > - { - std::array< - T, - n - > operator()( nlohmann::json const & ); - }; - - template< typename T > - struct 
JsonToCpp< - T, - typename std::enable_if< - std::is_floating_point< - T - >::value - >::type - > - { - T operator()( nlohmann::json const & ); - }; + template + struct JsonToCpp + { + T operator()(nlohmann::json const &); + }; + + template + struct JsonToCpp> + { + std::vector operator()(nlohmann::json const &); + }; + + template + struct JsonToCpp> + { + std::array operator()(nlohmann::json const &); + }; + + template + struct JsonToCpp< + T, + typename std::enable_if::value>::type> + { + T operator()(nlohmann::json const &); }; +}; -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/Iteration.hpp b/include/openPMD/Iteration.hpp index 173b1eb25d..246ae195e5 100644 --- a/include/openPMD/Iteration.hpp +++ b/include/openPMD/Iteration.hpp @@ -20,17 +20,16 @@ */ #pragma once -#include "openPMD/auxiliary/Variant.hpp" -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" #include "openPMD/IterationEncoding.hpp" #include "openPMD/Mesh.hpp" #include "openPMD/ParticleSpecies.hpp" #include "openPMD/Streaming.hpp" +#include "openPMD/auxiliary/Variant.hpp" +#include "openPMD/backend/Attributable.hpp" +#include "openPMD/backend/Container.hpp" #include - namespace openPMD { namespace internal @@ -42,7 +41,7 @@ namespace internal enum class CloseStatus { ParseAccessDeferred, //!< The reader has not yet parsed this iteration - Open, //!< Iteration has not been closed + Open, //!< Iteration has not been closed ClosedInFrontend, /*!< Iteration has been closed, but task has not yet been propagated to the backend */ ClosedInBackend, /*!< Iteration has been closed and task has been @@ -78,14 +77,15 @@ namespace internal class IterationData : public AttributableData { public: - /* - * An iteration may be logically closed in the frontend, - * but not necessarily yet in the backend. - * Will be propagated to the backend upon next flush. - * Store the current status. - * Once an iteration has been closed, no further flushes shall be performed. - * If flushing a closed file, the old file may otherwise be overwritten. - */ + /* + * An iteration may be logically closed in the frontend, + * but not necessarily yet in the backend. + * Will be propagated to the backend upon next flush. + * Store the current status. + * Once an iteration has been closed, no further flushes shall be + * performed. If flushing a closed file, the old file may otherwise be + * overwritten. + */ CloseStatus m_closed = CloseStatus::Open; /** @@ -101,58 +101,60 @@ namespace internal * Information on a parsing request that has not yet been executed. * Otherwise empty. */ - std::optional< DeferredParseAccess > m_deferredParseAccess{}; + std::optional m_deferredParseAccess{}; }; -} -/** @brief Logical compilation of data from one snapshot (e.g. a single simulation cycle). +} // namespace internal +/** @brief Logical compilation of data from one snapshot (e.g. a single + * simulation cycle). 
* - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#required-attributes-for-the-basepath + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#required-attributes-for-the-basepath */ class Iteration : public Attributable { - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; friend class Series; friend class WriteIterations; friend class SeriesIterator; public: - Iteration( Iteration const & ) = default; - Iteration & operator=( Iteration const & ) = default; + Iteration(Iteration const &) = default; + Iteration &operator=(Iteration const &) = default; /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). * @return Global reference time for this iteration. */ - template< typename T > + template T time() const; /** Set the global reference time for this iteration. * - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. + * float, double). * @param newTime Global reference time for this iteration. * @return Reference to modified iteration. */ - template< typename T > - Iteration& setTime(T newTime); + template + Iteration &setTime(T newTime); /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). * @return Time step used to reach this iteration. */ - template< typename T > + template T dt() const; /** Set the time step used to reach this iteration. * - * @tparam T Floating point type of user-selected precision (e.g. float, double). + * @tparam T Floating point type of user-selected precision (e.g. + * float, double). * @param newDt Time step used to reach this iteration. * @return Reference to modified iteration. */ - template< typename T > - Iteration& setDt(T newDt); + template + Iteration &setDt(T newDt); /** * @return Conversion factor to convert time and dt to seconds. @@ -163,7 +165,7 @@ class Iteration : public Attributable * @param newTimeUnitSI new value for timeUnitSI * @return Reference to modified iteration. */ - Iteration& setTimeUnitSI(double newTimeUnitSI); + Iteration &setTimeUnitSI(double newTimeUnitSI); /** Close an iteration * @@ -180,8 +182,7 @@ class Iteration : public Attributable * API. Currently, disallowing to reopen closed iterations satisfies * the requirements of the streaming API. */ - Iteration & - close( bool flush = true ); + Iteration &close(bool flush = true); /** Open an iteration * @@ -196,8 +197,7 @@ class Iteration : public Attributable * * @return Reference to iteration. */ - Iteration & - open(); + Iteration &open(); /** * @brief Has the iteration been closed? @@ -205,8 +205,7 @@ class Iteration : public Attributable * * @return Whether the iteration has been closed. */ - bool - closed() const; + bool closed() const; /** * @brief Has the iteration been closed by the writer? @@ -219,35 +218,35 @@ class Iteration : public Attributable * @return Whether the iteration has been explicitly closed (yet) by the * writer. */ - [[deprecated( "This attribute is no longer set by the openPMD-api." )]] - bool + [[deprecated("This attribute is no longer set by the openPMD-api.")]] bool closedByWriter() const; - Container< Mesh > meshes{}; - Container< ParticleSpecies > particles{}; //particleSpecies? 
+ Container meshes{}; + Container particles{}; // particleSpecies? virtual ~Iteration() = default; + private: Iteration(); - std::shared_ptr< internal::IterationData > m_iterationData{ - new internal::IterationData }; + std::shared_ptr m_iterationData{ + new internal::IterationData}; - inline internal::IterationData const & get() const + inline internal::IterationData const &get() const { return *m_iterationData; } - inline internal::IterationData & get() + inline internal::IterationData &get() { return *m_iterationData; } - void flushFileBased(std::string const&, uint64_t); + void flushFileBased(std::string const &, uint64_t); void flushGroupBased(uint64_t); void flushVariableBased(uint64_t); void flush(); - void deferParseAccess( internal::DeferredParseAccess ); + void deferParseAccess(internal::DeferredParseAccess); /* * Control flow for read(), readFileBased(), readGroupBased() and * read_impl(): @@ -266,12 +265,10 @@ class Iteration : public Attributable * */ void read(); - void reread( std::string const & path ); - void readFileBased( std::string filePath, std::string const & groupPath ); - void readGorVBased( std::string const & groupPath ); - void read_impl( std::string const & groupPath ); - - + void reread(std::string const &path); + void readFileBased(std::string filePath, std::string const &groupPath); + void readGorVBased(std::string const &groupPath); + void read_impl(std::string const &groupPath); /** * @brief Begin an IO step on the IO file (or file-like object) @@ -280,8 +277,7 @@ class Iteration : public Attributable * * @return AdvanceStatus */ - AdvanceStatus - beginStep(); + AdvanceStatus beginStep(); /** * @brief End an IO step on the IO file (or file-like object) @@ -290,8 +286,7 @@ class Iteration : public Attributable * * @return AdvanceStatus */ - void - endStep(); + void endStep(); /** * @brief Is a step currently active for this iteration? @@ -301,8 +296,7 @@ class Iteration : public Attributable * in case of file-based iteration layout, it is local (member of this very * object). */ - StepStatus - getStepStatus(); + StepStatus getStepStatus(); /** * @brief Set step activity status for this iteration. @@ -312,7 +306,7 @@ class Iteration : public Attributable * in case of file-based iteration layout, it is set locally (member of * this very object). */ - void setStepStatus( StepStatus ); + void setStepStatus(StepStatus); /* * @brief Check recursively whether this Iteration is dirty. @@ -322,15 +316,14 @@ class Iteration : public Attributable * @return true If dirty. * @return false Otherwise. */ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; /** * @brief Link with parent. * * @param w The Writable representing the parent. 
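// Illustrative write-side sketch for the Iteration interface documented above
// (setTime()/setDt()/setTimeUnitSI()/close()); the file name and numeric
// values are placeholders, not taken from this diff.
#include <openPMD/openPMD.hpp>

void writeOneIteration()
{
    using namespace openPMD;
    Series series("simData.json", Access::CREATE);
    Iteration it = series.iterations[100];
    it.setTime(1.3e-15).setDt(1.0e-16).setTimeUnitSI(1.0);
    // ... fill it.meshes and it.particles here ...
    it.close(); // closed iterations cannot be reopened for writing
}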
*/ - virtual void linkHierarchy(Writable& w); + virtual void linkHierarchy(Writable &w); /** * @brief Access an iteration in read mode that has potentially not been @@ -338,40 +331,29 @@ class Iteration : public Attributable * */ void runDeferredParseAccess(); -}; // Iteration - -extern template -float -Iteration::time< float >() const; +}; // Iteration -extern template -double -Iteration::time< double >() const; +extern template float Iteration::time() const; -extern template -long double -Iteration::time< long double >() const; +extern template double Iteration::time() const; -template< typename T > -inline T -Iteration::time() const -{ return this->readFloatingpoint< T >("time"); } +extern template long double Iteration::time() const; +template +inline T Iteration::time() const +{ + return this->readFloatingpoint("time"); +} -extern template -float -Iteration::dt< float >() const; +extern template float Iteration::dt() const; -extern template -double -Iteration::dt< double >() const; +extern template double Iteration::dt() const; -extern template -long double -Iteration::dt< long double >() const; +extern template long double Iteration::dt() const; -template< typename T > -inline T -Iteration::dt() const -{ return this->readFloatingpoint< T >("dt"); } -} // openPMD +template +inline T Iteration::dt() const +{ + return this->readFloatingpoint("dt"); +} +} // namespace openPMD diff --git a/include/openPMD/IterationEncoding.hpp b/include/openPMD/IterationEncoding.hpp index fa655aa348..81cc191000 100644 --- a/include/openPMD/IterationEncoding.hpp +++ b/include/openPMD/IterationEncoding.hpp @@ -22,19 +22,20 @@ #include - namespace openPMD { /** Encoding scheme of an Iterations Series'. * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series */ enum class IterationEncoding { - fileBased, groupBased, variableBased + fileBased, + groupBased, + variableBased }; -std::ostream& -operator<<(std::ostream&, openPMD::IterationEncoding const&); +std::ostream &operator<<(std::ostream &, openPMD::IterationEncoding const &); -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/Mesh.hpp b/include/openPMD/Mesh.hpp index bc875d25db..7781d6b3d1 100644 --- a/include/openPMD/Mesh.hpp +++ b/include/openPMD/Mesh.hpp @@ -30,28 +30,28 @@ #include #include - namespace openPMD { /** @brief Container for N-dimensional, homogeneous Records. * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#mesh-based-records + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#mesh-based-records */ -class Mesh : public BaseRecord< MeshRecordComponent > +class Mesh : public BaseRecord { - friend class Container< Mesh >; + friend class Container; friend class Iteration; public: - Mesh(Mesh const&) = default; - Mesh& operator=(Mesh const&) = default; + Mesh(Mesh const &) = default; + Mesh &operator=(Mesh const &) = default; ~Mesh() override = default; /** @brief Enumerated datatype for the geometry of the mesh. * - * @note If the default values do not suit your application, you can set arbitrary - * Geometry with MeshRecordComponent::setAttribute("geometry", VALUE). - * Note that this might break openPMD compliance and tool support. + * @note If the default values do not suit your application, you can set + * arbitrary Geometry with MeshRecordComponent::setAttribute("geometry", + * VALUE). 
Note that this might break openPMD compliance and tool support. */ enum class Geometry { @@ -60,7 +60,7 @@ class Mesh : public BaseRecord< MeshRecordComponent > cylindrical, spherical, other - }; //Geometry + }; // Geometry /** @brief Enumerated datatype for the memory layout of N-dimensional data. */ @@ -68,7 +68,7 @@ class Mesh : public BaseRecord< MeshRecordComponent > { C = 'C', F = 'F' - }; //DataOrder + }; // DataOrder /** * @return Enum representing the geometry of the mesh of the mesh record. @@ -83,7 +83,7 @@ class Mesh : public BaseRecord< MeshRecordComponent > * @param g geometry of the mesh of the mesh record. * @return Reference to modified mesh. */ - Mesh& setGeometry(Geometry g); + Mesh &setGeometry(Geometry g); /** Set the geometry of the mesh of the mesh record. * * If the geometry is unknown to the openPMD-api, the string is prefixed @@ -92,20 +92,24 @@ class Mesh : public BaseRecord< MeshRecordComponent > * @param geometry geometry of the mesh of the mesh record, as string * @return Reference to modified mesh. */ - Mesh& setGeometry(std::string geometry); + Mesh &setGeometry(std::string geometry); /** - * @throw no_such_attribute_error If Mesh::geometry is not Mesh::Geometry::thetaMode. - * @return String representing additional parameters for the geometry, separated by a @code ; @endcode. + * @throw no_such_attribute_error If Mesh::geometry is not + * Mesh::Geometry::thetaMode. + * @return String representing additional parameters for the geometry, + * separated by a @code ; @endcode. */ std::string geometryParameters() const; - /** Set additional parameters for the geometry, separated by a @code ; @endcode. + /** Set additional parameters for the geometry, separated by a @code ; + * @endcode. * * @note Separation constraint is not verified by API. - * @param geometryParameters additional parameters for the geometry, separated by a @code ; @endcode. + * @param geometryParameters additional parameters for the geometry, + * separated by a @code ; @endcode. * @return Reference to modified mesh. */ - Mesh& setGeometryParameters(std::string const& geometryParameters); + Mesh &setGeometryParameters(std::string const &geometryParameters); /** * @return Memory layout of N-dimensional data. @@ -116,105 +120,132 @@ class Mesh : public BaseRecord< MeshRecordComponent > * @param dor memory layout of N-dimensional data. * @return Reference to modified mesh. */ - Mesh& setDataOrder(DataOrder dor); + Mesh &setDataOrder(DataOrder dor); /** * @return Ordering of the labels for the Mesh::geometry of the mesh. */ - std::vector< std::string > axisLabels() const; + std::vector axisLabels() const; /** Set the ordering of the labels for the Mesh::geometry of the mesh. * * @note Dimensionality constraint is not verified by API. - * @param axisLabels vector containing N (string) elements, where N is the number of dimensions in the simulation. + * @param axisLabels vector containing N (string) elements, where N is + * the number of dimensions in the simulation. * @return Reference to modified mesh. */ - Mesh& setAxisLabels(std::vector< std::string > const & axisLabels); + Mesh &setAxisLabels(std::vector const &axisLabels); /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @return vector of T representing the spacing of the grid points along each dimension (in the units of the simulation). - */ - template< typename T > - std::vector< T > gridSpacing() const; - /** Set the spacing of the grid points along each dimension (in the units of the simulation). 
+ * @tparam T Floating point type of user-selected precision (e.g. float, + * double). + * @return vector of T representing the spacing of the grid points along + * each dimension (in the units of the simulation). + */ + template + std::vector gridSpacing() const; + /** Set the spacing of the grid points along each dimension (in the units of + * the simulation). * * @note Dimensionality constraint is not verified by API. - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @param gridSpacing vector containing N (T) elements, where N is the number of dimensions in the simulation. + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). + * @param gridSpacing vector containing N (T) elements, where N is the + * number of dimensions in the simulation. * @return Reference to modified mesh. */ - template< typename T, - typename = std::enable_if_t::value> > - Mesh& setGridSpacing(std::vector< T > const & gridSpacing); + template < + typename T, + typename = std::enable_if_t::value>> + Mesh &setGridSpacing(std::vector const &gridSpacing); /** - * @return Vector of (double) representing the start of the current domain of the simulation (position of the beginning of the first cell) in simulation units. + * @return Vector of (double) representing the start of the current domain + * of the simulation (position of the beginning of the first cell) in + * simulation units. */ - std::vector< double > gridGlobalOffset() const; - /** Set the start of the current domain of the simulation (position of the beginning of the first cell) in simulation units. + std::vector gridGlobalOffset() const; + /** Set the start of the current domain of the simulation (position of the + * beginning of the first cell) in simulation units. * * @note Dimensionality constraint is not verified by API. - * @param gridGlobalOffset vector containing N (double) elements, where N is the number of dimensions in the simulation. + * @param gridGlobalOffset vector containing N (double) elements, where + * N is the number of dimensions in the simulation. * @return Reference to modified mesh. */ - Mesh& setGridGlobalOffset(std::vector< double > const & gridGlobalOffset); + Mesh &setGridGlobalOffset(std::vector const &gridGlobalOffset); /** - * @return Unit-conversion factor to multiply each value in Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from simulation units to SI units. + * @return Unit-conversion factor to multiply each value in + * Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from + * simulation units to SI units. */ double gridUnitSI() const; - /** Set the unit-conversion factor to multiply each value in Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from simulation units to SI units. + /** Set the unit-conversion factor to multiply each value in + * Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from + * simulation units to SI units. * - * @param gridUnitSI unit-conversion factor to multiply each value in Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from simulation units to SI units. + * @param gridUnitSI unit-conversion factor to multiply each value in + * Mesh::gridSpacing and Mesh::gridGlobalOffset, in order to convert from + * simulation units to SI units. * @return Reference to modified mesh. */ - Mesh& setGridUnitSI(double gridUnitSI); + Mesh &setGridUnitSI(double gridUnitSI); - /** Set the powers of the 7 base measures characterizing the record's unit in SI. 
+ /** Set the powers of the 7 base measures characterizing the record's unit + * in SI. * - * @param unitDimension map containing pairs of (UnitDimension, double) that represent the power of the particular base. + * @param unitDimension map containing pairs of (UnitDimension, double) + * that represent the power of the particular base. * @return Reference to modified mesh. */ - Mesh& setUnitDimension(std::map< UnitDimension, double > const& unitDimension); + Mesh & + setUnitDimension(std::map const &unitDimension); /** - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @return Offset between the time at which this record is defined and the Iteration::time attribute of the Series::basePath level. + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). + * @return Offset between the time at which this record is defined and the + * Iteration::time attribute of the Series::basePath level. */ - template< typename T > + template T timeOffset() const; - /** Set the offset between the time at which this record is defined and the Iteration::time attribute of the Series::basePath level. + /** Set the offset between the time at which this record is defined and the + * Iteration::time attribute of the Series::basePath level. * - * @note This should be written in the same unit system as Iteration::time. - * @tparam T Floating point type of user-selected precision (e.g. float, double). - * @param timeOffset Offset between the time at which this record is defined and the Iteration::time attribute of the Series::basePath level. + * @note This should be written in the same unit system as + * Iteration::time. + * @tparam T Floating point type of user-selected precision (e.g. float, + * double). + * @param timeOffset Offset between the time at which this record is + * defined and the Iteration::time attribute of the Series::basePath level. * @return Reference to modified mesh. 
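// Illustrative sketch of chaining the Mesh setters documented above to
// describe a 2D density field; all values are placeholders, not taken from
// this diff.
#include <openPMD/openPMD.hpp>

#include <vector>

void describeRho(openPMD::Iteration &iteration)
{
    using namespace openPMD;
    Mesh &rho = iteration.meshes["rho"];
    rho.setGeometry(Mesh::Geometry::cartesian)
        .setDataOrder(Mesh::DataOrder::C)
        .setAxisLabels({"x", "y"})
        .setGridSpacing(std::vector<double>{1.0, 1.0})
        .setGridGlobalOffset({0.0, 0.0})
        .setGridUnitSI(1.0e-6)
        .setUnitDimension({{UnitDimension::M, 1}, {UnitDimension::L, -3}});
}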
*/ - template< typename T, - typename = std::enable_if_t::value> > - Mesh& setTimeOffset(T timeOffset); + template < + typename T, + typename = std::enable_if_t::value>> + Mesh &setTimeOffset(T timeOffset); private: Mesh(); - void flush_impl(std::string const&) override; + void flush_impl(std::string const &) override; void read() override; }; // Mesh -template< typename T > -inline std::vector< T > -Mesh::gridSpacing() const -{ return readVectorFloatingpoint< T >("gridSpacing"); } +template +inline std::vector Mesh::gridSpacing() const +{ + return readVectorFloatingpoint("gridSpacing"); +} -template< typename T > -inline T -Mesh::timeOffset() const -{ return readFloatingpoint< T >("timeOffset"); } +template +inline T Mesh::timeOffset() const +{ + return readFloatingpoint("timeOffset"); +} -std::ostream& -operator<<(std::ostream&, openPMD::Mesh::Geometry const&); +std::ostream &operator<<(std::ostream &, openPMD::Mesh::Geometry const &); -std::ostream& -operator<<(std::ostream&, openPMD::Mesh::DataOrder const&); +std::ostream &operator<<(std::ostream &, openPMD::Mesh::DataOrder const &); -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/ParticlePatches.hpp b/include/openPMD/ParticlePatches.hpp index ca0d0ff76e..f3c4c0b943 100644 --- a/include/openPMD/ParticlePatches.hpp +++ b/include/openPMD/ParticlePatches.hpp @@ -23,25 +23,24 @@ #include "openPMD/backend/Container.hpp" #include "openPMD/backend/PatchRecord.hpp" -#include #include - +#include namespace openPMD { - class ParticlePatches : public Container< PatchRecord > - { - friend class ParticleSpecies; - friend class Container< ParticlePatches >; - friend class Container< PatchRecord >; +class ParticlePatches : public Container +{ + friend class ParticleSpecies; + friend class Container; + friend class Container; - public: - size_t numPatches() const; - ~ParticlePatches() override = default; +public: + size_t numPatches() const; + ~ParticlePatches() override = default; - private: - ParticlePatches() = default; - void read(); - }; // ParticlePatches +private: + ParticlePatches() = default; + void read(); +}; // ParticlePatches } // namespace openPMD diff --git a/include/openPMD/ParticleSpecies.hpp b/include/openPMD/ParticleSpecies.hpp index a32fb17001..fc80960ca5 100644 --- a/include/openPMD/ParticleSpecies.hpp +++ b/include/openPMD/ParticleSpecies.hpp @@ -20,21 +20,20 @@ */ #pragma once -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" #include "openPMD/ParticlePatches.hpp" #include "openPMD/Record.hpp" +#include "openPMD/backend/Attributable.hpp" +#include "openPMD/backend/Container.hpp" #include - namespace openPMD { -class ParticleSpecies : public Container< Record > +class ParticleSpecies : public Container { - friend class Container< ParticleSpecies >; - friend class Container< Record >; + friend class Container; + friend class Container; friend class Iteration; public: @@ -54,29 +53,28 @@ class ParticleSpecies : public Container< Record > * @return true If dirty. * @return false Otherwise. 
*/ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; }; namespace traits { - template<> - struct GenerationPolicy< ParticleSpecies > + template <> + struct GenerationPolicy { - template< typename T > - void operator()(T & ret) + template + void operator()(T &ret) { ret.particlePatches.linkHierarchy(ret.writable()); - auto& np = ret.particlePatches["numParticles"]; - auto& npc = np[RecordComponent::SCALAR]; + auto &np = ret.particlePatches["numParticles"]; + auto &npc = np[RecordComponent::SCALAR]; npc.resetDataset(Dataset(determineDatatype(), {1})); npc.parent() = np.parent(); - auto& npo = ret.particlePatches["numParticlesOffset"]; - auto& npoc = npo[RecordComponent::SCALAR]; + auto &npo = ret.particlePatches["numParticlesOffset"]; + auto &npoc = npo[RecordComponent::SCALAR]; npoc.resetDataset(Dataset(determineDatatype(), {1})); npoc.parent() = npo.parent(); } }; -} // traits -} // openPMD +} // namespace traits +} // namespace openPMD diff --git a/include/openPMD/ReadIterations.hpp b/include/openPMD/ReadIterations.hpp index 0890881e90..473a4fae36 100644 --- a/include/openPMD/ReadIterations.hpp +++ b/include/openPMD/ReadIterations.hpp @@ -36,24 +36,22 @@ class IndexedIteration : public Iteration friend class SeriesIterator; public: - using iterations_t = decltype( internal::SeriesData::iterations ); + using iterations_t = decltype(internal::SeriesData::iterations); using index_t = iterations_t::key_type; index_t const iterationIndex; private: - template< typename Iteration_t > - IndexedIteration( Iteration_t && it, index_t index ) - : Iteration( std::forward< Iteration_t >( it ) ) - , iterationIndex( index ) - { - } + template + IndexedIteration(Iteration_t &&it, index_t index) + : Iteration(std::forward(it)), iterationIndex(index) + {} }; class SeriesIterator { using iteration_index_t = IndexedIteration::index_t; - using maybe_series_t = std::optional< Series >; + using maybe_series_t = std::optional; maybe_series_t m_series; iteration_index_t m_currentIteration = 0; @@ -62,15 +60,15 @@ class SeriesIterator //! 
construct the end() iterator explicit SeriesIterator(); - SeriesIterator( Series ); + SeriesIterator(Series); - SeriesIterator & operator++(); + SeriesIterator &operator++(); IndexedIteration operator*(); - bool operator==( SeriesIterator const & other ) const; + bool operator==(SeriesIterator const &other) const; - bool operator!=( SeriesIterator const & other ) const; + bool operator!=(SeriesIterator const &other) const; static SeriesIterator end(); }; @@ -96,12 +94,12 @@ class ReadIterations friend class Series; private: - using iterations_t = decltype( internal::SeriesData::iterations ); + using iterations_t = decltype(internal::SeriesData::iterations); using iterator_t = SeriesIterator; Series m_series; - ReadIterations( Series ); + ReadIterations(Series); public: iterator_t begin(); diff --git a/include/openPMD/Record.hpp b/include/openPMD/Record.hpp index b0c3653704..10bf0a5666 100644 --- a/include/openPMD/Record.hpp +++ b/include/openPMD/Record.hpp @@ -20,54 +20,54 @@ */ #pragma once -#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/RecordComponent.hpp" +#include "openPMD/backend/BaseRecord.hpp" #include -#include #include - +#include namespace openPMD { -class Record : public BaseRecord< RecordComponent > +class Record : public BaseRecord { - friend class Container< Record >; + friend class Container; friend class Iteration; friend class ParticleSpecies; public: - Record(Record const&) = default; - Record& operator=(Record const&) = default; + Record(Record const &) = default; + Record &operator=(Record const &) = default; ~Record() override = default; - Record& setUnitDimension(std::map< UnitDimension, double > const&); + Record &setUnitDimension(std::map const &); - template< typename T > + template T timeOffset() const; - template< typename T > - Record& setTimeOffset(T); + template + Record &setTimeOffset(T); private: Record(); - void flush_impl(std::string const&) override; + void flush_impl(std::string const &) override; void read() override; -}; //Record - +}; // Record -template< typename T > -inline T -Record::timeOffset() const -{ return readFloatingpoint< T >("timeOffset"); } +template +inline T Record::timeOffset() const +{ + return readFloatingpoint("timeOffset"); +} -template< typename T > -inline Record& -Record::setTimeOffset(T to) +template +inline Record &Record::setTimeOffset(T to) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("timeOffset", to); return *this; } -} // openPMD +} // namespace openPMD diff --git a/include/openPMD/RecordComponent.hpp b/include/openPMD/RecordComponent.hpp index d382d83d21..4fda44b928 100644 --- a/include/openPMD/RecordComponent.hpp +++ b/include/openPMD/RecordComponent.hpp @@ -20,62 +20,58 @@ */ #pragma once -#include "openPMD/backend/BaseRecordComponent.hpp" -#include "openPMD/auxiliary/ShareRaw.hpp" #include "openPMD/Dataset.hpp" +#include "openPMD/auxiliary/ShareRaw.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" +#include #include -#include #include +#include #include -#include #include #include +#include #include #include -#include // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected: #endif - namespace openPMD { namespace traits { -/** Emulate in the C++17 concept ContiguousContainer - * - * Users can implement this trait for a type to 
signal it can be used as - * contiguous container. - * - * See: - * https://en.cppreference.com/w/cpp/named_req/ContiguousContainer - */ -template< typename T > -struct IsContiguousContainer -{ - static constexpr bool value = false; -}; + /** Emulate in the C++17 concept ContiguousContainer + * + * Users can implement this trait for a type to signal it can be used as + * contiguous container. + * + * See: + * https://en.cppreference.com/w/cpp/named_req/ContiguousContainer + */ + template + struct IsContiguousContainer + { + static constexpr bool value = false; + }; -template< typename T_Value > -struct IsContiguousContainer< std::vector< T_Value > > -{ - static constexpr bool value = true; -}; - -template< - typename T_Value, - std::size_t N -> -struct IsContiguousContainer< std::array< T_Value, N > > -{ - static constexpr bool value = true; -}; + template + struct IsContiguousContainer> + { + static constexpr bool value = true; + }; + + template + struct IsContiguousContainer> + { + static constexpr bool value = true; + }; } // namespace traits -template< typename T > +template class DynamicMemoryView; class RecordComponent; @@ -87,27 +83,28 @@ namespace internal public: RecordComponentData(); - RecordComponentData( RecordComponentData const & ) = delete; - RecordComponentData( RecordComponentData && ) = delete; + RecordComponentData(RecordComponentData const &) = delete; + RecordComponentData(RecordComponentData &&) = delete; - RecordComponentData & operator=( RecordComponentData const & ) = delete; - RecordComponentData & operator=( RecordComponentData && ) = delete; + RecordComponentData &operator=(RecordComponentData const &) = delete; + RecordComponentData &operator=(RecordComponentData &&) = delete; /** * Chunk reading/writing requests on the contained dataset. */ - std::queue< IOTask > m_chunks; + std::queue m_chunks; /** * Stores the value for constant record components. * Ignored otherwise. */ - Attribute m_constantValue{ -1 }; + Attribute m_constantValue{-1}; /** * The same std::string that the parent class would pass as parameter to * RecordComponent::flush(). * This is stored only upon RecordComponent::flush() if * AbstractIOHandler::flushLevel is set to FlushLevel::SkeletonOnly - * (for use by the Span-based overload of RecordComponent::storeChunk()). + * (for use by the Span-based overload of + * RecordComponent::storeChunk()). * @todo Merge functionality with ownKeyInParent? */ std::string m_name; @@ -124,25 +121,21 @@ namespace internal */ bool m_hasBeenExtended = false; }; -} +} // namespace internal class RecordComponent : public BaseRecordComponent { - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; friend class Iteration; friend class ParticleSpecies; - template< typename T_elem > + template friend class BaseRecord; - template< typename T_elem > + template friend class BaseRecordInterface; friend class Record; friend class Mesh; - template< typename > + template friend class DynamicMemoryView; friend class internal::RecordComponentData; friend class MeshRecordComponent; @@ -155,7 +148,7 @@ class RecordComponent : public BaseRecordComponent AUTO }; // Allocation - RecordComponent& setUnitSI(double); + RecordComponent &setUnitSI(double); /** * @brief Declare the dataset's type and extent. 
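// Illustrative sketch of opting a user-defined type into the
// ContiguousContainer trait described above so that storeChunk() accepts it
// directly. MyBuffer is a hypothetical example type; the members shown here
// (value_type, data(), size()) follow common practice and are an assumption,
// see ShareRaw.hpp and the storeChunk() overloads for the exact requirements.
#include <openPMD/openPMD.hpp>

#include <cstddef>

struct MyBuffer
{
    using value_type = double;
    double values[64] = {};
    double *data()
    {
        return values;
    }
    double const *data() const
    {
        return values;
    }
    std::size_t size() const
    {
        return 64;
    }
};

namespace openPMD
{
namespace traits
{
    template <>
    struct IsContiguousContainer<MyBuffer>
    {
        static constexpr bool value = true;
    };
} // namespace traits
} // namespace openPMD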
@@ -176,7 +169,7 @@ class RecordComponent : public BaseRecordComponent * * @return RecordComponent& */ - RecordComponent & resetDataset( Dataset ); + RecordComponent &resetDataset(Dataset); uint8_t getDimensionality() const; Extent getExtent() const; @@ -189,8 +182,8 @@ class RecordComponent : public BaseRecordComponent * @tparam T type of the stored value * @return A reference to this RecordComponent. */ - template< typename T > - RecordComponent& makeConstant(T); + template + RecordComponent &makeConstant(T); /** Create a dataset with zero extent in each dimension. * @@ -200,8 +193,8 @@ class RecordComponent : public BaseRecordComponent * zero. * @return A reference to this RecordComponent. */ - template< typename T > - RecordComponent& makeEmpty( uint8_t dimensions ); + template + RecordComponent &makeEmpty(uint8_t dimensions); /** * @brief Non-template overload of RecordComponent::makeEmpty(). @@ -211,7 +204,7 @@ class RecordComponent : public BaseRecordComponent * @param dimensions The dimensionality of the dataset. * @return RecordComponent& */ - RecordComponent& makeEmpty( Datatype dt, uint8_t dimensions ); + RecordComponent &makeEmpty(Datatype dt, uint8_t dimensions); /** Returns true if this is an empty record component * @@ -229,33 +222,28 @@ class RecordComponent : public BaseRecordComponent * If offset is non-zero and extent is {-1u} the leftover extent in the * record component will be selected. */ - template< typename T > - std::shared_ptr< T > loadChunk( - Offset = { 0u }, - Extent = { -1u } ); + template + std::shared_ptr loadChunk(Offset = {0u}, Extent = {-1u}); /** Load a chunk of data into pre-allocated memory * - * shared_ptr for data must be pre-allocated, contiguous and large enough for extent + * shared_ptr for data must be pre-allocated, contiguous and large enough + * for extent * * Set offset to {0u} and extent to {-1u} for full selection. * * If offset is non-zero and extent is {-1u} the leftover extent in the * record component will be selected. */ - template< typename T > - void loadChunk( - std::shared_ptr< T >, - Offset, - Extent ); + template + void loadChunk(std::shared_ptr, Offset, Extent); - template< typename T > - void storeChunk(std::shared_ptr< T >, Offset, Extent); + template + void storeChunk(std::shared_ptr, Offset, Extent); - template< typename T_ContiguousContainer > + template typename std::enable_if< - traits::IsContiguousContainer< T_ContiguousContainer >::value - >::type + traits::IsContiguousContainer::value>::type storeChunk(T_ContiguousContainer &, Offset = {0u}, Extent = {-1u}); /** @@ -288,20 +276,20 @@ class RecordComponent : public BaseRecordComponent * * @return View into a buffer that can be filled with data. */ - template< typename T, typename F > - DynamicMemoryView< T > storeChunk( Offset, Extent, F && createBuffer ); + template + DynamicMemoryView storeChunk(Offset, Extent, F &&createBuffer); /** * Overload of span-based storeChunk() that uses operator new() to create * a buffer. */ - template< typename T > - DynamicMemoryView< T > storeChunk( Offset, Extent ); + template + DynamicMemoryView storeChunk(Offset, Extent); - static constexpr char const * const SCALAR = "\vScalar"; + static constexpr char const *const SCALAR = "\vScalar"; private: - void flush(std::string const&); + void flush(std::string const &); virtual void read(); /** @@ -310,7 +298,7 @@ class RecordComponent : public BaseRecordComponent * @param d The dataset description. Must have nonzero dimensions. * @return Reference to this RecordComponent instance. 
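// Illustrative round-trip sketch for the chunk-based I/O documented above
// (resetDataset() / storeChunk() / loadChunk()); the file name, dataset name
// and sizes are placeholders, not taken from this diff.
#include <openPMD/openPMD.hpp>

#include <numeric>
#include <vector>

void chunkRoundTrip()
{
    using namespace openPMD;

    std::vector<double> data(100);
    std::iota(data.begin(), data.end(), 0.0);

    {
        Series write("chunks.json", Access::CREATE);
        auto rho = write.iterations[0].meshes["rho"][RecordComponent::SCALAR];
        rho.resetDataset(Dataset(Datatype::DOUBLE, {10, 10}));
        rho.storeChunk(data, {0, 0}, {10, 10}); // contiguous-container overload
        write.flush();
    } // closing the Series persists the data

    Series read("chunks.json", Access::READ_ONLY);
    auto rho = read.iterations[0].meshes["rho"][RecordComponent::SCALAR];
    auto chunk = rho.loadChunk<double>({0, 0}, {10, 10});
    read.flush(); // chunk.get() is valid only after this flush
}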
*/ - RecordComponent& makeEmpty( Dataset d ); + RecordComponent &makeEmpty(Dataset d); /** * @brief Check recursively whether this RecordComponent is dirty. @@ -322,28 +310,31 @@ class RecordComponent : public BaseRecordComponent */ bool dirtyRecursive() const; - std::shared_ptr< internal::RecordComponentData > m_recordComponentData{ - new internal::RecordComponentData() }; + std::shared_ptr m_recordComponentData{ + new internal::RecordComponentData()}; RecordComponent(); -OPENPMD_protected: - RecordComponent( std::shared_ptr< internal::RecordComponentData > ); + // clang-format off +OPENPMD_protected + // clang-format on + + RecordComponent(std::shared_ptr); - inline internal::RecordComponentData const & get() const + inline internal::RecordComponentData const &get() const { return *m_recordComponentData; } - inline internal::RecordComponentData & get() + inline internal::RecordComponentData &get() { return *m_recordComponentData; } - inline void setData( std::shared_ptr< internal::RecordComponentData > data ) + inline void setData(std::shared_ptr data) { - m_recordComponentData = std::move( data ); - BaseRecordComponent::setData( m_recordComponentData ); + m_recordComponentData = std::move(data); + BaseRecordComponent::setData(m_recordComponentData); } void readBase(); diff --git a/include/openPMD/Series.hpp b/include/openPMD/Series.hpp index 309aec7567..527ef4538f 100644 --- a/include/openPMD/Series.hpp +++ b/include/openPMD/Series.hpp @@ -20,9 +20,6 @@ */ #pragma once -#include "openPMD/config.hpp" -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/Access.hpp" #include "openPMD/IO/Format.hpp" @@ -37,7 +34,7 @@ #include "openPMD/version.hpp" #if openPMD_HAVE_MPI -# include +#include #endif #include @@ -46,10 +43,9 @@ // expose private and protected members for invasive testing #ifndef OPENPMD_private -# define OPENPMD_private private +#define OPENPMD_private private: #endif - namespace openPMD { class ReadIterations; @@ -58,105 +54,107 @@ class Series; namespace internal { -/** - * @brief Data members for Series. Pinned at one memory location. - * - * (Not movable or copyable) - * - */ -class SeriesData : public AttributableData -{ -public: - explicit SeriesData() = default; - - virtual ~SeriesData(); - - SeriesData( SeriesData const & ) = delete; - SeriesData( SeriesData && ) = delete; - - SeriesData & operator=( SeriesData const & ) = delete; - SeriesData & operator=( SeriesData && ) = delete; - - Container< Iteration, uint64_t > iterations{}; - - /** - * For each instance of Series, there is only one instance - * of WriteIterations, stored in this Option. - * This ensures that Series::writeIteration() always returns - * the same instance. - */ - std::optional< WriteIterations > m_writeIterations; - /** - * Needed if reading a single iteration of a file-based series. - * Users may specify the concrete filename of one iteration instead of the - * file-based expansion pattern. - * In that case, the filename must not be constructed from prefix, infix and - * suffix as usual in file-based iteration encoding. - * Instead, the user-specified filename should be used directly. - * Store that filename in the following Option to indicate this situation. - */ - std::optional< std::string > m_overrideFilebasedFilename; - /** - * Name of the iteration without filename suffix. - * In case of file-based iteration encoding, with expansion pattern. 
- * E.g.: simData.bp -> simData - * simData_%06T.h5 -> simData_%06T - */ - std::string m_name; - /** - * Filename leading up to the expansion pattern. - * Only used for file-based iteration encoding. - */ - std::string m_filenamePrefix; - /** - * Filename after the expansion pattern without filename extension. - */ - std::string m_filenamePostfix; - /** - * The padding in file-based iteration encoding. - * 0 if no padding is given (%T pattern). - * -1 if no expansion pattern has been parsed. - */ - int m_filenamePadding = -1; - /** - * The iteration encoding used in this series. - */ - IterationEncoding m_iterationEncoding{}; - /** - * Detected IO format (backend). - */ - Format m_format; - /** - * Whether a step is currently active for this iteration. - * Used for group-based iteration layout, see SeriesData.hpp for - * iteration-based layout. - * Access via stepStatus() method to automatically select the correct - * one among both flags. - */ - StepStatus m_stepStatus = StepStatus::NoStep; - /** - * True if a user opts into lazy parsing. - */ - bool m_parseLazily = false; /** - * This is to avoid that the destructor tries flushing again if an error - * happened. Otherwise, this would lead to confusing error messages. - * Initialized as false, set to true after successful construction. - * If flushing results in an error, set this back to false. - * The destructor will only attempt flushing again if this is true. + * @brief Data members for Series. Pinned at one memory location. + * + * (Not movable or copyable) + * */ - bool m_lastFlushSuccessful = false; -}; // SeriesData - -class SeriesInternal; + class SeriesData : public AttributableData + { + public: + explicit SeriesData() = default; + + virtual ~SeriesData(); + + SeriesData(SeriesData const &) = delete; + SeriesData(SeriesData &&) = delete; + + SeriesData &operator=(SeriesData const &) = delete; + SeriesData &operator=(SeriesData &&) = delete; + + Container iterations{}; + + /** + * For each instance of Series, there is only one instance + * of WriteIterations, stored in this Option. + * This ensures that Series::writeIteration() always returns + * the same instance. + */ + std::optional m_writeIterations; + /** + * Needed if reading a single iteration of a file-based series. + * Users may specify the concrete filename of one iteration instead of + * the file-based expansion pattern. In that case, the filename must not + * be constructed from prefix, infix and suffix as usual in file-based + * iteration encoding. Instead, the user-specified filename should be + * used directly. Store that filename in the following Option to + * indicate this situation. + */ + std::optional m_overrideFilebasedFilename; + /** + * Name of the iteration without filename suffix. + * In case of file-based iteration encoding, with expansion pattern. + * E.g.: simData.bp -> simData + * simData_%06T.h5 -> simData_%06T + */ + std::string m_name; + /** + * Filename leading up to the expansion pattern. + * Only used for file-based iteration encoding. + */ + std::string m_filenamePrefix; + /** + * Filename after the expansion pattern without filename extension. + */ + std::string m_filenamePostfix; + /** + * The padding in file-based iteration encoding. + * 0 if no padding is given (%T pattern). + * -1 if no expansion pattern has been parsed. + */ + int m_filenamePadding = -1; + /** + * The iteration encoding used in this series. + */ + IterationEncoding m_iterationEncoding{}; + /** + * Detected IO format (backend). 
+ */ + Format m_format; + /** + * Whether a step is currently active for this iteration. + * Used for group-based iteration layout, see SeriesData.hpp for + * iteration-based layout. + * Access via stepStatus() method to automatically select the correct + * one among both flags. + */ + StepStatus m_stepStatus = StepStatus::NoStep; + /** + * True if a user opts into lazy parsing. + */ + bool m_parseLazily = false; + /** + * This is to avoid that the destructor tries flushing again if an error + * happened. Otherwise, this would lead to confusing error messages. + * Initialized as false, set to true after successful construction. + * If flushing results in an error, set this back to false. + * The destructor will only attempt flushing again if this is true. + */ + bool m_lastFlushSuccessful = false; + }; // SeriesData + + class SeriesInternal; } // namespace internal /** @brief Implementation for the root level of the openPMD hierarchy. * * Entry point and common link between all iterations of particle and mesh data. * - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series */ class Series : public Attributable { @@ -169,17 +167,17 @@ class Series : public Attributable protected: // Should not be called publicly, only by implementing classes - Series( std::shared_ptr< internal::SeriesData > ); + Series(std::shared_ptr); public: explicit Series(); #if openPMD_HAVE_MPI Series( - std::string const & filepath, + std::string const &filepath, Access at, MPI_Comm comm, - std::string const & options = "{}" ); + std::string const &options = "{}"); #endif /** @@ -192,13 +190,13 @@ class Series : public Attributable * to a JSON textfile, prepended by an at sign '@'. */ Series( - std::string const & filepath, + std::string const &filepath, Access at, - std::string const & options = "{}" ); + std::string const &options = "{}"); virtual ~Series() = default; - Container< Iteration, uint64_t > iterations; + Container iterations; /** * @brief Is this a usable Series object? @@ -209,101 +207,135 @@ class Series : public Attributable operator bool() const; /** - * @return String representing the current enforced version of the openPMD standard. + * @return String representing the current enforced version of the openPMD + * standard. */ std::string openPMD() const; - /** Set the version of the enforced openPMD standard. + /** Set the version of the enforced openPMD + * standard. * - * @param openPMD String MAJOR.MINOR.REVISION of the desired version of the openPMD standard. + * @param openPMD String MAJOR.MINOR.REVISION of the + * desired version of the openPMD standard. * @return Reference to modified series. */ - Series& setOpenPMD(std::string const& openPMD); + Series &setOpenPMD(std::string const &openPMD); /** - * @return 32-bit mask of applied extensions to the openPMD standard. + * @return 32-bit mask of applied extensions to the openPMD + * standard. */ uint32_t openPMDextension() const; - /** Set a 32-bit mask of applied extensions to the openPMD standard. + /** Set a 32-bit mask of applied extensions to the openPMD + * standard. * - * @param openPMDextension Unsigned 32-bit integer used as a bit-mask of applied extensions. 
+ * @param openPMDextension Unsigned 32-bit integer used as a bit-mask of + * applied extensions. * @return Reference to modified series. */ - Series& setOpenPMDextension(uint32_t openPMDextension); + Series &setOpenPMDextension(uint32_t openPMDextension); /** - * @return String representing the common prefix for all data sets and sub-groups of a specific iteration. + * @return String representing the common prefix for all data sets and + * sub-groups of a specific iteration. */ std::string basePath() const; - /** Set the common prefix for all data sets and sub-groups of a specific iteration. + /** Set the common prefix for all data sets and sub-groups of a specific + * iteration. * - * @param basePath String of the common prefix for all data sets and sub-groups of a specific iteration. + * @param basePath String of the common prefix for all data sets and + * sub-groups of a specific iteration. * @return Reference to modified series. */ - Series& setBasePath(std::string const& basePath); + Series &setBasePath(std::string const &basePath); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String representing the path to mesh records, relative(!) to basePath. + * @return String representing the path to mesh records, relative(!) to + * basePath. */ std::string meshesPath() const; - /** Set the path to mesh records, relative(!) to basePath. + /** Set the path to mesh + * records, relative(!) to basePath. * - * @param meshesPath String of the path to mesh records, relative(!) to basePath. + * @param meshesPath String of the path to mesh + * records, relative(!) to basePath. * @return Reference to modified series. */ - Series& setMeshesPath(std::string const& meshesPath); + Series &setMeshesPath(std::string const &meshesPath); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String representing the path to particle species, relative(!) to basePath. + * @return String representing the path to particle species, relative(!) to + * basePath. */ std::string particlesPath() const; - /** Set the path to groups for each particle species, relative(!) to basePath. + /** Set the path to groups for each particle + * species, relative(!) to basePath. * - * @param particlesPath String of the path to groups for each particle species, relative(!) to basePath. + * @param particlesPath String of the path to groups for each particle + * species, relative(!) to basePath. * @return Reference to modified series. */ - Series& setParticlesPath(std::string const& particlesPath); + Series &setParticlesPath(std::string const &particlesPath); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating author and contact for the information in the file. + * @return String indicating author and contact for the information in the + * file. */ std::string author() const; /** Indicate the author and contact for the information in the file. * - * @param author String indicating author and contact for the information in the file. + * @param author String indicating author and contact for the information + * in the file. * @return Reference to modified series. */ - Series& setAuthor(std::string const& author); + Series &setAuthor(std::string const &author); /** * @throw no_such_attribute_error If optional attribute is not present. 
- * @return String indicating the software/code/simulation that created the file; + * @return String indicating the software/code/simulation that created the + * file; */ std::string software() const; /** Indicate the software/code/simulation that created the file. * - * @param newName String indicating the software/code/simulation that created the file. - * @param newVersion String indicating the version of the software/code/simulation that created the file. + * @param newName String indicating the software/code/simulation that + * created the file. + * @param newVersion String indicating the version of the + * software/code/simulation that created the file. * @return Reference to modified series. */ - Series& setSoftware(std::string const& newName, std::string const& newVersion = std::string("unspecified")); + Series &setSoftware( + std::string const &newName, + std::string const &newVersion = std::string("unspecified")); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating the version of the software/code/simulation that created the file. + * @return String indicating the version of the software/code/simulation + * that created the file. */ std::string softwareVersion() const; - /** Indicate the version of the software/code/simulation that created the file. + /** Indicate the version of the software/code/simulation that created the + * file. * * @deprecated Set the version with the second argument of setSoftware() * - * @param softwareVersion String indicating the version of the software/code/simulation that created the file. + * @param softwareVersion String indicating the version of the + * software/code/simulation that created the file. * @return Reference to modified series. */ - [[deprecated("Set the version with the second argument of setSoftware()")]] - Series& setSoftwareVersion(std::string const& softwareVersion); + [[deprecated( + "Set the version with the second argument of setSoftware()")]] Series & + setSoftwareVersion(std::string const &softwareVersion); /** * @throw no_such_attribute_error If optional attribute is not present. @@ -315,61 +347,78 @@ class Series : public Attributable * @param date String indicating the date of creation. * @return Reference to modified series. */ - Series& setDate(std::string const& date); + Series &setDate(std::string const &date); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating dependencies of software that were used to create the file. + * @return String indicating dependencies of software that were used to + * create the file. */ std::string softwareDependencies() const; /** Indicate dependencies of software that were used to create the file. * - * @param newSoftwareDependencies String indicating dependencies of software that were used to create the file (semicolon-separated list if needed). + * @param newSoftwareDependencies String indicating dependencies of + * software that were used to create the file (semicolon-separated list if + * needed). * @return Reference to modified series. */ - Series& setSoftwareDependencies(std::string const& newSoftwareDependencies); + Series &setSoftwareDependencies(std::string const &newSoftwareDependencies); /** * @throw no_such_attribute_error If optional attribute is not present. - * @return String indicating the machine or relevant hardware that created the file. + * @return String indicating the machine or relevant hardware that created + * the file. 
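Taken together, the Series metadata setters touched in this hunk (setAuthor, setSoftware, setDate, setSoftwareDependencies, setMachine and friends) all return Series&, so calls can be chained. A small illustrative sketch, not part of the reformatting diff, with a made-up file name and attribute values:

    #include <openPMD/openPMD.hpp>

    #include <iostream>

    int main()
    {
        using namespace openPMD;

        Series series("metadata_sketch.json", Access::CREATE); // illustrative name

        // every setter returns a reference to the modified Series
        series.setAuthor("Jane Doe <jane@example.com>")
            .setSoftware("my-simulation", "1.2.3") // version argument is optional
            .setDate("2021-12-24 10:00:00 +0000")
            .setSoftwareDependencies("openPMD-api, ADIOS2") // free-form list
            .setMachine("example-cluster");

        // the matching getters read the attributes back
        std::cout << "openPMD standard " << series.openPMD()
                  << ", author: " << series.author() << std::endl;
        return 0;
    }
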
*/ std::string machine() const; /** Indicate the machine or relevant hardware that created the file. * - * @param newMachine String indicating the machine or relevant hardware that created the file (semicolon-separated list if needed).. + * @param newMachine String indicating the machine or relevant hardware + * that created the file (semicolon-separated list if needed).. * @return Reference to modified series. */ - Series& setMachine(std::string const& newMachine); + Series &setMachine(std::string const &newMachine); /** * @return Current encoding style for multiple iterations in this series. */ IterationEncoding iterationEncoding() const; - /** Set the encoding style for multiple iterations in this series. - * A preview on the openPMD 2.0 variable-based iteration encoding can be activated with this call. - * Making full use of the variable-based iteration encoding requires (1) explicit support by the backend (available only in ADIOS2) and (2) use of the openPMD streaming API. - * In other backends and without the streaming API, only one iteration/snapshot may be written in the variable-based encoding, making this encoding a good choice for single-snapshot data dumps. + /** Set the encoding + * style for multiple iterations in this series. A preview on the openPMD 2.0 + * variable-based iteration encoding can be activated with this call. + * Making full use of the variable-based iteration encoding requires (1) + * explicit support by the backend (available only in ADIOS2) and (2) use of + * the openPMD streaming API. In other backends and without the streaming + * API, only one iteration/snapshot may be written in the variable-based + * encoding, making this encoding a good choice for single-snapshot data + * dumps. * - * @param iterationEncoding Desired encoding style for multiple iterations in this series. + * @param iterationEncoding Desired encoding + * style for multiple iterations in this series. * @return Reference to modified series. */ - Series& setIterationEncoding(IterationEncoding iterationEncoding); + Series &setIterationEncoding(IterationEncoding iterationEncoding); /** - * @return String describing a pattern describing how to access single iterations in the raw file. + * @return String describing a pattern + * describing how to access single iterations in the raw file. */ std::string iterationFormat() const; - /** Set a pattern describing how to access single iterations in the raw file. + /** Set a pattern + * describing how to access single iterations in the raw file. * - * @param iterationFormat String with the iteration regex \%T defining either - * the series of files (fileBased) - * or the series of groups within a single file (groupBased) - * that allows to extract the iteration from it. - * For fileBased formats the iteration must be included in the file name. - * The format depends on the selected iterationEncoding method. + * @param iterationFormat String with the iteration regex \%T + * defining either the series of files (fileBased) or the series of groups + * within a single file (groupBased) that allows to extract the iteration + * from it. For fileBased formats the iteration must be included in the file + * name. The format depends on the selected iterationEncoding method. * @return Reference to modified series. */ - Series& setIterationFormat(std::string const& iterationFormat); + Series &setIterationFormat(std::string const &iterationFormat); /** * @return String of a pattern for file names. 
@@ -378,10 +427,11 @@ class Series : public Attributable /** Set the pattern for file names. * - * @param name String of the pattern for file names. Must include iteration regex \%T for fileBased data. + * @param name String of the pattern for file names. Must include + * iteration regex \%T for fileBased data. * @return Reference to modified series. */ - Series& setName(std::string const& name); + Series &setName(std::string const &name); /** The currently used backend * @@ -421,41 +471,45 @@ class Series : public Attributable */ WriteIterations writeIterations(); -OPENPMD_private: - static constexpr char const * const BASEPATH = "/data/%T/"; + // clang-format off +OPENPMD_private + // clang-format on + + static constexpr char const *const BASEPATH = "/data/%T/"; struct ParsedInput; using iterations_t = decltype(internal::SeriesData::iterations); using iterations_iterator = iterations_t::iterator; - std::shared_ptr< internal::SeriesData > m_series = nullptr; + std::shared_ptr m_series = nullptr; - inline internal::SeriesData & get() + inline internal::SeriesData &get() { - if( m_series ) + if (m_series) { return *m_series; } else { throw std::runtime_error( - "[Series] Cannot use default-constructed Series." ); + "[Series] Cannot use default-constructed Series."); } } - inline internal::SeriesData const & get() const + inline internal::SeriesData const &get() const { - if( m_series ) + if (m_series) { return *m_series; } else { throw std::runtime_error( - "[Series] Cannot use default-constructed Series." ); - } } + "[Series] Cannot use default-constructed Series."); + } + } - std::unique_ptr< ParsedInput > parseInput(std::string); + std::unique_ptr parseInput(std::string); /** * @brief Parse non-backend-specific configuration in JSON config. * @@ -465,12 +519,12 @@ class Series : public Attributable * @tparam TracingJSON template parameter so we don't have * to include the JSON lib here */ - template< typename TracingJSON > - void parseJsonOptions( TracingJSON & options, ParsedInput & ); - bool hasExpansionPattern( std::string filenameWithExtension ); - bool reparseExpansionPattern( std::string filenameWithExtension ); - void init(std::shared_ptr< AbstractIOHandler >, std::unique_ptr< ParsedInput >); - void initDefaults( IterationEncoding ); + template + void parseJsonOptions(TracingJSON &options, ParsedInput &); + bool hasExpansionPattern(std::string filenameWithExtension); + bool reparseExpansionPattern(std::string filenameWithExtension); + void init(std::shared_ptr, std::unique_ptr); + void initDefaults(IterationEncoding); /** * @brief Internal call for flushing a Series. * @@ -482,12 +536,12 @@ class Series : public Attributable * @param flushIOHandler Tasks will always be enqueued to the backend. * If this flag is true, tasks will be flushed to the backend. 
*/ - std::future< void > flush_impl( + std::future flush_impl( iterations_iterator begin, iterations_iterator end, FlushLevel level, - bool flushIOHandler = true ); - void flushFileBased( iterations_iterator begin, iterations_iterator end ); + bool flushIOHandler = true); + void flushFileBased(iterations_iterator begin, iterations_iterator end); /* * Group-based and variable-based iteration layouts share a lot of logic * (realistically, the variable-based iteration layout only throws out @@ -495,19 +549,19 @@ class Series : public Attributable * As a convention, methods that deal with both layouts are called * .*GorVBased, short for .*GroupOrVariableBased */ - void flushGorVBased( iterations_iterator begin, iterations_iterator end ); + void flushGorVBased(iterations_iterator begin, iterations_iterator end); void flushMeshesPath(); void flushParticlesPath(); - void readFileBased( ); - void readOneIterationFileBased( std::string const & filePath ); + void readFileBased(); + void readOneIterationFileBased(std::string const &filePath); /** * Note on re-parsing of a Series: * If init == false, the parsing process will seek for new * Iterations/Records/Record Components etc. */ - void readGorVBased( bool init = true ); + void readGorVBased(bool init = true); void readBase(); - std::string iterationFilename( uint64_t i ); + std::string iterationFilename(uint64_t i); enum class IterationOpened : bool { @@ -520,21 +574,20 @@ class Series : public Attributable * Only open if the iteration is dirty and if it is not in deferred * parse state. */ - IterationOpened openIterationIfDirty( uint64_t index, Iteration iteration ); + IterationOpened openIterationIfDirty(uint64_t index, Iteration iteration); /* * Open an iteration. Ensures that the iteration's m_closed status * is set properly and that any files pertaining to the iteration * is opened. * Does not create files when called in CREATE mode. */ - void openIteration( uint64_t index, Iteration iteration ); + void openIteration(uint64_t index, Iteration iteration); /** * Find the given iteration in Series::iterations and return an iterator * into Series::iterations at that place. */ - iterations_iterator - indexOf( Iteration const & ); + iterations_iterator indexOf(Iteration const &); /** * @brief In step-based IO mode, begin or end an IO step for the given @@ -551,12 +604,11 @@ class Series : public Attributable * @param iteration The actual Iteration object. 
* @return AdvanceStatus */ - AdvanceStatus - advance( + AdvanceStatus advance( AdvanceMode mode, - internal::AttributableData & file, + internal::AttributableData &file, iterations_iterator it, - Iteration & iteration ); + Iteration &iteration); }; // Series } // namespace openPMD diff --git a/include/openPMD/Span.hpp b/include/openPMD/Span.hpp index 68bb865f2d..48b24b02f9 100644 --- a/include/openPMD/Span.hpp +++ b/include/openPMD/Span.hpp @@ -33,37 +33,36 @@ namespace openPMD * Any existing member behaves equivalently to those documented here: * https://en.cppreference.com/w/cpp/container/span */ -template< typename T > +template class Span { - template< typename > + template friend class DynamicMemoryView; private: - T * m_ptr; + T *m_ptr; size_t m_size; - Span( T * ptr, size_t size ) : m_ptr( ptr ), m_size( size ) - { - } + Span(T *ptr, size_t size) : m_ptr(ptr), m_size(size) + {} public: using iterator = T *; - using reverse_iterator = std::reverse_iterator< iterator >; + using reverse_iterator = std::reverse_iterator; size_t size() const { return m_size; } - inline T * data() const + inline T *data() const { return m_ptr; } - inline T & operator[]( size_t i ) const + inline T &operator[](size_t i) const { - return data()[ i ]; + return data()[i]; } inline iterator begin() const @@ -77,11 +76,11 @@ class Span inline reverse_iterator rbegin() const { // std::reverse_iterator does the -1 thing automatically - return reverse_iterator{ data() + size() }; + return reverse_iterator{data() + size()}; } inline reverse_iterator rend() const { - return reverse_iterator{ data() }; + return reverse_iterator{data()}; } }; @@ -93,22 +92,22 @@ class Span * Hence, the concrete pointer needs to be acquired right before writing * to it. Otherwise, a use after free might occur. */ -template< typename T > +template class DynamicMemoryView { friend class RecordComponent; private: - using param_t = Parameter< Operation::GET_BUFFER_VIEW >; + using param_t = Parameter; param_t m_param; size_t m_size = 0; RecordComponent m_recordComponent; DynamicMemoryView( - param_t param, size_t size, RecordComponent recordComponent ) - : m_param( std::move( param ) ) - , m_size( size ) - , m_recordComponent( std::move( recordComponent ) ) + param_t param, size_t size, RecordComponent recordComponent) + : m_param(std::move(param)) + , m_size(size) + , m_recordComponent(std::move(recordComponent)) { m_param.update = true; } @@ -119,16 +118,16 @@ class DynamicMemoryView /** * @brief Acquire the underlying buffer at its current position in memory. 
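The Span and DynamicMemoryView classes in this hunk back the span-based RecordComponent::storeChunk() overloads from further above: the library or the backend owns the buffer, and the caller fills it in place through the Span returned by currentBuffer() (whose body follows below), re-acquiring that Span right before writing as the class documentation warns. An illustrative sketch, not part of the reformatting diff, with made-up names:

    #include <openPMD/openPMD.hpp>

    int main()
    {
        using namespace openPMD;

        Series series("span_sketch.json", Access::CREATE); // illustrative name
        auto rho = series.iterations[0].meshes["rho"][RecordComponent::SCALAR];
        rho.resetDataset(Dataset(determineDatatype<double>(), {10}));

        // span-based overload: no user-side allocation is needed
        auto view = rho.storeChunk<double>({0}, {10});
        {
            // acquire the buffer right before writing; it may be backend-owned
            auto span = view.currentBuffer();
            for (size_t i = 0; i < span.size(); ++i)
                span[i] = static_cast<double>(i);
        }
        series.flush();
        return 0;
    }
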
*/ - Span< T > currentBuffer() + Span currentBuffer() { - if( m_param.out->backendManagedBuffer ) + if (m_param.out->backendManagedBuffer) { // might need to update m_recordComponent.IOHandler()->enqueue( - IOTask( &m_recordComponent, m_param ) ); + IOTask(&m_recordComponent, m_param)); m_recordComponent.IOHandler()->flush(); } - return Span< T >{ static_cast< T * >( m_param.out->ptr ), m_size }; + return Span{static_cast(m_param.out->ptr), m_size}; } }; -} +} // namespace openPMD diff --git a/include/openPMD/Streaming.hpp b/include/openPMD/Streaming.hpp index 19c6bdd88c..7bc84341aa 100644 --- a/include/openPMD/Streaming.hpp +++ b/include/openPMD/Streaming.hpp @@ -19,7 +19,7 @@ namespace openPMD */ enum class AdvanceStatus : unsigned char { - OK, /* stream goes on */ + OK, /* stream goes on */ OVER /* stream is over */ }; @@ -44,6 +44,6 @@ enum class AdvanceMode : unsigned char enum class StepStatus : unsigned char { DuringStep, /* step is currently active */ - NoStep /* no step is currently active */ + NoStep /* no step is currently active */ }; } // namespace openPMD diff --git a/include/openPMD/UnitDimension.hpp b/include/openPMD/UnitDimension.hpp index 11ad683d86..232a6ee1f7 100644 --- a/include/openPMD/UnitDimension.hpp +++ b/include/openPMD/UnitDimension.hpp @@ -22,21 +22,20 @@ #include - namespace openPMD { - /** Physical dimension of a record - * - * Dimensional base quantities of the international system of quantities - */ - enum class UnitDimension : uint8_t - { - L = 0, //!< length - M, //!< mass - T, //!< time - I, //!< electric current - theta, //!< thermodynamic temperature - N, //!< amount of substance - J //!< luminous intensity - }; +/** Physical dimension of a record + * + * Dimensional base quantities of the international system of quantities + */ +enum class UnitDimension : uint8_t +{ + L = 0, //!< length + M, //!< mass + T, //!< time + I, //!< electric current + theta, //!< thermodynamic temperature + N, //!< amount of substance + J //!< luminous intensity +}; } // namespace openPMD diff --git a/include/openPMD/WriteIterations.hpp b/include/openPMD/WriteIterations.hpp index 137e99bc7c..414dd1c9e4 100644 --- a/include/openPMD/WriteIterations.hpp +++ b/include/openPMD/WriteIterations.hpp @@ -25,7 +25,6 @@ #include - namespace openPMD { class Series; @@ -50,7 +49,7 @@ class WriteIterations friend class Series; private: - using iterations_t = Container< Iteration, uint64_t >; + using iterations_t = Container; public: using key_type = typename iterations_t::key_type; @@ -62,19 +61,19 @@ class WriteIterations struct SharedResources { iterations_t iterations; - std::optional< uint64_t > currentlyOpen; + std::optional currentlyOpen; - SharedResources( iterations_t ); + SharedResources(iterations_t); ~SharedResources(); }; - WriteIterations( iterations_t ); + WriteIterations(iterations_t); explicit WriteIterations() = default; //! 
Index of the last opened iteration - std::shared_ptr< SharedResources > shared; + std::shared_ptr shared; public: - mapped_type & operator[]( key_type const & key ); - mapped_type & operator[]( key_type && key ); + mapped_type &operator[](key_type const &key); + mapped_type &operator[](key_type &&key); }; } // namespace openPMD diff --git a/include/openPMD/auxiliary/Date.hpp b/include/openPMD/auxiliary/Date.hpp index 0377716c9f..c17a9aca2c 100644 --- a/include/openPMD/auxiliary/Date.hpp +++ b/include/openPMD/auxiliary/Date.hpp @@ -22,16 +22,17 @@ #include - namespace openPMD { namespace auxiliary { /** Return the current datetime as string * - * @param format time format string, @see http://www.cplusplus.com/reference/ctime/strftime/ + * @param format time format string, @see + * http://www.cplusplus.com/reference/ctime/strftime/ * @return std::string with formatted date */ - std::string getDateString( std::string const & format = std::string( "%F %T %z" ) ); + std::string + getDateString(std::string const &format = std::string("%F %T %z")); } // namespace auxiliary } // namespace openPMD diff --git a/include/openPMD/auxiliary/DerefDynamicCast.hpp b/include/openPMD/auxiliary/DerefDynamicCast.hpp index 9a483e147c..1620334ea0 100644 --- a/include/openPMD/auxiliary/DerefDynamicCast.hpp +++ b/include/openPMD/auxiliary/DerefDynamicCast.hpp @@ -22,10 +22,10 @@ #include - namespace openPMD { -namespace auxiliary { +namespace auxiliary +{ /** Returns a value reference stored in a dynamically casted pointer * * Safe version of *dynamic_cast< New_Type* >( some_ptr ); This function @@ -35,12 +35,13 @@ namespace auxiliary { * @tparam New_Type new type to cast to * @tparam Old_Type old type to cast from * @param[in] ptr and input pointer type - * @return value reference of a dereferenced, dynamically casted ptr to New_Type* + * @return value reference of a dereferenced, dynamically casted ptr to + * New_Type* */ - template - inline New_Type & - deref_dynamic_cast(Old_Type *ptr) { - auto const tmp_ptr = dynamic_cast< New_Type * >( ptr ); + template + inline New_Type &deref_dynamic_cast(Old_Type *ptr) + { + auto const tmp_ptr = dynamic_cast(ptr); if (tmp_ptr == nullptr) throw std::runtime_error("Dynamic cast returned a nullptr!"); return *tmp_ptr; diff --git a/include/openPMD/auxiliary/Environment.hpp b/include/openPMD/auxiliary/Environment.hpp index 7971141b7a..2995d2d673 100644 --- a/include/openPMD/auxiliary/Environment.hpp +++ b/include/openPMD/auxiliary/Environment.hpp @@ -24,34 +24,34 @@ #include #include #include -#include #include - +#include namespace openPMD { namespace auxiliary { - inline std::string getEnvString( std::string const & key, std::string const defaultValue ) + inline std::string + getEnvString(std::string const &key, std::string const defaultValue) { - char const * env = std::getenv( key.c_str( ) ); - if ( env != nullptr ) + char const *env = std::getenv(key.c_str()); + if (env != nullptr) return std::string{env}; else return defaultValue; } - inline int getEnvNum( std::string const & key, int defaultValue ) + inline int getEnvNum(std::string const &key, int defaultValue) { - char const * env = std::getenv( key.c_str( ) ); - if ( env != nullptr ) + char const *env = std::getenv(key.c_str()); + if (env != nullptr) { std::string env_string{env}; try { - return std::stoi( env_string ); + return std::stoi(env_string); } - catch ( std::invalid_argument const & ) + catch (std::invalid_argument const &) { return defaultValue; } diff --git a/include/openPMD/auxiliary/Export.hpp 
b/include/openPMD/auxiliary/Export.hpp index 4ad10a571e..394397a092 100644 --- a/include/openPMD/auxiliary/Export.hpp +++ b/include/openPMD/auxiliary/Export.hpp @@ -21,20 +21,22 @@ #pragma once #ifndef OPENPMDAPI_EXPORT -# ifdef _MSC_VER -# define OPENPMDAPI_EXPORT __declspec( dllexport ) -# elif defined(__NVCC__) -# define OPENPMDAPI_EXPORT -# else -# define OPENPMDAPI_EXPORT __attribute__((visibility("default"))) -# endif +#ifdef _MSC_VER +#define OPENPMDAPI_EXPORT __declspec(dllexport) +#elif defined(__NVCC__) +#define OPENPMDAPI_EXPORT +#else +#define OPENPMDAPI_EXPORT __attribute__((visibility("default"))) +#endif #endif #ifndef OPENPMDAPI_EXPORT_ENUM_CLASS -# if defined(__GNUC__) && (__GNUC__ < 6) && !defined(__clang__) && !defined(__INTEL_COMPILER) - // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43407 -# define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) enum class ECNAME : OPENPMDAPI_EXPORT unsigned int -# else -# define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) enum class OPENPMDAPI_EXPORT ECNAME -# endif +#if defined(__GNUC__) && (__GNUC__ < 6) && !defined(__clang__) && \ + !defined(__INTEL_COMPILER) +// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=43407 +#define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) \ + enum class ECNAME : OPENPMDAPI_EXPORT unsigned int +#else +#define OPENPMDAPI_EXPORT_ENUM_CLASS(ECNAME) enum class OPENPMDAPI_EXPORT ECNAME +#endif #endif diff --git a/include/openPMD/auxiliary/Filesystem.hpp b/include/openPMD/auxiliary/Filesystem.hpp index 686cbf1384..ab728b05d0 100644 --- a/include/openPMD/auxiliary/Filesystem.hpp +++ b/include/openPMD/auxiliary/Filesystem.hpp @@ -26,7 +26,7 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_MPI -# include +#include #endif namespace openPMD @@ -34,70 +34,70 @@ namespace openPMD namespace auxiliary { #ifdef _WIN32 -static constexpr char const directory_separator = '\\'; + static constexpr char const directory_separator = '\\'; #else -static constexpr char const directory_separator = '/'; + static constexpr char const directory_separator = '/'; #endif -/** Check if a directory exists at a give absolute or relative path. - * - * @param path Absolute or relative path to examine. - * @return true if the given path or file status corresponds to an existing directory, false otherwise. - */ -bool -directory_exists(std::string const& path); + /** Check if a directory exists at a give absolute or relative path. + * + * @param path Absolute or relative path to examine. + * @return true if the given path or file status corresponds to an existing + * directory, false otherwise. + */ + bool directory_exists(std::string const &path); -/** Check if a file exists at a given absolute or relative path. - * - * @param path Absolute or relative path to examine. - * @return true if the given path or file status corresponds to an existing file, false otherwise. - */ -bool -file_exists(std::string const& path); + /** Check if a file exists at a given absolute or relative path. + * + * @param path Absolute or relative path to examine. + * @return true if the given path or file status corresponds to an existing + * file, false otherwise. + */ + bool file_exists(std::string const &path); -/** List all contents of a directory at a given absolute or relative path. - * - * @note The equivalent of `ls path` - * @note Both contained files and directories are listed. - * `.` and `..` are not returned. - * @throw std::system_error when the given path is not a valid directory. - * @param path Absolute or relative path of directory to examine. 
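The environment helpers from Environment.hpp a little further above (getEnvString() and getEnvNum()) fall back to a caller-supplied default when the variable is unset or, for the numeric variant, not parseable. A brief illustrative sketch, not part of the reformatting diff; the variable names are made up:

    #include <openPMD/auxiliary/Environment.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        namespace aux = openPMD::auxiliary;

        // hypothetical variables, chosen only for illustration
        std::string engine = aux::getEnvString("OPENPMD_SKETCH_ENGINE", "bp4");
        int verbosity = aux::getEnvNum("OPENPMD_SKETCH_VERBOSE", 0);

        std::cout << engine << " / " << verbosity << std::endl;
        return 0;
    }
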
- * @return Vector of all contained files and directories. - */ -std::vector< std::string > -list_directory(std::string const& path ); + /** List all contents of a directory at a given absolute or relative path. + * + * @note The equivalent of `ls path` + * @note Both contained files and directories are listed. + * `.` and `..` are not returned. + * @throw std::system_error when the given path is not a valid directory. + * @param path Absolute or relative path of directory to examine. + * @return Vector of all contained files and directories. + */ + std::vector list_directory(std::string const &path); -/** Create all required directories to have a reachable given absolute or relative path. - * - * @note The equivalent of `mkdir -p path` - * @param path Absolute or relative path to the new directory to create. - * @return true if a directory was created for the directory p resolves to, false otherwise. - */ -bool -create_directories(std::string const& path); + /** Create all required directories to have a reachable given absolute or + * relative path. + * + * @note The equivalent of `mkdir -p path` + * @param path Absolute or relative path to the new directory to + * create. + * @return true if a directory was created for the directory p resolves to, + * false otherwise. + */ + bool create_directories(std::string const &path); -/** Remove the directory identified by the given path. - * - * @note The equivalent of `rm -r path`. - * @param path Absolute or relative path to the directory to delete. - * @return true if the directory was deleted, false otherwise and if it did not exist. - */ -bool -remove_directory(std::string const& path); + /** Remove the directory identified by the given path. + * + * @note The equivalent of `rm -r path`. + * @param path Absolute or relative path to the directory to delete. + * @return true if the directory was deleted, false otherwise and if it did + * not exist. + */ + bool remove_directory(std::string const &path); -/** Remove the file identified by the given path. - * - * @note The equivalent of `rm path`. - * @param path Absolute or relative path to the file to delete. - * @return true if the file was deleted, false otherwise and if it did not exist. - */ -bool -remove_file(std::string const& path); + /** Remove the file identified by the given path. + * + * @note The equivalent of `rm path`. + * @param path Absolute or relative path to the file to delete. + * @return true if the file was deleted, false otherwise and if it did not + * exist. 
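The auxiliary filesystem helpers in this hunk mirror common shell commands (ls, mkdir -p, rm -r, rm), as their @note tags state. They are primarily internal utilities, but a short illustrative sketch of the calls, not part of the reformatting diff and using a made-up path, shows the intent:

    #include <openPMD/auxiliary/Filesystem.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        namespace aux = openPMD::auxiliary;

        std::string dir = "sketch_output/run_001"; // illustrative path
        if (!aux::directory_exists(dir))
            aux::create_directories(dir); // `mkdir -p` semantics

        // `ls` semantics: files and directories, without "." and ".."
        for (auto const &entry : aux::list_directory("sketch_output"))
            std::cout << entry << std::endl;

        aux::remove_directory("sketch_output"); // `rm -r` semantics
        return 0;
    }
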
+ */ + bool remove_file(std::string const &path); #if openPMD_HAVE_MPI -std::string -collective_file_read( std::string const & path, MPI_Comm ); + std::string collective_file_read(std::string const &path, MPI_Comm); #endif } // namespace auxiliary diff --git a/include/openPMD/auxiliary/JSON.hpp b/include/openPMD/auxiliary/JSON.hpp index eace4191d7..c4cc5832aa 100644 --- a/include/openPMD/auxiliary/JSON.hpp +++ b/include/openPMD/auxiliary/JSON.hpp @@ -56,8 +56,7 @@ namespace json * @param overwrite * @return std::string */ - std::string merge( - std::string const & defaultValue, - std::string const & overwrite ); -} -} + std::string + merge(std::string const &defaultValue, std::string const &overwrite); +} // namespace json +} // namespace openPMD diff --git a/include/openPMD/auxiliary/JSON_internal.hpp b/include/openPMD/auxiliary/JSON_internal.hpp index 12120d4533..e2dc838423 100644 --- a/include/openPMD/auxiliary/JSON_internal.hpp +++ b/include/openPMD/auxiliary/JSON_internal.hpp @@ -31,10 +31,10 @@ #include #if openPMD_HAVE_MPI -# include +#include #endif -#include // std::shared_ptr +#include // std::shared_ptr #include // std::forward namespace openPMD @@ -43,13 +43,14 @@ namespace json { enum class SupportedLanguages { - JSON, TOML + JSON, + TOML }; struct ParsedConfig { nlohmann::json config; - SupportedLanguages originallySpecifiedAs{ SupportedLanguages::JSON }; + SupportedLanguages originallySpecifiedAs{SupportedLanguages::JSON}; }; /** @@ -69,22 +70,21 @@ namespace json { public: TracingJSON(); - TracingJSON( nlohmann::json, SupportedLanguages ); - TracingJSON( ParsedConfig ); + TracingJSON(nlohmann::json, SupportedLanguages); + TracingJSON(ParsedConfig); /** * @brief Access the underlying JSON value * * @return nlohmann::json& */ - inline nlohmann::json & - json() + inline nlohmann::json &json() { return *m_positionInOriginal; } - template< typename Key > - TracingJSON operator[]( Key && key ); + template + TracingJSON operator[](Key &&key); /** * @brief Get the "shadow", i.e. a copy of the original JSON value @@ -92,7 +92,7 @@ namespace json * * @return nlohmann::json const& */ - nlohmann::json const & getShadow() const; + nlohmann::json const &getShadow() const; /** * @brief Invert the "shadow", i.e. a copy of the original JSON value @@ -109,10 +109,9 @@ namespace json * contained in an array). Use this call to explicitly declare * an array as read. */ - void - declareFullyRead(); + void declareFullyRead(); - SupportedLanguages originallySpecifiedAs{ SupportedLanguages::JSON }; + SupportedLanguages originallySpecifiedAs{SupportedLanguages::JSON}; private: /** @@ -121,7 +120,7 @@ namespace json * operator[]() in order to avoid use-after-free situations. * */ - std::shared_ptr< nlohmann::json > m_originalJSON; + std::shared_ptr m_originalJSON; /** * @brief A JSON object keeping track of all accessed indices within the * original JSON object. Initially an empty JSON object, @@ -131,45 +130,45 @@ namespace json * operator[]() in order to avoid use-after-free situations. * */ - std::shared_ptr< nlohmann::json > m_shadow; + std::shared_ptr m_shadow; /** * @brief The sub-expression within m_originalJSON corresponding with * the current instance. * */ - nlohmann::json * m_positionInOriginal; + nlohmann::json *m_positionInOriginal; /** * @brief The sub-expression within m_positionInOriginal corresponding * with the current instance. 
* */ - nlohmann::json * m_positionInShadow; + nlohmann::json *m_positionInShadow; bool m_trace = true; void invertShadow( - nlohmann::json & result, nlohmann::json const & shadow ) const; + nlohmann::json &result, nlohmann::json const &shadow) const; TracingJSON( - std::shared_ptr< nlohmann::json > originalJSON, - std::shared_ptr< nlohmann::json > shadow, - nlohmann::json * positionInOriginal, - nlohmann::json * positionInShadow, + std::shared_ptr originalJSON, + std::shared_ptr shadow, + nlohmann::json *positionInOriginal, + nlohmann::json *positionInShadow, SupportedLanguages originallySpecifiedAs, - bool trace ); + bool trace); }; - template< typename Key > - TracingJSON TracingJSON::operator[]( Key && key ) + template + TracingJSON TracingJSON::operator[](Key &&key) { - nlohmann::json * newPositionInOriginal = - &m_positionInOriginal->operator[]( key ); + nlohmann::json *newPositionInOriginal = + &m_positionInOriginal->operator[](key); // If accessing a leaf in the JSON tree from an object (not an array!) // erase the corresponding key static nlohmann::json nullvalue; - nlohmann::json * newPositionInShadow = &nullvalue; - if( m_trace && m_positionInOriginal->is_object() ) + nlohmann::json *newPositionInShadow = &nullvalue; + if (m_trace && m_positionInOriginal->is_object()) { - newPositionInShadow = &m_positionInShadow->operator[]( key ); + newPositionInShadow = &m_positionInShadow->operator[](key); } bool traceFurther = newPositionInOriginal->is_object(); return TracingJSON( @@ -178,11 +177,11 @@ namespace json newPositionInOriginal, newPositionInShadow, originallySpecifiedAs, - traceFurther ); + traceFurther); } - nlohmann::json tomlToJson( toml::value const & val ); - toml::value jsonToToml( nlohmann::json const & val ); + nlohmann::json tomlToJson(toml::value const &val); + toml::value jsonToToml(nlohmann::json const &val); /** * Check if options points to a file (indicated by an '@' for the first @@ -193,16 +192,15 @@ namespace json * @param considerFiles If yes, check if `options` refers to a file and read * from there. */ - ParsedConfig - parseOptions( std::string const & options, bool considerFiles ); + ParsedConfig parseOptions(std::string const &options, bool considerFiles); #if openPMD_HAVE_MPI /** * Parallel version of parseOptions(). MPI-collective. */ - ParsedConfig parseOptions( - std::string const & options, MPI_Comm comm, bool considerFiles ); + ParsedConfig + parseOptions(std::string const &options, MPI_Comm comm, bool considerFiles); #endif @@ -215,7 +213,7 @@ namespace json * This helps us forward configurations from these locations to ADIOS2 * "as-is". */ - nlohmann::json & lowerCase( nlohmann::json & ); + nlohmann::json &lowerCase(nlohmann::json &); /** * Read a JSON literal as a string. @@ -224,19 +222,18 @@ namespace json * If it is a bool, convert it to either "0" or "1". * If it is not a literal, return an empty option. */ - std::optional< std::string > asStringDynamic( nlohmann::json const & ); + std::optional asStringDynamic(nlohmann::json const &); /** * Like asStringDynamic(), but convert the string to lowercase afterwards. */ - std::optional< std::string > - asLowerCaseStringDynamic( nlohmann::json const & ); + std::optional asLowerCaseStringDynamic(nlohmann::json const &); /** * Vector containing the lower-case keys to the single backends' * configurations. 
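JSON.hpp, earlier in this hunk, declares openPMD::json::merge(), which combines a default JSON configuration string with a user-supplied one, presumably with keys from the second argument overriding the first. An illustrative sketch, not part of the reformatting diff; the ADIOS2 engine settings are made up for demonstration:

    #include <openPMD/auxiliary/JSON.hpp>

    #include <iostream>
    #include <string>

    int main()
    {
        // illustrative configurations following the openPMD JSON layout
        std::string defaults = R"({"adios2": {"engine": {"type": "bp4"}}})";
        std::string user = R"({"adios2": {"engine": {"type": "sst"}}})";

        // keys from the second argument are expected to take precedence
        std::cout << openPMD::json::merge(defaults, user) << std::endl;
        return 0;
    }
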
*/ - extern std::vector< std::string > backendKeys; + extern std::vector backendKeys; /** * Function that can be called after reading all global options from the @@ -244,13 +241,13 @@ namespace json * single backends). * If any unread value persists, a warning is printed to stderr. */ - void warnGlobalUnusedOptions( TracingJSON const & config ); + void warnGlobalUnusedOptions(TracingJSON const &config); /** * Like merge() as defined in JSON.hpp, but this overload works directly * on nlohmann::json values. */ nlohmann::json & - merge( nlohmann::json & defaultVal, nlohmann::json const & overwrite ); + merge(nlohmann::json &defaultVal, nlohmann::json const &overwrite); } // namespace json } // namespace openPMD diff --git a/include/openPMD/auxiliary/Memory.hpp b/include/openPMD/auxiliary/Memory.hpp index 130398d4d1..430df374a9 100644 --- a/include/openPMD/auxiliary/Memory.hpp +++ b/include/openPMD/auxiliary/Memory.hpp @@ -28,127 +28,135 @@ #include #include - namespace openPMD { namespace auxiliary { -inline std::unique_ptr< void, std::function< void(void*) > > -allocatePtr(Datatype dtype, uint64_t numPoints) -{ - void* data = nullptr; - std::function< void(void*) > del = [](void*){}; - switch( dtype ) + inline std::unique_ptr> + allocatePtr(Datatype dtype, uint64_t numPoints) { - using DT = Datatype; + void *data = nullptr; + std::function del = [](void *) {}; + switch (dtype) + { + using DT = Datatype; case DT::VEC_STRING: - data = new char*[numPoints]; - del = [](void* p){ delete[] static_cast< char** >(p); }; + data = new char *[numPoints]; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_LONG_DOUBLE: case DT::LONG_DOUBLE: data = new long double[numPoints]; - del = [](void* p){ delete[] static_cast< long double* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::ARR_DBL_7: case DT::VEC_DOUBLE: case DT::DOUBLE: data = new double[numPoints]; - del = [](void* p){ delete[] static_cast< double* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_FLOAT: case DT::FLOAT: data = new float[numPoints]; - del = [](void* p){ delete[] static_cast< float* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_CLONG_DOUBLE: case DT::CLONG_DOUBLE: data = new std::complex[numPoints]; - del = [](void* p){ delete[] static_cast< std::complex* >(p); }; + del = [](void *p) { + delete[] static_cast *>(p); + }; break; case DT::VEC_CDOUBLE: case DT::CDOUBLE: data = new std::complex[numPoints]; - del = [](void* p){ delete[] static_cast< std::complex* >(p); }; + del = [](void *p) { + delete[] static_cast *>(p); + }; break; case DT::VEC_CFLOAT: case DT::CFLOAT: data = new std::complex[numPoints]; - del = [](void* p){ delete[] static_cast< std::complex* >(p); }; + del = [](void *p) { + delete[] static_cast *>(p); + }; break; case DT::VEC_SHORT: case DT::SHORT: data = new short[numPoints]; - del = [](void* p){ delete[] static_cast< short* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_INT: case DT::INT: data = new int[numPoints]; - del = [](void* p){ delete[] static_cast< int* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_LONG: case DT::LONG: data = new long[numPoints]; - del = [](void* p){ delete[] static_cast< long* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_LONGLONG: case DT::LONGLONG: data = new long long[numPoints]; - del = [](void* p){ delete[] static_cast< long long* >(p); }; + del = [](void *p) { delete[] 
static_cast(p); }; break; case DT::VEC_USHORT: case DT::USHORT: data = new unsigned short[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned short* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_UINT: case DT::UINT: data = new unsigned int[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned int* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_ULONG: case DT::ULONG: data = new unsigned long[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned long* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_ULONGLONG: case DT::ULONGLONG: data = new unsigned long long[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned long long* >(p); }; + del = [](void *p) { + delete[] static_cast(p); + }; break; case DT::VEC_CHAR: case DT::CHAR: data = new char[numPoints]; - del = [](void* p){ delete[] static_cast< char* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::VEC_UCHAR: case DT::UCHAR: data = new unsigned char[numPoints]; - del = [](void* p){ delete[] static_cast< unsigned char* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::BOOL: data = new bool[numPoints]; - del = [](void* p){ delete[] static_cast< bool* >(p); }; + del = [](void *p) { delete[] static_cast(p); }; break; case DT::STRING: /* user assigns c_str pointer */ break; case DT::UNDEFINED: default: - throw std::runtime_error("Unknown Attribute datatype (Pointer allocation)"); - } + throw std::runtime_error( + "Unknown Attribute datatype (Pointer allocation)"); + } - return std::unique_ptr< void, std::function< void(void*) > >(data, del); -} + return std::unique_ptr>(data, del); + } -inline std::unique_ptr< void, std::function< void(void*) > > -allocatePtr(Datatype dtype, Extent const& e) -{ - uint64_t numPoints = 1u; - for( auto const& dimensionSize : e ) - numPoints *= dimensionSize; - return allocatePtr(dtype, numPoints); -} + inline std::unique_ptr> + allocatePtr(Datatype dtype, Extent const &e) + { + uint64_t numPoints = 1u; + for (auto const &dimensionSize : e) + numPoints *= dimensionSize; + return allocatePtr(dtype, numPoints); + } -} // auxiliary -} // openPMD +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/auxiliary/OutOfRangeMsg.hpp b/include/openPMD/auxiliary/OutOfRangeMsg.hpp index bc0d96593d..30d6cf0763 100644 --- a/include/openPMD/auxiliary/OutOfRangeMsg.hpp +++ b/include/openPMD/auxiliary/OutOfRangeMsg.hpp @@ -23,7 +23,6 @@ #include #include - namespace openPMD { namespace auxiliary @@ -40,40 +39,35 @@ namespace auxiliary std::string m_description; public: - OutOfRangeMsg() : - m_name( "Key" ), - m_description( "does not exist (read-only)." 
) + OutOfRangeMsg() + : m_name("Key"), m_description("does not exist (read-only).") + {} + OutOfRangeMsg(std::string const name, std::string const description) + : m_name(name), m_description(description) {} - OutOfRangeMsg( - std::string const name, - std::string const description - ) : - m_name(name), m_description( description ) {} - template< + template < typename T_Key, typename = typename std::enable_if< - std::is_integral< T_Key >::value || - std::is_floating_point< T_Key >::value - >::type - > - std::string operator()( T_Key const key ) const + std::is_integral::value || + std::is_floating_point::value>::type> + std::string operator()(T_Key const key) const { - return m_name + std::string(" '") + std::to_string( key ) + - std::string( "' " ) + m_description; + return m_name + std::string(" '") + std::to_string(key) + + std::string("' ") + m_description; } - std::string operator()( std::string const key ) const + std::string operator()(std::string const key) const { - return m_name + std::string(" '") + std::string( key ) + - std::string( "' " ) + m_description; + return m_name + std::string(" '") + std::string(key) + + std::string("' ") + m_description; } - std::string operator()( ... ) const + std::string operator()(...) const { - return m_name + std::string( " " ) + m_description; + return m_name + std::string(" ") + m_description; } }; -} // auxiliary -} // openPMD +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/auxiliary/ShareRaw.hpp b/include/openPMD/auxiliary/ShareRaw.hpp index 1e1dc636ef..e3d2d1efb7 100644 --- a/include/openPMD/auxiliary/ShareRaw.hpp +++ b/include/openPMD/auxiliary/ShareRaw.hpp @@ -20,56 +20,53 @@ */ #pragma once -#include -#include #include +#include #include - +#include namespace openPMD { - //! @{ - /** Share ownership with a raw pointer - * - * Helper function to share load/store data ownership - * unprotected and without reference counting with a - * raw pointer or stdlib container (that implements a - * contiguous data storage). - * - * @warning this is a helper function to bypass the shared-pointer - * API for storing data behind raw pointers. Using it puts - * the responsibility of buffer-consistency between stores - * and flushes to the users side without an indication via - * reference counting. - */ - template< typename T > - std::shared_ptr< T > - shareRaw( T * x ) - { - return std::shared_ptr< T >( x, [](T *){} ); - } +//! @{ +/** Share ownership with a raw pointer + * + * Helper function to share load/store data ownership + * unprotected and without reference counting with a + * raw pointer or stdlib container (that implements a + * contiguous data storage). + * + * @warning this is a helper function to bypass the shared-pointer + * API for storing data behind raw pointers. Using it puts + * the responsibility of buffer-consistency between stores + * and flushes to the users side without an indication via + * reference counting. 
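shareRaw(), whose overloads follow directly below, is typically combined with the loadChunk()/storeChunk() calls from RecordComponent.hpp earlier in this diff: it wraps a user-owned pointer or container in a shared_ptr with a no-op deleter, so the warning above about buffer consistency applies to the caller. An illustrative sketch, not part of the reformatting diff, with made-up names:

    #include <openPMD/auxiliary/ShareRaw.hpp>
    #include <openPMD/openPMD.hpp>

    #include <vector>

    int main()
    {
        using namespace openPMD;

        Series series("shareraw_sketch.json", Access::CREATE); // illustrative name
        auto rc = series.iterations[0].meshes["rho"][RecordComponent::SCALAR];
        rc.resetDataset(Dataset(determineDatatype<double>(), {5}));

        // the vector stays owned by the caller; shareRaw() only adds a
        // non-owning shared_ptr wrapper with an empty deleter
        std::vector<double> data{0., 1., 2., 3., 4.};
        rc.storeChunk(shareRaw(data), {0}, {5});
        series.flush(); // the buffer must stay alive until this flush returns
        return 0;
    }
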
+ */ +template +std::shared_ptr shareRaw(T *x) +{ + return std::shared_ptr(x, [](T *) {}); +} - template< typename T > - std::shared_ptr< T const > - shareRaw( T const * x ) - { - return std::shared_ptr< T const >( x, [](T const *){} ); - } +template +std::shared_ptr shareRaw(T const *x) +{ + return std::shared_ptr(x, [](T const *) {}); +} - template< typename T > - auto - shareRaw( T & c ) -> std::shared_ptr< typename std::remove_pointer< decltype( c.data() ) >::type > - { - using value_type = typename std::remove_pointer< decltype( c.data() ) >::type; - return std::shared_ptr< value_type >( c.data(), [](value_type *){} ); - } +template +auto shareRaw(T &c) + -> std::shared_ptr::type> +{ + using value_type = typename std::remove_pointer::type; + return std::shared_ptr(c.data(), [](value_type *) {}); +} - template< typename T > - auto - shareRaw( T const & c ) -> std::shared_ptr< typename std::remove_pointer< decltype( c.data() ) >::type > - { - using value_type = typename std::remove_pointer< decltype( c.data() ) >::type; - return std::shared_ptr< value_type >( c.data(), [](value_type *){} ); - } - //! @} -} // openPMD +template +auto shareRaw(T const &c) + -> std::shared_ptr::type> +{ + using value_type = typename std::remove_pointer::type; + return std::shared_ptr(c.data(), [](value_type *) {}); +} +//! @} +} // namespace openPMD diff --git a/include/openPMD/auxiliary/StringManip.hpp b/include/openPMD/auxiliary/StringManip.hpp index 92b12a058d..36778f205f 100644 --- a/include/openPMD/auxiliary/StringManip.hpp +++ b/include/openPMD/auxiliary/StringManip.hpp @@ -32,242 +32,215 @@ namespace openPMD { namespace auxiliary { -inline bool -contains(std::string const &s, - std::string const &infix) -{ - return s.find(infix) != std::string::npos; -} - -inline bool -contains(std::string const &s, - char const infix) -{ - return s.find(infix) != std::string::npos; -} + inline bool contains(std::string const &s, std::string const &infix) + { + return s.find(infix) != std::string::npos; + } -inline bool -starts_with(std::string const &s, - std::string const &prefix) -{ - return (s.size() >= prefix.size()) && - (0 == s.compare(0, prefix.size(), prefix)); -} + inline bool contains(std::string const &s, char const infix) + { + return s.find(infix) != std::string::npos; + } -inline bool -starts_with(std::string const &s, - char const prefix) -{ - return !s.empty() && - s[0] == prefix; -} + inline bool starts_with(std::string const &s, std::string const &prefix) + { + return (s.size() >= prefix.size()) && + (0 == s.compare(0, prefix.size(), prefix)); + } -inline bool -ends_with(std::string const &s, - std::string const &suffix) -{ - return (s.size() >= suffix.size()) && - (0 == s.compare(s.size() - suffix.size(), suffix.size(), suffix)); -} + inline bool starts_with(std::string const &s, char const prefix) + { + return !s.empty() && s[0] == prefix; + } -inline bool -ends_with(std::string const &s, - char const suffix) -{ - return !s.empty() && s.back() == suffix; -} + inline bool ends_with(std::string const &s, std::string const &suffix) + { + return (s.size() >= suffix.size()) && + (0 == s.compare(s.size() - suffix.size(), suffix.size(), suffix)); + } -inline std::string -replace_first(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = s.find(target); - if( pos == std::string::npos ) - return s; - s.replace(pos, target.size(), replacement); - s.shrink_to_fit(); + inline bool ends_with(std::string const &s, char const suffix) + { + return 
!s.empty() && s.back() == suffix; + } - return s; -} + inline std::string replace_first( + std::string s, + std::string const &target, + std::string const &replacement) + { + std::string::size_type pos = s.find(target); + if (pos == std::string::npos) + return s; + s.replace(pos, target.size(), replacement); + s.shrink_to_fit(); -inline std::string -replace_last(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = s.rfind(target); - if( pos == std::string::npos ) return s; - s.replace(pos, target.size(), replacement); - s.shrink_to_fit(); - - return s; -} + } -inline std::string -replace_all_nonrecursively(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = 0; - auto tsize = target.size(); - auto rsize = replacement.size(); - while (true) + inline std::string replace_last( + std::string s, + std::string const &target, + std::string const &replacement) { - pos = s.find(target, pos); + std::string::size_type pos = s.rfind(target); if (pos == std::string::npos) - break; - s.replace(pos, tsize, replacement); - pos += rsize; + return s; + s.replace(pos, target.size(), replacement); + s.shrink_to_fit(); + + return s; } - s.shrink_to_fit(); - return s; -} -inline std::string -replace_all(std::string s, - std::string const& target, - std::string const& replacement) -{ - std::string::size_type pos = 0; - auto tsize = target.size(); - assert(tsize > 0); - auto rsize = replacement.size(); - while (true) + inline std::string replace_all_nonrecursively( + std::string s, + std::string const &target, + std::string const &replacement) { - pos = s.find(target, pos); - if (pos == std::string::npos) - break; - s.replace(pos, tsize, replacement); - // Allow replacing recursively, but only if - // the next replaced substring overlaps with - // some parts of the original word. - // This avoids loops. - pos += rsize - std::min(tsize - 1, rsize); + std::string::size_type pos = 0; + auto tsize = target.size(); + auto rsize = replacement.size(); + while (true) + { + pos = s.find(target, pos); + if (pos == std::string::npos) + break; + s.replace(pos, tsize, replacement); + pos += rsize; + } + s.shrink_to_fit(); + return s; } - s.shrink_to_fit(); - return s; -} -inline std::vector< std::string > -split(std::string const &s, - std::string const &delimiter, - bool includeDelimiter = false) -{ - std::vector< std::string > ret; - std::string::size_type pos, lastPos = 0, length = s.size(); - while( lastPos < length + 1 ) + inline std::string replace_all( + std::string s, + std::string const &target, + std::string const &replacement) { - pos = s.find_first_of(delimiter, lastPos); - if( pos == std::string::npos ) + std::string::size_type pos = 0; + auto tsize = target.size(); + assert(tsize > 0); + auto rsize = replacement.size(); + while (true) { - pos = length; - includeDelimiter = false; + pos = s.find(target, pos); + if (pos == std::string::npos) + break; + s.replace(pos, tsize, replacement); + // Allow replacing recursively, but only if + // the next replaced substring overlaps with + // some parts of the original word. + // This avoids loops. + pos += rsize - std::min(tsize - 1, rsize); } + s.shrink_to_fit(); + return s; + } - if( pos != lastPos ) - ret.push_back(s.substr(lastPos, pos + (includeDelimiter ? 
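// Behaviour sketch for the replace_* helpers above (illustrative, not taken
// from this patch); the expected results are traced from the implementations
// shown in this hunk.

#include "openPMD/auxiliary/StringManip.hpp"
#include <cassert>

void replaceExamples()
{
    using namespace openPMD::auxiliary;
    assert(replace_first("a/b/a", "a", "x") == "x/b/a");
    assert(replace_last("a/b/a", "a", "x") == "a/b/x");
    // single left-to-right pass; replaced text is never re-scanned
    assert(replace_all_nonrecursively("aaa", "aa", "a") == "aa");
    // allows a replacement to combine with the preceding original text,
    // so collapsing continues without looping forever
    assert(replace_all("aaa", "aa", "a") == "a");
}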
delimiter.size() : 0) - lastPos)); + inline std::vector split( + std::string const &s, + std::string const &delimiter, + bool includeDelimiter = false) + { + std::vector ret; + std::string::size_type pos, lastPos = 0, length = s.size(); + while (lastPos < length + 1) + { + pos = s.find_first_of(delimiter, lastPos); + if (pos == std::string::npos) + { + pos = length; + includeDelimiter = false; + } + + if (pos != lastPos) + ret.push_back(s.substr( + lastPos, + pos + (includeDelimiter ? delimiter.size() : 0) - lastPos)); + + lastPos = pos + 1; + } - lastPos = pos + 1; + return ret; } - return ret; -} - -inline std::string -strip(std::string s, std::vector< char > to_remove) -{ - for( auto const& c : to_remove ) - s.erase(std::remove(s.begin(), s.end(), c), s.end()); - s.shrink_to_fit(); + inline std::string strip(std::string s, std::vector to_remove) + { + for (auto const &c : to_remove) + s.erase(std::remove(s.begin(), s.end(), c), s.end()); + s.shrink_to_fit(); - return s; -} + return s; + } -template< typename F > -std::string -trim( std::string const & s, F && to_remove ) -{ - auto begin = s.begin(); - for( ; begin != s.end(); ++begin ) + template + std::string trim(std::string const &s, F &&to_remove) { - if( !to_remove( *begin ) ) + auto begin = s.begin(); + for (; begin != s.end(); ++begin) { - break; + if (!to_remove(*begin)) + { + break; + } } - } - auto end = s.rbegin(); - for( ; end != s.rend(); ++end ) - { - if( !to_remove( *end ) ) + auto end = s.rbegin(); + for (; end != s.rend(); ++end) { - break; + if (!to_remove(*end)) + { + break; + } } + return s.substr(begin - s.begin(), end.base() - begin); } - return s.substr( begin - s.begin(), end.base() - begin ); -} -inline std::string -join(std::vector< std::string > const& vs, std::string const& delimiter) -{ - switch( vs.size() ) + inline std::string + join(std::vector const &vs, std::string const &delimiter) { + switch (vs.size()) + { case 0: return ""; case 1: return vs[0]; default: std::ostringstream ss; - std::copy(vs.begin(), - vs.end() - 1, - std::ostream_iterator< std::string >(ss, delimiter.c_str())); + std::copy( + vs.begin(), + vs.end() - 1, + std::ostream_iterator(ss, delimiter.c_str())); ss << *(vs.end() - 1); return ss.str(); + } } -} -/** - * @brief Remove surrounding slashes from a string. - * - * @param s A string, possibly with a slash as first and/or last letter. - * @return std::string The same string without those slashes. - */ -inline std::string -removeSlashes( std::string s ) -{ - if( auxiliary::starts_with( - s, - '/' - ) ) + /** + * @brief Remove surrounding slashes from a string. + * + * @param s A string, possibly with a slash as first and/or last letter. + * @return std::string The same string without those slashes. 
+ */ + inline std::string removeSlashes(std::string s) { - s = auxiliary::replace_first( - s, - "/", - "" - ); + if (auxiliary::starts_with(s, '/')) + { + s = auxiliary::replace_first(s, "/", ""); + } + if (auxiliary::ends_with(s, '/')) + { + s = auxiliary::replace_last(s, "/", ""); + } + return s; } - if( auxiliary::ends_with( - s, - '/' - ) ) + + template + S &&lowerCase(S &&s) { - s = auxiliary::replace_last( - s, - "/", - "" - ); + std::transform(s.begin(), s.end(), s.begin(), [](unsigned char c) { + return std::tolower(c); + }); + return std::forward(s); } - return s; -} - -template< typename S > -S && lowerCase( S && s ) -{ - std::transform( s.begin(), s.end(), s.begin(), []( unsigned char c ) { - return std::tolower( c ); - } ); - return std::forward< S >( s ); -} -} // auxiliary -} // openPMD +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/auxiliary/TypeTraits.hpp b/include/openPMD/auxiliary/TypeTraits.hpp index b4d121fd81..2e806d2176 100644 --- a/include/openPMD/auxiliary/TypeTraits.hpp +++ b/include/openPMD/auxiliary/TypeTraits.hpp @@ -29,34 +29,34 @@ namespace openPMD::auxiliary { namespace detail { - template< typename > + template struct IsVector { static constexpr bool value = false; }; - template< typename T > - struct IsVector< std::vector< T > > + template + struct IsVector> { static constexpr bool value = true; }; - template< typename > + template struct IsArray { static constexpr bool value = false; }; - template< typename T, size_t n > - struct IsArray< std::array< T, n > > + template + struct IsArray> { static constexpr bool value = true; }; -} +} // namespace detail -template< typename T > -inline constexpr bool IsVector_v = detail::IsVector< T >::value; +template +inline constexpr bool IsVector_v = detail::IsVector::value; -template< typename T > -inline constexpr bool IsArray_v = detail::IsArray< T >::value; -} +template +inline constexpr bool IsArray_v = detail::IsArray::value; +} // namespace openPMD::auxiliary diff --git a/include/openPMD/auxiliary/Variant.hpp b/include/openPMD/auxiliary/Variant.hpp index 217ad5a44d..89190b882e 100644 --- a/include/openPMD/auxiliary/Variant.hpp +++ b/include/openPMD/auxiliary/Variant.hpp @@ -24,69 +24,71 @@ #include #include // IWYU pragma: export - namespace openPMD { namespace auxiliary { -/** Generic object to store a set of datatypes in without losing type safety. - * - * @tparam T_DTYPES Enumeration of datatypes to be stored and identified. - * @tparam T Varaidic template argument list of datatypes to be stored. - */ -template< class T_DTYPES, typename ... T > -class Variant -{ - static_assert(std::is_enum< T_DTYPES >::value, "Datatypes to Variant must be supplied as enum."); - -public: - using resource = std::variant< T ... >; - /** Construct a lightweight wrapper around a generic object that indicates - * the concrete datatype of the specific object stored. - * - * @note Gerneric objects can only generated implicitly if their datatype - * is contained in T_DTYPES. - * @param r Generic object to be stored. - */ - Variant(resource r) - : dtype{static_cast(r.index())}, - m_data{r} - { } - - /** Retrieve a stored specific object of known datatype with ensured type-safety. + /** Generic object to store a set of datatypes in without losing type + * safety. * - * @throw std::bad_variant_access if stored object is not of type U. - * @tparam U Type of the object to be retrieved. - * @return Copy of the retrieved object of type U. 
+ * @tparam T_DTYPES Enumeration of datatypes to be stored and identified. + * @tparam T Varaidic template argument list of datatypes to be + * stored. */ - template< typename U > - U get() const + template + class Variant { - return std::get< U >(m_data); - } + static_assert( + std::is_enum::value, + "Datatypes to Variant must be supplied as enum."); - /** Retrieve the stored generic object. - * - * @return Copy of the stored generic object. - */ - resource getResource() const - { - return m_data; - } + public: + using resource = std::variant; + /** Construct a lightweight wrapper around a generic object that + * indicates the concrete datatype of the specific object stored. + * + * @note Gerneric objects can only generated implicitly if their + * datatype is contained in T_DTYPES. + * @param r Generic object to be stored. + */ + Variant(resource r) : dtype{static_cast(r.index())}, m_data{r} + {} - /** Retrieve the index of the alternative that is currently been held - * - * @return zero-based index - */ - constexpr size_t index() const noexcept - { - return m_data.index(); - } + /** Retrieve a stored specific object of known datatype with ensured + * type-safety. + * + * @throw std::bad_variant_access if stored object is not of type U. + * @tparam U Type of the object to be retrieved. + * @return Copy of the retrieved object of type U. + */ + template + U get() const + { + return std::get(m_data); + } + + /** Retrieve the stored generic object. + * + * @return Copy of the stored generic object. + */ + resource getResource() const + { + return m_data; + } + + /** Retrieve the index of the alternative that is currently been held + * + * @return zero-based index + */ + constexpr size_t index() const noexcept + { + return m_data.index(); + } - T_DTYPES dtype; + T_DTYPES dtype; -private: - resource m_data; -}; -} // auxiliary -} // openPMD + private: + resource m_data; + }; +} // namespace auxiliary +} // namespace openPMD diff --git a/include/openPMD/backend/Attributable.hpp b/include/openPMD/backend/Attributable.hpp index fa6af52ca0..e4d7f1e454 100644 --- a/include/openPMD/backend/Attributable.hpp +++ b/include/openPMD/backend/Attributable.hpp @@ -21,31 +21,30 @@ #pragma once #include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/auxiliary/OutOfRangeMsg.hpp" #include "openPMD/backend/Attribute.hpp" #include "openPMD/backend/Writable.hpp" -#include "openPMD/auxiliary/OutOfRangeMsg.hpp" +#include #include #include #include -#include #include -#include #include +#include // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected: #endif - namespace openPMD { namespace traits { - template< typename T > + template struct GenerationPolicy; -} // traits +} // namespace traits class AbstractFilePosition; class Attributable; class Iteration; @@ -54,63 +53,62 @@ class Series; class no_such_attribute_error : public std::runtime_error { public: - no_such_attribute_error(std::string const& what_arg) - : std::runtime_error(what_arg) - { } - virtual ~no_such_attribute_error() { } + no_such_attribute_error(std::string const &what_arg) + : std::runtime_error(what_arg) + {} + virtual ~no_such_attribute_error() + {} }; namespace internal { -class AttributableData -{ - friend class openPMD::Attributable; + class AttributableData + { + friend class openPMD::Attributable; -public: - AttributableData(); - AttributableData( AttributableData const & ) = delete; - AttributableData( AttributableData && ) = 
delete; - virtual ~AttributableData() = default; + public: + AttributableData(); + AttributableData(AttributableData const &) = delete; + AttributableData(AttributableData &&) = delete; + virtual ~AttributableData() = default; - AttributableData & operator=( AttributableData const & ) = delete; - AttributableData & operator=( AttributableData && ) = delete; + AttributableData &operator=(AttributableData const &) = delete; + AttributableData &operator=(AttributableData &&) = delete; - using A_MAP = std::map< std::string, Attribute >; - /** - * The Writable associated with this Attributable. - * There is a one-to-one relation between Attributable and Writable objects, - * Writable captures the part that backends can see. - */ - Writable m_writable; + using A_MAP = std::map; + /** + * The Writable associated with this Attributable. + * There is a one-to-one relation between Attributable and Writable + * objects, Writable captures the part that backends can see. + */ + Writable m_writable; -private: - /** - * The attributes defined by this Attributable. - */ - A_MAP m_attributes; -}; + private: + /** + * The attributes defined by this Attributable. + */ + A_MAP m_attributes; + }; -/** Verify values of attributes in the frontend - * - * verify string attributes are not empty (backend restriction, e.g., HDF5) - */ -template< typename T > -inline void -attr_value_check( std::string const /* key */, T /* value */ ) -{ -} + /** Verify values of attributes in the frontend + * + * verify string attributes are not empty (backend restriction, e.g., HDF5) + */ + template + inline void attr_value_check(std::string const /* key */, T /* value */) + {} -template< > -inline void -attr_value_check( std::string const key, std::string const value ) -{ - if( value.empty() ) - throw std::runtime_error( + template <> + inline void attr_value_check(std::string const key, std::string const value) + { + if (value.empty()) + throw std::runtime_error( "[setAttribute] Value for string attribute '" + key + - "' must not be empty!" ); -} + "' must not be empty!"); + } -template< typename > class BaseRecordData; + template + class BaseRecordData; } // namespace internal /** @brief Layer to manage storage of attributes associated with file objects. 
@@ -121,21 +119,17 @@ template< typename > class BaseRecordData; class Attributable { // @todo remove unnecessary friend (wew that sounds bitter) - using A_MAP = std::map< std::string, Attribute >; - friend Writable* getWritable(Attributable*); - template< typename T_elem > + using A_MAP = std::map; + friend Writable *getWritable(Attributable *); + template friend class BaseRecord; - template< typename T_elem > + template friend class BaseRecordInterface; - template< typename > + template friend class internal::BaseRecordData; - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; - template< typename T > + template friend struct traits::GenerationPolicy; friend class Iteration; friend class Series; @@ -143,11 +137,11 @@ class Attributable friend class WriteIterations; protected: - std::shared_ptr< internal::AttributableData > m_attri{ - new internal::AttributableData() }; + std::shared_ptr m_attri{ + new internal::AttributableData()}; // Should not be called publicly, only by implementing classes - Attributable( std::shared_ptr< internal::AttributableData > ); + Attributable(std::shared_ptr); public: Attributable(); @@ -168,32 +162,34 @@ class Attributable * * @{ */ - template< typename T > - bool setAttribute(std::string const& key, T value); - bool setAttribute(std::string const& key, char const value[]); + template + bool setAttribute(std::string const &key, T value); + bool setAttribute(std::string const &key, char const value[]); /** @} */ /** Retrieve value of Attribute stored with provided key. * - * @throw no_such_attribute_error If no Attribute is currently stored with the provided key. + * @throw no_such_attribute_error If no Attribute is currently stored with + * the provided key. * @param key Key (i.e. name) of the Attribute to retrieve value for. * @return Stored Attribute in Variant form. */ - Attribute getAttribute(std::string const& key) const; + Attribute getAttribute(std::string const &key) const; /** Remove Attribute of provided value both logically and physically. * * @param key Key (i.e. name) of the Attribute to remove. - * @return true if provided key was present and removal succeeded, false otherwise. + * @return true if provided key was present and removal succeeded, false + * otherwise. */ - bool deleteAttribute(std::string const& key); + bool deleteAttribute(std::string const &key); /** List all currently stored Attributes' keys. * * @return Vector of keys (i.e. names) of all currently stored Attributes. */ - std::vector< std::string > attributes() const; + std::vector attributes() const; /** Count all currently stored Attributes. * * @return Number of currently stored Attributes. @@ -204,7 +200,7 @@ class Attributable * @param key Key (i.e. name) of the Attribute to find. * @return true if provided key was present, false otherwise. */ - bool containsAttribute(std::string const& key) const; + bool containsAttribute(std::string const &key) const; /** Retrieve a user-supplied comment associated with the object. * @@ -212,12 +208,13 @@ class Attributable * @return String containing the user-supplied comment. */ std::string comment() const; - /** Populate Attribute corresponding to a comment with the user-supplied comment. + /** Populate Attribute corresponding to a comment with the user-supplied + * comment. * * @param comment String value to be stored as a comment. * @return Reference to modified Attributable. 
*/ - Attributable& setComment(std::string const& comment); + Attributable &setComment(std::string const &comment); /** Flush the corresponding Series object * @@ -236,8 +233,8 @@ class Attributable */ struct MyPath { - std::string directory; //! e.g., samples/git-samples/ - std::string seriesName; //! e.g., data%T + std::string directory; //! e.g., samples/git-samples/ + std::string seriesName; //! e.g., data%T std::string seriesExtension; //! e.g., .bp, .h5, .json, ... /** A vector of openPMD object names * @@ -246,7 +243,7 @@ class Attributable * "iterations", "100", "meshes", "E", "x" * Notice that RecordComponent::SCALAR is included in this list, too. */ - std::vector< std::string > group; + std::vector group; /** Reconstructs a path that can be passed to a Series constructor */ std::string filePath() const; @@ -259,7 +256,9 @@ class Attributable */ MyPath myPath() const; -OPENPMD_protected: + // clang-format off +OPENPMD_protected + // clang-format on Series retrieveSeries() const; @@ -270,14 +269,15 @@ class Attributable * Throws an error otherwise, e.g., for Series objects. * @{ */ - Iteration const & containingIteration() const; - Iteration & containingIteration(); + Iteration const &containingIteration() const; + Iteration &containingIteration(); /** @} */ - void seriesFlush( FlushLevel ); + void seriesFlush(FlushLevel); void flushAttributes(); - enum ReadMode { + enum ReadMode + { /** * Don't read an attribute from the backend if it has been previously * read. @@ -294,89 +294,112 @@ class Attributable */ FullyReread }; - void readAttributes( ReadMode ); + void readAttributes(ReadMode); - /** Retrieve the value of a floating point Attribute of user-defined precision with ensured type-safety. + /** Retrieve the value of a floating point Attribute of user-defined + * precision with ensured type-safety. * * @note Since the precision of certain Attributes is intentionally left - * unspecified in the openPMD standard, this provides a mechanism to - * retrieve those values without giving up type-safety. - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents + * unspecified in the openPMD standard, this provides a mechanism + * to retrieve those values without giving up type-safety. + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents * @note If the supplied and stored floating point precision are not the - * same, the value is cast to the desired precision unconditionally. + * same, the value is cast to the desired precision + * unconditionally. * - * @throw no_such_attribute_error If no Attribute is currently stored with the provided key. - * @tparam T Floating point type of user-defined precision to retrieve the value as. - * @param key Key (i.e. name) of the floating-point Attribute to retrieve value for. + * @throw no_such_attribute_error If no Attribute is currently stored with + * the provided key. + * @tparam T Floating point type of user-defined precision to retrieve + * the value as. + * @param key Key (i.e. name) of the floating-point Attribute to retrieve + * value for. * @return Value of stored Attribute as supplied floating point type. */ - template< typename T > - T readFloatingpoint(std::string const& key) const; - /** Retrieve a vector of values of a floating point Attributes of user-defined precision with ensured type-safety. 
+ template + T readFloatingpoint(std::string const &key) const; + /** Retrieve a vector of values of a floating point Attributes of + * user-defined precision with ensured type-safety. * * @note Since the precision of certain Attributes is intentionally left - * unspecified in the openPMD standard, this provides a mechanism to - * retrieve those values without giving up type-safety. - * @see https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents + * unspecified in the openPMD standard, this provides a mechanism + * to retrieve those values without giving up type-safety. + * @see + * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#conventions-throughout-these-documents * @note If the supplied and stored floating point precision are not the - * same, the values are cast to the desired precision unconditionally. + * same, the values are cast to the desired precision + * unconditionally. * - * @throw no_such_attribute_error If no Attribute is currently stored with the provided key. - * @tparam T Floating point type of user-defined precision to retrieve the values as. - * @param key Key (i.e. name) of the floating-point Attribute to retrieve values for. - * @return Vector of values of stored Attribute as supplied floating point type. + * @throw no_such_attribute_error If no Attribute is currently stored with + * the provided key. + * @tparam T Floating point type of user-defined precision to retrieve + * the values as. + * @param key Key (i.e. name) of the floating-point Attribute to retrieve + * values for. + * @return Vector of values of stored Attribute as supplied floating point + * type. */ - template< typename T > - std::vector< T > readVectorFloatingpoint(std::string const& key) const; + template + std::vector readVectorFloatingpoint(std::string const &key) const; /* views into the resources held by m_writable - * purely for convenience so code that uses these does not have to go through m_writable-> */ - AbstractIOHandler * IOHandler() + * purely for convenience so code that uses these does not have to go + * through m_writable-> */ + AbstractIOHandler *IOHandler() { return m_attri->m_writable.IOHandler.get(); } - AbstractIOHandler const * IOHandler() const + AbstractIOHandler const *IOHandler() const { return m_attri->m_writable.IOHandler.get(); } - Writable *& parent() + Writable *&parent() { return m_attri->m_writable.parent; } - Writable const * parent() const + Writable const *parent() const { return m_attri->m_writable.parent; } - Writable & writable() + Writable &writable() { return m_attri->m_writable; } - Writable const & writable() const + Writable const &writable() const { return m_attri->m_writable; } - inline - void setData( std::shared_ptr< internal::AttributableData > attri ) + inline void setData(std::shared_ptr attri) { - m_attri = std::move( attri ); + m_attri = std::move(attri); } - inline - internal::AttributableData & get() + inline internal::AttributableData &get() { return *m_attri; } - inline - internal::AttributableData const & get() const + inline internal::AttributableData const &get() const { return *m_attri; } - bool dirty() const { return writable().dirty; } - bool& dirty() { return writable().dirty; } - bool written() const { return writable().written; } - bool& written() { return writable().written; } + bool dirty() const + { + return writable().dirty; + } + bool &dirty() + { + return writable().dirty; + } + bool written() const + { + return writable().written; + } + bool &written() + 
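// Note on the protected readFloatingpoint()/readVectorFloatingpoint() helpers
// declared above (illustrative, not taken from this patch): they are thin
// wrappers around getAttribute() that merely add a static_assert on the
// requested type, e.g. (inside an Attributable subclass, attribute name
// hypothetical):
//
//     double dt = this->readFloatingpoint<double>("dt");
//     // equivalent to:
//     double dt2 = this->getAttribute("dt").get<double>();
//
// so an attribute stored in single precision is cast to double
// unconditionally, as the documentation above states.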
{ + return writable().written; + } private: /** @@ -384,35 +407,33 @@ class Attributable * * @param w The Writable representing the parent. */ - virtual void linkHierarchy(Writable& w); + virtual void linkHierarchy(Writable &w); }; // Attributable -//TODO explicitly instantiate Attributable::setAttribute for all T in Datatype -template< typename T > -inline bool -Attributable::setAttribute( std::string const & key, T value ) +// TODO explicitly instantiate Attributable::setAttribute for all T in Datatype +template +inline bool Attributable::setAttribute(std::string const &key, T value) { - internal::attr_value_check( key, value ); + internal::attr_value_check(key, value); - auto & attri = get(); - if(IOHandler() && Access::READ_ONLY == IOHandler()->m_frontendAccess ) + auto &attri = get(); + if (IOHandler() && Access::READ_ONLY == IOHandler()->m_frontendAccess) { auxiliary::OutOfRangeMsg const out_of_range_msg( - "Attribute", - "can not be set (read-only)." - ); + "Attribute", "can not be set (read-only)."); throw no_such_attribute_error(out_of_range_msg(key)); } dirty() = true; auto it = attri.m_attributes.lower_bound(key); - if( it != attri.m_attributes.end() - && !attri.m_attributes.key_comp()(key, it->first) ) + if (it != attri.m_attributes.end() && + !attri.m_attributes.key_comp()(key, it->first)) { // key already exists in map, just replace the value it->second = Attribute(std::move(value)); return true; - } else + } + else { // emplace a new map element for an unknown key attri.m_attributes.emplace_hint( @@ -422,25 +443,29 @@ Attributable::setAttribute( std::string const & key, T value ) } inline bool -Attributable::setAttribute( std::string const & key, char const value[] ) +Attributable::setAttribute(std::string const &key, char const value[]) { return this->setAttribute(key, std::string(value)); } -template< typename T > -inline T Attributable::readFloatingpoint( std::string const & key ) const +template +inline T Attributable::readFloatingpoint(std::string const &key) const { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); - return getAttribute(key).get< T >(); + return getAttribute(key).get(); } -template< typename T > -inline std::vector< T > -Attributable::readVectorFloatingpoint( std::string const & key ) const +template +inline std::vector +Attributable::readVectorFloatingpoint(std::string const &key) const { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); - return getAttribute(key).get< std::vector< T > >(); + return getAttribute(key).get>(); } } // namespace openPMD diff --git a/include/openPMD/backend/Attribute.hpp b/include/openPMD/backend/Attribute.hpp index af9ad78f13..f43ff48ac9 100644 --- a/include/openPMD/backend/Attribute.hpp +++ b/include/openPMD/backend/Attribute.hpp @@ -35,50 +35,63 @@ #include #include - namespace openPMD { -//TODO This might have to be a Writable -//Reasoning - Flushes are expected to be done often. -//Attributes should not be written unless dirty. -//At the moment the dirty check is done at Attributable level, -//resulting in all of an Attributables Attributes being written to disk even if only one changes -/** Varidic datatype supporting at least all formats for attributes specified in the openPMD standard. 
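// Illustrative usage sketch for Attributable::setAttribute above (not taken
// from this patch); the attribute names are examples only.

#include <openPMD/openPMD.hpp>
#include <string>
#include <vector>

void customAttributes(openPMD::Series &series)
{
    series.setAttribute("author", "Jane Doe <jane@example.com>"); // char const[]
    series.setAttribute("numCells", 128); // int
    series.setAttribute("gridSpacing", std::vector<double>{0.1, 0.1, 0.2});
    // On a Series opened with Access::READ_ONLY the same calls throw
    // no_such_attribute_error, using the OutOfRangeMsg text
    // "Attribute '...' can not be set (read-only)."
}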
+// TODO This might have to be a Writable +// Reasoning - Flushes are expected to be done often. +// Attributes should not be written unless dirty. +// At the moment the dirty check is done at Attributable level, +// resulting in all of an Attributables Attributes being written to disk even if +// only one changes +/** Varidic datatype supporting at least all formats for attributes specified in + * the openPMD standard. * * @note Extending and/or modifying the available formats requires identical * modifications to Datatype. */ -class Attribute : - public auxiliary::Variant< Datatype, - char, unsigned char, // signed char, - short, int, long, long long, - unsigned short, unsigned int, unsigned long, unsigned long long, - float, double, long double, - std::complex< float >, std::complex< double >, std::complex< long double >, - std::string, - std::vector< char >, - std::vector< short >, - std::vector< int >, - std::vector< long >, - std::vector< long long >, - std::vector< unsigned char >, - std::vector< unsigned short >, - std::vector< unsigned int >, - std::vector< unsigned long >, - std::vector< unsigned long long >, - std::vector< float >, - std::vector< double >, - std::vector< long double >, - std::vector< std::complex< float > >, - std::vector< std::complex< double > >, - std::vector< std::complex< long double > >, - std::vector< std::string >, - std::array< double, 7 >, - bool > +class Attribute + : public auxiliary::Variant< + Datatype, + char, + unsigned char, // signed char, + short, + int, + long, + long long, + unsigned short, + unsigned int, + unsigned long, + unsigned long long, + float, + double, + long double, + std::complex, + std::complex, + std::complex, + std::string, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector, + std::vector>, + std::vector>, + std::vector>, + std::vector, + std::array, + bool> { public: Attribute(resource r) : Variant(std::move(r)) - { } + {} /** * Compiler bug: CUDA (nvcc) releases 11.0.3 (v11.0.221), 11.1 (v11.1.105): @@ -89,10 +102,9 @@ class Attribute : * * Fix by explicitly instantiating resource */ - template< typename T > - Attribute( T && val ) : Variant( resource( std::forward< T >( val ) ) ) - { - } + template + Attribute(T &&val) : Variant(resource(std::forward(val))) + {} /** Retrieve a stored specific Attribute and cast if convertible. * @@ -103,94 +115,92 @@ class Attribute : * @tparam U Type of the object to be casted to. * @return Copy of the retrieved object, casted to type U. */ - template< typename U > + template U get() const; }; -template< typename T, typename U > -auto doConvert( T * pv ) -> U +template +auto doConvert(T *pv) -> U { - if constexpr( std::is_convertible_v< T, U > ) + if constexpr (std::is_convertible_v) { - return static_cast< U >( *pv ); + return static_cast(*pv); } - else if constexpr( - auxiliary::IsVector_v< T > && auxiliary::IsVector_v< U > ) + else if constexpr (auxiliary::IsVector_v && auxiliary::IsVector_v) { - if constexpr( std::is_convertible_v< + if constexpr (std::is_convertible_v< typename T::value_type, - typename U::value_type > ) + typename U::value_type>) { U res; - res.reserve( pv->size() ); - std::copy( pv->begin(), pv->end(), std::back_inserter( res ) ); + res.reserve(pv->size()); + std::copy(pv->begin(), pv->end(), std::back_inserter(res)); return res; } - throw std::runtime_error( "getCast: no vector cast possible." 
); + throw std::runtime_error("getCast: no vector cast possible."); } // conversion cast: array to vector // if a backend reports a std::array<> for something where // the frontend expects a vector - else if constexpr( auxiliary::IsArray_v< T > && auxiliary::IsVector_v< U > ) + else if constexpr (auxiliary::IsArray_v && auxiliary::IsVector_v) { - if constexpr( std::is_convertible_v< + if constexpr (std::is_convertible_v< typename T::value_type, - typename U::value_type > ) + typename U::value_type>) { U res; - res.reserve( pv->size() ); - std::copy( pv->begin(), pv->end(), std::back_inserter( res ) ); + res.reserve(pv->size()); + std::copy(pv->begin(), pv->end(), std::back_inserter(res)); return res; } throw std::runtime_error( - "getCast: no array to vector conversion possible." ); + "getCast: no array to vector conversion possible."); } // conversion cast: vector to array // if a backend reports a std::vector<> for something where // the frontend expects an array - else if constexpr( auxiliary::IsVector_v< T > && auxiliary::IsArray_v< U > ) + else if constexpr (auxiliary::IsVector_v && auxiliary::IsArray_v) { - if constexpr( std::is_convertible_v< + if constexpr (std::is_convertible_v< typename T::value_type, - typename U::value_type > ) + typename U::value_type>) { U res; - if( res.size() != pv->size() ) + if (res.size() != pv->size()) { throw std::runtime_error( "getCast: no vector to array conversion possible (wrong " - "requested array size)." ); + "requested array size)."); } - for( size_t i = 0; i < res.size(); ++i ) + for (size_t i = 0; i < res.size(); ++i) { - res[ i ] = - static_cast< typename U::value_type >( ( *pv )[ i ] ); + res[i] = static_cast((*pv)[i]); } return res; } throw std::runtime_error( - "getCast: no vector to array conversion possible." ); + "getCast: no vector to array conversion possible."); } // conversion cast: turn a single value into a 1-element vector - else if constexpr( auxiliary::IsVector_v< U > ) + else if constexpr (auxiliary::IsVector_v) { - if constexpr( std::is_convertible_v< T, typename U::value_type > ) + if constexpr (std::is_convertible_v) { U res; - res.reserve( 1 ); - res.push_back( static_cast< typename U::value_type >( *pv ) ); + res.reserve(1); + res.push_back(static_cast(*pv)); return res; } throw std::runtime_error( - "getCast: no scalar to vector conversion possible." ); + "getCast: no scalar to vector conversion possible."); } - ( void )pv; - throw std::runtime_error( "getCast: no cast possible." ); + (void)pv; + throw std::runtime_error("getCast: no cast possible."); } /** Retrieve a stored specific Attribute and cast if convertible. @@ -199,24 +209,23 @@ auto doConvert( T * pv ) -> U * @tparam U Type of the object to be casted to. * @return Copy of the retrieved object, casted to type U. 
*/ -template< typename U > -inline U -getCast( Attribute const & a ) +template +inline U getCast(Attribute const &a) { auto v = a.getResource(); return std::visit( - []( auto && containedValue ) -> U { - using containedType = std::decay_t< decltype( containedValue ) >; - return doConvert< containedType, U >( &containedValue ); + [](auto &&containedValue) -> U { + using containedType = std::decay_t; + return doConvert(&containedValue); }, - v ); + v); } -template< typename U > +template U Attribute::get() const { - return getCast< U >( Variant::getResource() ); + return getCast(Variant::getResource()); } } // namespace openPMD diff --git a/include/openPMD/backend/BaseRecord.hpp b/include/openPMD/backend/BaseRecord.hpp index 1373f4f133..353afa9b7b 100644 --- a/include/openPMD/backend/BaseRecord.hpp +++ b/include/openPMD/backend/BaseRecord.hpp @@ -20,21 +20,20 @@ */ #pragma once -#include "openPMD/backend/Container.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/UnitDimension.hpp" +#include "openPMD/backend/Container.hpp" #include -#include #include - +#include namespace openPMD { namespace internal { - template< typename T_elem > - class BaseRecordData : public ContainerData< T_elem > + template + class BaseRecordData : public ContainerData { public: /** @@ -47,16 +46,16 @@ namespace internal BaseRecordData(); - BaseRecordData( BaseRecordData const & ) = delete; - BaseRecordData( BaseRecordData && ) = delete; + BaseRecordData(BaseRecordData const &) = delete; + BaseRecordData(BaseRecordData &&) = delete; - BaseRecordData & operator=( BaseRecordData const & ) = delete; - BaseRecordData & operator=( BaseRecordData && ) = delete; + BaseRecordData &operator=(BaseRecordData const &) = delete; + BaseRecordData &operator=(BaseRecordData &&) = delete; }; -} +} // namespace internal -template< typename T_elem > -class BaseRecord : public Container< T_elem > +template +class BaseRecord : public Container { friend class Iteration; friend class ParticleSpecies; @@ -64,15 +63,15 @@ class BaseRecord : public Container< T_elem > friend class Record; friend class Mesh; - std::shared_ptr< internal::BaseRecordData< T_elem > > m_baseRecordData{ - new internal::BaseRecordData< T_elem >() }; + std::shared_ptr> m_baseRecordData{ + new internal::BaseRecordData()}; - inline internal::BaseRecordData< T_elem > & get() + inline internal::BaseRecordData &get() { return *m_baseRecordData; } - inline internal::BaseRecordData< T_elem > const & get() const + inline internal::BaseRecordData const &get() const { return *m_baseRecordData; } @@ -80,34 +79,33 @@ class BaseRecord : public Container< T_elem > BaseRecord(); protected: + BaseRecord(std::shared_ptr>); - BaseRecord( std::shared_ptr< internal::BaseRecordData< T_elem > > ); - - inline void setData( internal::BaseRecordData< T_elem > * data ) + inline void setData(internal::BaseRecordData *data) { - m_baseRecordData = std::move( data ); - Container< T_elem >::setData( m_baseRecordData ); + m_baseRecordData = std::move(data); + Container::setData(m_baseRecordData); } public: - using key_type = typename Container< T_elem >::key_type; - using mapped_type = typename Container< T_elem >::mapped_type; - using value_type = typename Container< T_elem >::value_type; - using size_type = typename Container< T_elem >::size_type; - using difference_type = typename Container< T_elem >::difference_type; - using allocator_type = typename Container< T_elem >::allocator_type; - using reference = typename Container< T_elem >::reference; - using const_reference = typename 
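// Illustrative usage sketch for Attribute::get() / getCast() above (not taken
// from this patch). The conversion shown, std::vector<double> to
// std::array<double, 7>, is one of the cases handled by doConvert().

#include <openPMD/openPMD.hpp>
#include <array>

void readUnitDimension(openPMD::Mesh const &mesh)
{
    openPMD::Attribute a = mesh.getAttribute("unitDimension");
    // succeeds whether the backend reported a std::array<double, 7> or a
    // 7-element std::vector<double>; a size mismatch throws std::runtime_error
    auto dims = a.get<std::array<double, 7>>();
    (void)dims;
}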
Container< T_elem >::const_reference; - using pointer = typename Container< T_elem >::pointer; - using const_pointer = typename Container< T_elem >::const_pointer; - using iterator = typename Container< T_elem >::iterator; - using const_iterator = typename Container< T_elem >::const_iterator; + using key_type = typename Container::key_type; + using mapped_type = typename Container::mapped_type; + using value_type = typename Container::value_type; + using size_type = typename Container::size_type; + using difference_type = typename Container::difference_type; + using allocator_type = typename Container::allocator_type; + using reference = typename Container::reference; + using const_reference = typename Container::const_reference; + using pointer = typename Container::pointer; + using const_pointer = typename Container::const_pointer; + using iterator = typename Container::iterator; + using const_iterator = typename Container::const_iterator; virtual ~BaseRecord() = default; - mapped_type& operator[](key_type const& key) override; - mapped_type& operator[](key_type&& key) override; - size_type erase(key_type const& key) override; + mapped_type &operator[](key_type const &key) override; + mapped_type &operator[](key_type &&key) override; + size_type erase(key_type const &key) override; iterator erase(iterator res) override; //! @todo add also, as soon as added in Container: // iterator erase(const_iterator first, const_iterator last) override; @@ -123,12 +121,14 @@ class BaseRecord : public Container< T_elem > * (ISQ). * * @see https://en.wikipedia.org/wiki/Dimensional_analysis - * @see https://en.wikipedia.org/wiki/International_System_of_Quantities#Base_quantities - * @see https://github.com/openPMD/openPMD-standard/blob/1.1.0/STANDARD.md#required-for-each-record + * @see + * https://en.wikipedia.org/wiki/International_System_of_Quantities#Base_quantities + * @see + * https://github.com/openPMD/openPMD-standard/blob/1.1.0/STANDARD.md#required-for-each-record * * @return powers of the 7 base measures in the order specified above */ - std::array< double, 7 > unitDimension() const; + std::array unitDimension() const; /** Returns true if this record only contains a single component * @@ -137,12 +137,12 @@ class BaseRecord : public Container< T_elem > bool scalar() const; protected: - BaseRecord( internal::BaseRecordData< T_elem > * ); + BaseRecord(internal::BaseRecordData *); void readBase(); private: - void flush(std::string const&) final; - virtual void flush_impl(std::string const&) = 0; + void flush(std::string const &) final; + virtual void flush_impl(std::string const &) = 0; virtual void read() = 0; /** @@ -153,55 +153,53 @@ class BaseRecord : public Container< T_elem > * @return true If dirty. * @return false Otherwise. */ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; }; // BaseRecord - // implementation namespace internal { - template< typename T_elem > - BaseRecordData< T_elem >::BaseRecordData() + template + BaseRecordData::BaseRecordData() { - Attributable impl{ { this, []( auto const * ){} } }; + Attributable impl{{this, [](auto const *) {}}}; impl.setAttribute( "unitDimension", - std::array< double, 7 >{ { 0., 0., 0., 0., 0., 0., 0. 
} } ); + std::array{{0., 0., 0., 0., 0., 0., 0.}}); } } // namespace internal -template< typename T_elem > -BaseRecord< T_elem >::BaseRecord() : Container< T_elem >{ nullptr } +template +BaseRecord::BaseRecord() : Container{nullptr} { - Container< T_elem >::setData( m_baseRecordData ); + Container::setData(m_baseRecordData); } -template< typename T_elem > -BaseRecord< T_elem >::BaseRecord( - std::shared_ptr< internal::BaseRecordData< T_elem > > data ) - : Container< T_elem >{ data } - , m_baseRecordData{ std::move( data ) } -{ -} +template +BaseRecord::BaseRecord( + std::shared_ptr> data) + : Container{data}, m_baseRecordData{std::move(data)} +{} -template< typename T_elem > -inline typename BaseRecord< T_elem >::mapped_type & -BaseRecord< T_elem >::operator[]( key_type const & key ) +template +inline typename BaseRecord::mapped_type & +BaseRecord::operator[](key_type const &key) { auto it = this->find(key); - if( it != this->end() ) + if (it != this->end()) return it->second; else { bool const keyScalar = (key == RecordComponent::SCALAR); - if( (keyScalar && !Container< T_elem >::empty() && !scalar()) || (scalar() && !keyScalar) ) - throw std::runtime_error("A scalar component can not be contained at " - "the same time as one or more regular components."); - - mapped_type& ret = Container< T_elem >::operator[](key); - if( keyScalar ) + if ((keyScalar && !Container::empty() && !scalar()) || + (scalar() && !keyScalar)) + throw std::runtime_error( + "A scalar component can not be contained at " + "the same time as one or more regular components."); + + mapped_type &ret = Container::operator[](key); + if (keyScalar) { get().m_containsScalar = true; ret.parent() = this->parent(); @@ -210,22 +208,24 @@ BaseRecord< T_elem >::operator[]( key_type const & key ) } } -template< typename T_elem > -inline typename BaseRecord< T_elem >::mapped_type& -BaseRecord< T_elem >::operator[](key_type&& key) +template +inline typename BaseRecord::mapped_type & +BaseRecord::operator[](key_type &&key) { auto it = this->find(key); - if( it != this->end() ) + if (it != this->end()) return it->second; else { bool const keyScalar = (key == RecordComponent::SCALAR); - if( (keyScalar && !Container< T_elem >::empty() && !scalar()) || (scalar() && !keyScalar) ) - throw std::runtime_error("A scalar component can not be contained at " - "the same time as one or more regular components."); - - mapped_type& ret = Container< T_elem >::operator[](std::move(key)); - if( keyScalar ) + if ((keyScalar && !Container::empty() && !scalar()) || + (scalar() && !keyScalar)) + throw std::runtime_error( + "A scalar component can not be contained at " + "the same time as one or more regular components."); + + mapped_type &ret = Container::operator[](std::move(key)); + if (keyScalar) { get().m_containsScalar = true; ret.parent() = this->parent(); @@ -234,28 +234,28 @@ BaseRecord< T_elem >::operator[](key_type&& key) } } -template< typename T_elem > -inline typename BaseRecord< T_elem >::size_type -BaseRecord< T_elem >::erase(key_type const& key) +template +inline typename BaseRecord::size_type +BaseRecord::erase(key_type const &key) { bool const keyScalar = (key == RecordComponent::SCALAR); size_type res; - if( !keyScalar || (keyScalar && this->at(key).constant()) ) - res = Container< T_elem >::erase(key); + if (!keyScalar || (keyScalar && this->at(key).constant())) + res = Container::erase(key); else { - mapped_type& rc = this->find(RecordComponent::SCALAR)->second; - if( rc.written() ) + mapped_type &rc = 
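// Illustrative sketch of the scalar/vector constraint enforced by
// BaseRecord::operator[] above (not taken from this patch); `it` is a
// hypothetical iteration of a Series opened for writing.

#include <openPMD/openPMD.hpp>

void scalarVersusVector(openPMD::Iteration &it)
{
    // vector record: named components such as "x", "y", "z"
    auto Ex = it.meshes["E"]["x"];
    // scalar record: only the special SCALAR component may exist
    auto rho = it.meshes["rho"][openPMD::RecordComponent::SCALAR];
    // mixing both within one record throws std::runtime_error:
    //     it.meshes["rho"]["x"]; // "A scalar component can not be ..."
    (void)Ex;
    (void)rho;
}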
this->find(RecordComponent::SCALAR)->second; + if (rc.written()) { - Parameter< Operation::DELETE_DATASET > dDelete; + Parameter dDelete; dDelete.name = "."; this->IOHandler()->enqueue(IOTask(&rc, dDelete)); this->IOHandler()->flush(); } - res = Container< T_elem >::erase(key); + res = Container::erase(key); } - if( keyScalar ) + if (keyScalar) { this->written() = false; this->writable().abstractFilePosition.reset(); @@ -264,28 +264,28 @@ BaseRecord< T_elem >::erase(key_type const& key) return res; } -template< typename T_elem > -inline typename BaseRecord< T_elem >::iterator -BaseRecord< T_elem >::erase(iterator res) +template +inline typename BaseRecord::iterator +BaseRecord::erase(iterator res) { bool const keyScalar = (res->first == RecordComponent::SCALAR); iterator ret; - if( !keyScalar || (keyScalar && this->at(res->first).constant()) ) - ret = Container< T_elem >::erase(res); + if (!keyScalar || (keyScalar && this->at(res->first).constant())) + ret = Container::erase(res); else { - mapped_type& rc = this->find(RecordComponent::SCALAR)->second; - if( rc.written() ) + mapped_type &rc = this->find(RecordComponent::SCALAR)->second; + if (rc.written()) { - Parameter< Operation::DELETE_DATASET > dDelete; + Parameter dDelete; dDelete.name = "."; this->IOHandler()->enqueue(IOTask(&rc, dDelete)); this->IOHandler()->flush(); } - ret = Container< T_elem >::erase(res); + ret = Container::erase(res); } - if( keyScalar ) + if (keyScalar) { this->written() = false; this->writable().abstractFilePosition.reset(); @@ -294,82 +294,88 @@ BaseRecord< T_elem >::erase(iterator res) return ret; } -template< typename T_elem > -inline std::array< double, 7 > -BaseRecord< T_elem >::unitDimension() const +template +inline std::array BaseRecord::unitDimension() const { - return this->getAttribute("unitDimension").template get< std::array< double, 7 > >(); + return this->getAttribute("unitDimension") + .template get>(); } -template< typename T_elem > -inline bool -BaseRecord< T_elem >::scalar() const +template +inline bool BaseRecord::scalar() const { return get().m_containsScalar; } -template< typename T_elem > -inline void -BaseRecord< T_elem >::readBase() +template +inline void BaseRecord::readBase() { using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "unitDimension"; this->IOHandler()->enqueue(IOTask(this, aRead)); this->IOHandler()->flush(); - if( *aRead.dtype == DT::ARR_DBL_7 ) - this->setAttribute("unitDimension", Attribute(*aRead.resource).template get< std::array< double, 7 > >()); - else if( *aRead.dtype == DT::VEC_DOUBLE ) + if (*aRead.dtype == DT::ARR_DBL_7) + this->setAttribute( + "unitDimension", + Attribute(*aRead.resource).template get>()); + else if (*aRead.dtype == DT::VEC_DOUBLE) { - auto vec = Attribute(*aRead.resource).template get< std::vector< double > >(); - if( vec.size() == 7 ) + auto vec = + Attribute(*aRead.resource).template get>(); + if (vec.size() == 7) { - std::array< double, 7 > arr; - std::copy(vec.begin(), - vec.end(), - arr.begin()); + std::array arr; + std::copy(vec.begin(), vec.end(), arr.begin()); this->setAttribute("unitDimension", arr); - } else - throw std::runtime_error("Unexpected Attribute datatype for 'unitDimension'"); + } + else + throw std::runtime_error( + "Unexpected Attribute datatype for 'unitDimension'"); } else - throw std::runtime_error("Unexpected Attribute datatype for 'unitDimension'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'unitDimension'"); aRead.name = "timeOffset"; 
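// Illustrative usage sketch for unitDimension() above (not taken from this
// patch). An electric field has dimension V/m = kg * m * s^-3 * A^-1.

#include <openPMD/openPMD.hpp>
#include <map>

void declareElectricField(openPMD::Mesh &E)
{
    using UD = openPMD::UnitDimension;
    E.setUnitDimension({{UD::L, 1}, {UD::M, 1}, {UD::T, -3}, {UD::I, -1}});
    // readBase()/unitDimension() above hand this back as the 7-element array
    // {1., 1., -3., -1., 0., 0., 0.} (order: L, M, T, I, theta, N, J).
}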
this->IOHandler()->enqueue(IOTask(this, aRead)); this->IOHandler()->flush(); - if( *aRead.dtype == DT::FLOAT ) - this->setAttribute("timeOffset", Attribute(*aRead.resource).template get< float >()); - else if( *aRead.dtype == DT::DOUBLE ) - this->setAttribute("timeOffset", Attribute(*aRead.resource).template get< double >()); + if (*aRead.dtype == DT::FLOAT) + this->setAttribute( + "timeOffset", Attribute(*aRead.resource).template get()); + else if (*aRead.dtype == DT::DOUBLE) + this->setAttribute( + "timeOffset", Attribute(*aRead.resource).template get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'timeOffset'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'timeOffset'"); } -template< typename T_elem > -inline void -BaseRecord< T_elem >::flush(std::string const& name) +template +inline void BaseRecord::flush(std::string const &name) { - if( !this->written() && this->empty() ) - throw std::runtime_error("A Record can not be written without any contained RecordComponents: " + name); + if (!this->written() && this->empty()) + throw std::runtime_error( + "A Record can not be written without any contained " + "RecordComponents: " + + name); this->flush_impl(name); // flush_impl must take care to correctly set the dirty() flag so this // method doesn't do it } -template< typename T_elem > -inline bool -BaseRecord< T_elem >::dirtyRecursive() const +template +inline bool BaseRecord::dirtyRecursive() const { - if( this->dirty() ) + if (this->dirty()) { return true; } - for( auto const & pair : *this ) + for (auto const &pair : *this) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } diff --git a/include/openPMD/backend/BaseRecordComponent.hpp b/include/openPMD/backend/BaseRecordComponent.hpp index f80b93f125..a2ea09e23e 100644 --- a/include/openPMD/backend/BaseRecordComponent.hpp +++ b/include/openPMD/backend/BaseRecordComponent.hpp @@ -20,16 +20,15 @@ */ #pragma once -#include "openPMD/backend/Attributable.hpp" #include "openPMD/Dataset.hpp" #include "openPMD/Error.hpp" +#include "openPMD/backend/Attributable.hpp" // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected: #endif - namespace openPMD { namespace internal @@ -40,7 +39,7 @@ namespace internal /** * The type and extent of the dataset defined by this component. */ - Dataset m_dataset{ Datatype::UNDEFINED, {} }; + Dataset m_dataset{Datatype::UNDEFINED, {}}; /** * True if this is defined as a constant record component as specified * in the openPMD standard. 
@@ -49,32 +48,26 @@ namespace internal */ bool m_isConstant = false; - BaseRecordComponentData( BaseRecordComponentData const & ) = delete; - BaseRecordComponentData( BaseRecordComponentData && ) = delete; + BaseRecordComponentData(BaseRecordComponentData const &) = delete; + BaseRecordComponentData(BaseRecordComponentData &&) = delete; - BaseRecordComponentData & operator=( - BaseRecordComponentData const & ) = delete; - BaseRecordComponentData & operator=( - BaseRecordComponentData && ) = delete; + BaseRecordComponentData & + operator=(BaseRecordComponentData const &) = delete; + BaseRecordComponentData &operator=(BaseRecordComponentData &&) = delete; BaseRecordComponentData() = default; }; -} +} // namespace internal class BaseRecordComponent : public Attributable { - template< - typename T, - typename T_key, - typename T_container - > - friend - class Container; + template + friend class Container; public: double unitSI() const; - BaseRecordComponent& resetDatatype(Datatype); + BaseRecordComponent &resetDatatype(Datatype); Datatype getDatatype() const; @@ -104,33 +97,29 @@ class BaseRecordComponent : public Attributable * may additionally wish to use to store user-defined, backend-independent * chunking information on particle datasets. */ - ChunkTable - availableChunks(); + ChunkTable availableChunks(); protected: + std::shared_ptr + m_baseRecordComponentData{new internal::BaseRecordComponentData()}; - std::shared_ptr< internal::BaseRecordComponentData > - m_baseRecordComponentData{ - new internal::BaseRecordComponentData() }; - - inline internal::BaseRecordComponentData const & get() const + inline internal::BaseRecordComponentData const &get() const { return *m_baseRecordComponentData; } - inline internal::BaseRecordComponentData & get() + inline internal::BaseRecordComponentData &get() { return *m_baseRecordComponentData; } - inline void setData( - std::shared_ptr< internal::BaseRecordComponentData > data ) + inline void setData(std::shared_ptr data) { - m_baseRecordComponentData = std::move( data ); - Attributable::setData( m_baseRecordComponentData ); + m_baseRecordComponentData = std::move(data); + Attributable::setData(m_baseRecordComponentData); } - BaseRecordComponent( std::shared_ptr< internal::BaseRecordComponentData > ); + BaseRecordComponent(std::shared_ptr); private: BaseRecordComponent(); @@ -138,29 +127,29 @@ class BaseRecordComponent : public Attributable namespace detail { -/** - * Functor template to be used in combination with switchType::operator() - * to set a default value for constant record components via the - * respective type's default constructor. - * Used to implement empty datasets in subclasses of BaseRecordComponent - * (currently RecordComponent). - * @param T_RecordComponent - */ -template< typename T_RecordComponent > -struct DefaultValue -{ - template< typename T > - static void call( T_RecordComponent & rc ) - { - rc.makeConstant( T() ); - } - - template< unsigned n, typename... Args > - static void call( Args &&... ) + /** + * Functor template to be used in combination with switchType::operator() + * to set a default value for constant record components via the + * respective type's default constructor. + * Used to implement empty datasets in subclasses of BaseRecordComponent + * (currently RecordComponent). + * @param T_RecordComponent + */ + template + struct DefaultValue { - throw std::runtime_error( - "makeEmpty: Datatype not supported by openPMD." 
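// Illustrative sketch for constant and empty record components (not taken
// from this patch); makeEmpty() is the public entry point that dispatches to
// the DefaultValue functor shown above, and the exact makeEmpty() signature
// is an assumption here. `it` is a hypothetical iteration.

#include <openPMD/openPMD.hpp>

void constantAndEmptyComponents(openPMD::Iteration &it)
{
    // constant component: every element reads back as 42.0, no bulk data
    auto rho = it.meshes["rho"][openPMD::RecordComponent::SCALAR];
    rho.resetDataset(openPMD::Dataset(openPMD::Datatype::DOUBLE, {128, 128}));
    rho.makeConstant(42.0);

    // empty component: a dataset of known datatype and dimensionality but
    // zero extent
    auto ghost = it.meshes["ghost"][openPMD::RecordComponent::SCALAR];
    ghost.makeEmpty(openPMD::Datatype::DOUBLE, 3);
}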
); - } -}; + template + static void call(T_RecordComponent &rc) + { + rc.makeConstant(T()); + } + + template + static void call(Args &&...) + { + throw std::runtime_error( + "makeEmpty: Datatype not supported by openPMD."); + } + }; } // namespace detail } // namespace openPMD diff --git a/include/openPMD/backend/Container.hpp b/include/openPMD/backend/Container.hpp index eb2280c37a..9639ba9806 100644 --- a/include/openPMD/backend/Container.hpp +++ b/include/openPMD/backend/Container.hpp @@ -33,10 +33,9 @@ // expose private and protected members for invasive testing #ifndef OPENPMD_protected -# define OPENPMD_protected protected +#define OPENPMD_protected protected: #endif - namespace openPMD { namespace traits @@ -47,25 +46,25 @@ namespace traits * insert() of a new element. The passed parameter is an iterator to the * newly added element. */ - template< typename U > + template struct GenerationPolicy { - template< typename T > + template void operator()(T &) - { - } + {} }; -} // traits +} // namespace traits namespace internal { class SeriesData; - template< typename > class EraseStaleEntries; + template + class EraseStaleEntries; - template< + template < typename T, typename T_key = std::string, - typename T_container = std::map< T_key, T > > + typename T_container = std::map> class ContainerData : public AttributableData { public: @@ -78,42 +77,43 @@ namespace internal ContainerData() = default; - ContainerData( ContainerData const & ) = delete; - ContainerData( ContainerData && ) = delete; + ContainerData(ContainerData const &) = delete; + ContainerData(ContainerData &&) = delete; - ContainerData & operator=( ContainerData const & ) = delete; - ContainerData & operator=( ContainerData && ) = delete; + ContainerData &operator=(ContainerData const &) = delete; + ContainerData &operator=(ContainerData &&) = delete; }; -} +} // namespace internal namespace detail { -/* - * This converts the key (first parameter) to its string name within the - * openPMD hierarchy. - * If the key is found to be equal to RecordComponent::SCALAR, the parentKey - * will be returned, adding RecordComponent::SCALAR to its back. - * Reason: Scalar record components do not link their containing record as - * parent, but rather the parent's parent, so the own key within the "apparent" - * parent must be given as two steps. - */ -template< typename T > -std::vector< std::string > -keyAsString( T && key, std::vector< std::string > const & parentKey ) -{ - ( void )parentKey; - return { std::to_string( std::forward< T >( key ) ) }; -} + /* + * This converts the key (first parameter) to its string name within the + * openPMD hierarchy. + * If the key is found to be equal to RecordComponent::SCALAR, the parentKey + * will be returned, adding RecordComponent::SCALAR to its back. + * Reason: Scalar record components do not link their containing record as + * parent, but rather the parent's parent, so the own key within the + * "apparent" parent must be given as two steps. 
+ */ + template + std::vector + keyAsString(T &&key, std::vector const &parentKey) + { + (void)parentKey; + return {std::to_string(std::forward(key))}; + } -// moved to a *.cpp file so we don't need to include RecordComponent.hpp here -template<> -std::vector< std::string > keyAsString< std::string const & >( - std::string const & key, std::vector< std::string > const & parentKey ); + // moved to a *.cpp file so we don't need to include RecordComponent.hpp + // here + template <> + std::vector keyAsString( + std::string const &key, std::vector const &parentKey); -template<> -std::vector< std::string > keyAsString< std::string >( - std::string && key, std::vector< std::string > const & parentKey ); -} + template <> + std::vector keyAsString( + std::string &&key, std::vector const &parentKey); +} // namespace detail /** @brief Map-like container that enforces openPMD requirements and handles IO. * @@ -121,16 +121,17 @@ std::vector< std::string > keyAsString< std::string >( * * @tparam T Type of objects stored * @tparam T_key Key type to look elements up by - * @tparam T_container Type of container used for internal storage (must supply the same type traits and interface as std::map) + * @tparam T_container Type of container used for internal storage (must supply + * the same type traits and interface as std::map) */ -template< +template < typename T, typename T_key = std::string, - typename T_container = std::map< T_key, T > > + typename T_container = std::map> class Container : public Attributable { static_assert( - std::is_base_of< Attributable, T >::value, + std::is_base_of::value, "Type of container element must be derived from Writable"); friend class Iteration; @@ -138,27 +139,27 @@ class Container : public Attributable friend class ParticlePatches; friend class internal::SeriesData; friend class Series; - template< typename > friend class internal::EraseStaleEntries; + template + friend class internal::EraseStaleEntries; protected: - using ContainerData = internal::ContainerData< T, T_key, T_container >; + using ContainerData = internal::ContainerData; using InternalContainer = T_container; - std::shared_ptr< ContainerData > m_containerData{ - new ContainerData() }; + std::shared_ptr m_containerData{new ContainerData()}; - inline void setData( std::shared_ptr< ContainerData > containerData ) + inline void setData(std::shared_ptr containerData) { - m_containerData = std::move( containerData ); - Attributable::setData( m_containerData ); + m_containerData = std::move(containerData); + Attributable::setData(m_containerData); } - inline InternalContainer const & container() const + inline InternalContainer const &container() const { return m_containerData->m_container; } - inline InternalContainer & container() + inline InternalContainer &container() { return m_containerData->m_container; } @@ -177,60 +178,117 @@ class Container : public Attributable using iterator = typename InternalContainer::iterator; using const_iterator = typename InternalContainer::const_iterator; - iterator begin() noexcept { return container().begin(); } - const_iterator begin() const noexcept { return container().begin(); } - const_iterator cbegin() const noexcept { return container().cbegin(); } + iterator begin() noexcept + { + return container().begin(); + } + const_iterator begin() const noexcept + { + return container().begin(); + } + const_iterator cbegin() const noexcept + { + return container().cbegin(); + } - iterator end() noexcept { return container().end(); } - const_iterator end() const noexcept { 
return container().end(); } - const_iterator cend() const noexcept { return container().cend(); } + iterator end() noexcept + { + return container().end(); + } + const_iterator end() const noexcept + { + return container().end(); + } + const_iterator cend() const noexcept + { + return container().cend(); + } - bool empty() const noexcept { return container().empty(); } + bool empty() const noexcept + { + return container().empty(); + } - size_type size() const noexcept { return container().size(); } + size_type size() const noexcept + { + return container().size(); + } /** Remove all objects from the container and (if written) from disk. * - * @note Calling this operation on any container in a Series with Access::READ_ONLY will throw an exception. + * @note Calling this operation on any container in a Series with + * Access::READ_ONLY will throw an exception. * @throws std::runtime_error */ void clear() { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - throw std::runtime_error("Can not clear a container in a read-only Series."); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not clear a container in a read-only Series."); clear_unchecked(); } - std::pair< iterator, bool > insert(value_type const& value) { return container().insert(value); } - template< class P > - std::pair< iterator, bool > insert(P&& value) { return container().insert(value); } - iterator insert(const_iterator hint, value_type const& value) { return container().insert(hint, value); } - template< class P > - iterator insert(const_iterator hint, P&& value) { return container().insert(hint, value); } - template< class InputIt > - void insert(InputIt first, InputIt last) { container().insert(first, last); } - void insert(std::initializer_list< value_type > ilist) { container().insert(ilist); } + std::pair insert(value_type const &value) + { + return container().insert(value); + } + template + std::pair insert(P &&value) + { + return container().insert(value); + } + iterator insert(const_iterator hint, value_type const &value) + { + return container().insert(hint, value); + } + template + iterator insert(const_iterator hint, P &&value) + { + return container().insert(hint, value); + } + template + void insert(InputIt first, InputIt last) + { + container().insert(first, last); + } + void insert(std::initializer_list ilist) + { + container().insert(ilist); + } - void swap(Container & other) { container().swap(other.m_container); } + void swap(Container &other) + { + container().swap(other.m_container); + } - mapped_type& at(key_type const& key) { return container().at(key); } - mapped_type const& at(key_type const& key) const { return container().at(key); } + mapped_type &at(key_type const &key) + { + return container().at(key); + } + mapped_type const &at(key_type const &key) const + { + return container().at(key); + } - /** Access the value that is mapped to a key equivalent to key, creating it if such key does not exist already. + /** Access the value that is mapped to a key equivalent to key, creating it + * if such key does not exist already. * * @param key Key of the element to find (lvalue). - * @return Reference to the mapped value of the new element if no element with key key existed. Otherwise a reference to the mapped value of the existing element whose key is equivalent to key. 
- * @throws std::out_of_range error if in READ_ONLY mode and key does not exist, otherwise key will be created + * @return Reference to the mapped value of the new element if no element + * with key key existed. Otherwise a reference to the mapped value of the + * existing element whose key is equivalent to key. + * @throws std::out_of_range error if in READ_ONLY mode and key does not + * exist, otherwise key will be created */ - virtual mapped_type& operator[](key_type const& key) + virtual mapped_type &operator[](key_type const &key) { auto it = container().find(key); - if( it != container().end() ) + if (it != container().end()) return it->second; else { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) { auxiliary::OutOfRangeMsg const out_of_range_msg; throw std::out_of_range(out_of_range_msg(key)); @@ -238,28 +296,32 @@ class Container : public Attributable T t = T(); t.linkHierarchy(writable()); - auto& ret = container().insert({key, std::move(t)}).first->second; + auto &ret = container().insert({key, std::move(t)}).first->second; ret.writable().ownKeyWithinParent = - detail::keyAsString( key, writable().ownKeyWithinParent ); - traits::GenerationPolicy< T > gen; + detail::keyAsString(key, writable().ownKeyWithinParent); + traits::GenerationPolicy gen; gen(ret); return ret; } } - /** Access the value that is mapped to a key equivalent to key, creating it if such key does not exist already. + /** Access the value that is mapped to a key equivalent to key, creating it + * if such key does not exist already. * * @param key Key of the element to find (rvalue). - * @return Reference to the mapped value of the new element if no element with key key existed. Otherwise a reference to the mapped value of the existing element whose key is equivalent to key. - * @throws std::out_of_range error if in READ_ONLY mode and key does not exist, otherwise key will be created + * @return Reference to the mapped value of the new element if no element + * with key key existed. Otherwise a reference to the mapped value of the + * existing element whose key is equivalent to key. 
+ * @throws std::out_of_range error if in READ_ONLY mode and key does not + * exist, otherwise key will be created */ - virtual mapped_type& operator[](key_type&& key) + virtual mapped_type &operator[](key_type &&key) { auto it = container().find(key); - if( it != container().end() ) + if (it != container().end()) return it->second; else { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) { auxiliary::OutOfRangeMsg out_of_range_msg; throw std::out_of_range(out_of_range_msg(key)); @@ -267,48 +329,63 @@ class Container : public Attributable T t = T(); t.linkHierarchy(writable()); - auto& ret = container().insert({key, std::move(t)}).first->second; + auto &ret = container().insert({key, std::move(t)}).first->second; ret.writable().ownKeyWithinParent = detail::keyAsString( - std::move( key ), writable().ownKeyWithinParent ); - traits::GenerationPolicy< T > gen; - gen( ret ); + std::move(key), writable().ownKeyWithinParent); + traits::GenerationPolicy gen; + gen(ret); return ret; } } - iterator find(key_type const& key) { return container().find(key); } - const_iterator find(key_type const& key) const { return container().find(key); } + iterator find(key_type const &key) + { + return container().find(key); + } + const_iterator find(key_type const &key) const + { + return container().find(key); + } /** This returns either 1 if the key is found in the container of 0 if not. * * @param key key value of the element to count * @return since keys are unique in this container, returns 0 or 1 */ - size_type count(key_type const& key) const { return container().count(key); } + size_type count(key_type const &key) const + { + return container().count(key); + } - /** Checks if there is an element with a key equivalent to an exiting key in the container. + /** Checks if there is an element with a key equivalent to an exiting key in + * the container. * * @param key key value of the element to search for * @return true of key is found, else false */ - bool contains(key_type const& key) const { return container().find(key) != container().end(); } + bool contains(key_type const &key) const + { + return container().find(key) != container().end(); + } /** Remove a single element from the container and (if written) from disk. * - * @note Calling this operation on any container in a Series with Access::READ_ONLY will throw an exception. + * @note Calling this operation on any container in a Series with + * Access::READ_ONLY will throw an exception. * @throws std::runtime_error * @param key Key of the element to remove. * @return Number of elements removed (either 0 or 1). */ - virtual size_type erase(key_type const& key) + virtual size_type erase(key_type const &key) { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - throw std::runtime_error("Can not erase from a container in a read-only Series."); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not erase from a container in a read-only Series."); auto res = container().find(key); - if( res != container().end() && res->second.written() ) + if (res != container().end() && res->second.written()) { - Parameter< Operation::DELETE_PATH > pDelete; + Parameter pDelete; pDelete.path = "."; IOHandler()->enqueue(IOTask(&res->second, pDelete)); IOHandler()->flush(); @@ -319,12 +396,13 @@ class Container : public Attributable //! @todo why does const_iterator not work compile with pybind11? 
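// Usage sketch (not from the openPMD sources): Container's operator[] above is
// access-mode aware. In writable modes a missing key is created and linked
// into the hierarchy; with Access::READ_ONLY the same call throws
// std::out_of_range. File names and keys below are assumptions.
Series read("data_%T.h5", Access::READ_ONLY);
if (read.iterations.contains(100)) // non-creating lookup
{
    Iteration &it = read.iterations.at(100); // at() never inserts either
    std::cout << "iteration 100 is present\n";
    (void)it;
}
// read.iterations[42] would throw std::out_of_range here if 42 is absent.

Series write("out_%T.h5", Access::CREATE);
// In CREATE / READ_WRITE mode, operator[] creates missing entries on the fly:
auto &E_x = write.iterations[0].meshes["E"]["x"];
(void)E_x;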
virtual iterator erase(iterator res) { - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - throw std::runtime_error("Can not erase from a container in a read-only Series."); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not erase from a container in a read-only Series."); - if( res != container().end() && res->second.written() ) + if (res != container().end() && res->second.written()) { - Parameter< Operation::DELETE_PATH > pDelete; + Parameter pDelete; pDelete.path = "."; IOHandler()->enqueue(IOTask(&res->second, pDelete)); IOHandler()->flush(); @@ -335,32 +413,34 @@ class Container : public Attributable // virtual iterator erase(const_iterator first, const_iterator last) template - auto emplace(Args&&... args) - -> decltype(InternalContainer().emplace(std::forward(args)...)) + auto emplace(Args &&...args) + -> decltype(InternalContainer().emplace(std::forward(args)...)) { return container().emplace(std::forward(args)...); } -OPENPMD_protected: - Container( std::shared_ptr< ContainerData > containerData ) - : Attributable{ containerData } - , m_containerData{ std::move( containerData ) } - { - } + // clang-format off +OPENPMD_protected + // clang-format on + + Container(std::shared_ptr containerData) + : Attributable{containerData}, m_containerData{std::move(containerData)} + {} void clear_unchecked() { - if( written() ) - throw std::runtime_error("Clearing a written container not (yet) implemented."); + if (written()) + throw std::runtime_error( + "Clearing a written container not (yet) implemented."); container().clear(); } - virtual void flush(std::string const& path) + virtual void flush(std::string const &path) { - if( !written() ) + if (!written()) { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = path; IOHandler()->enqueue(IOTask(this, pCreate)); } @@ -368,10 +448,13 @@ class Container : public Attributable flushAttributes(); } -OPENPMD_private: - Container() : Attributable{ nullptr } + // clang-format off +OPENPMD_private + // clang-format on + + Container() : Attributable{nullptr} { - Attributable::setData( m_containerData ); + Attributable::setData(m_containerData); } }; @@ -386,14 +469,14 @@ namespace internal * class. * Container_t can be instantiated either by a reference or value type. */ - template< typename Container_t > + template class EraseStaleEntries { using BareContainer_t = - typename std::remove_reference< Container_t >::type; + typename std::remove_reference::type; using key_type = typename BareContainer_t::key_type; using mapped_type = typename BareContainer_t::mapped_type; - std::set< key_type > m_accessedKeys; + std::set m_accessedKeys; /* * Note: Putting a copy here leads to weird bugs due to destructors * being called too eagerly upon destruction. 
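// Illustrative sketch (not from the openPMD sources): OPENPMD_private and
// OPENPMD_protected expand to "private:" / "protected:" by default and are
// guarded by #ifndef; the surrounding clang-format off/on markers keep
// clang-format from re-indenting them when they are used as bare access
// specifiers. An invasive test can therefore redefine them before including
// any openPMD header to expose internals; the snippet below is an assumed
// test translation unit, not an existing file in the repository:
#define OPENPMD_private public:
#define OPENPMD_protected public:
#include "openPMD/openPMD.hpp"
// members declared after OPENPMD_private / OPENPMD_protected are now public
// in this translation unit only.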
@@ -404,33 +487,29 @@ namespace internal Container_t m_originalContainer; public: - explicit EraseStaleEntries( - Container_t & container_in ) - : m_originalContainer( container_in ) - { - } + explicit EraseStaleEntries(Container_t &container_in) + : m_originalContainer(container_in) + {} - explicit EraseStaleEntries( - BareContainer_t && container_in ) - : m_originalContainer( std::move( container_in ) ) - { - } + explicit EraseStaleEntries(BareContainer_t &&container_in) + : m_originalContainer(std::move(container_in)) + {} - EraseStaleEntries( EraseStaleEntries && ) = default; - EraseStaleEntries & operator=( EraseStaleEntries && ) = default; + EraseStaleEntries(EraseStaleEntries &&) = default; + EraseStaleEntries &operator=(EraseStaleEntries &&) = default; - template< typename K > - mapped_type & operator[]( K && k ) + template + mapped_type &operator[](K &&k) { - m_accessedKeys.insert( k ); // copy - return m_originalContainer[ std::forward< K >( k ) ]; + m_accessedKeys.insert(k); // copy + return m_originalContainer[std::forward(k)]; } - template< typename K > - mapped_type & at( K && k ) + template + mapped_type &at(K &&k) { - m_accessedKeys.insert( k ); // copy - return m_originalContainer.at( std::forward< K >( k ) ); + m_accessedKeys.insert(k); // copy + return m_originalContainer.at(std::forward(k)); } /** @@ -438,31 +517,31 @@ namespace internal * If the key is not accessed after this again, it will be deleted along * with all other unaccessed keys upon destruction. */ - template< typename K > - void forget( K && k ) + template + void forget(K &&k) { - m_accessedKeys.erase( std::forward< K >( k ) ); + m_accessedKeys.erase(std::forward(k)); } ~EraseStaleEntries() { - auto & map = m_originalContainer.container(); + auto &map = m_originalContainer.container(); using iterator_t = typename BareContainer_t::const_iterator; - std::vector< iterator_t > deleteMe; - deleteMe.reserve( map.size() - m_accessedKeys.size() ); - for( iterator_t it = map.begin(); it != map.end(); ++it ) + std::vector deleteMe; + deleteMe.reserve(map.size() - m_accessedKeys.size()); + for (iterator_t it = map.begin(); it != map.end(); ++it) { - auto lookup = m_accessedKeys.find( it->first ); - if( lookup == m_accessedKeys.end() ) + auto lookup = m_accessedKeys.find(it->first); + if (lookup == m_accessedKeys.end()) { - deleteMe.push_back( it ); + deleteMe.push_back(it); } } - for( auto & it : deleteMe ) + for (auto &it : deleteMe) { - map.erase( it ); + map.erase(it); } } }; -} // internal -} // openPMD +} // namespace internal +} // namespace openPMD diff --git a/include/openPMD/backend/MeshRecordComponent.hpp b/include/openPMD/backend/MeshRecordComponent.hpp index 18f9af2c9b..20f5a9b42a 100644 --- a/include/openPMD/backend/MeshRecordComponent.hpp +++ b/include/openPMD/backend/MeshRecordComponent.hpp @@ -24,18 +24,12 @@ #include - namespace openPMD { class MeshRecordComponent : public RecordComponent { - template< - typename T, - typename T_key, - typename T_container - > - friend - class Container; + template + friend class Container; friend class Mesh; @@ -52,8 +46,8 @@ class MeshRecordComponent : public RecordComponent * * @return relative position within range of [0.0:1.0) */ - template< typename T > - std::vector< T > position() const; + template + std::vector position() const; /** Position on an element * @@ -61,8 +55,8 @@ class MeshRecordComponent : public RecordComponent * * @param[in] pos relative position in range [0.0:1.0) */ - template< typename T > - MeshRecordComponent& setPosition(std::vector< T > 
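// Usage sketch (not from the openPMD sources): EraseStaleEntries is an
// internal RAII helper. While it is alive, every key touched through its
// operator[] or at() is remembered; on destruction all keys that were never
// touched are erased from the wrapped container. The container variable and
// keys below are assumptions, and the reference template argument mirrors how
// the class is meant to be instantiated (see the copy-related note above).
{
    internal::EraseStaleEntries<Container<Record> &> pruner(someRecords);
    pruner["position"];        // marks "position" as still in use
    pruner.at("momentum");     // marks "momentum" as still in use
    pruner.forget("momentum"); // withdraw that protection again
} // destructor: every entry except "position" is erased from someRecords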
pos); + template + MeshRecordComponent &setPosition(std::vector pos); /** Create a dataset with regular extent and constant value * @@ -74,19 +68,18 @@ class MeshRecordComponent : public RecordComponent * @tparam T type of the stored value * @return A reference to this RecordComponent. */ - template< typename T > - MeshRecordComponent& makeConstant(T); + template + MeshRecordComponent &makeConstant(T); }; +template +std::vector MeshRecordComponent::position() const +{ + return readVectorFloatingpoint("position"); +} -template< typename T > -std::vector< T > -MeshRecordComponent::position() const -{ return readVectorFloatingpoint< T >("position"); } - -template< typename T > -inline MeshRecordComponent& -MeshRecordComponent::makeConstant(T value) +template +inline MeshRecordComponent &MeshRecordComponent::makeConstant(T value) { RecordComponent::makeConstant(value); return *this; diff --git a/include/openPMD/backend/PatchRecord.hpp b/include/openPMD/backend/PatchRecord.hpp index 46e2a4bd42..27ddb0ba96 100644 --- a/include/openPMD/backend/PatchRecord.hpp +++ b/include/openPMD/backend/PatchRecord.hpp @@ -20,29 +20,28 @@ */ #pragma once -#include "openPMD/backend/PatchRecordComponent.hpp" #include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/PatchRecordComponent.hpp" -#include #include - +#include namespace openPMD { -class PatchRecord : public BaseRecord< PatchRecordComponent > +class PatchRecord : public BaseRecord { - friend class Container< PatchRecord >; + friend class Container; friend class ParticleSpecies; friend class ParticlePatches; public: - PatchRecord& setUnitDimension(std::map< UnitDimension, double > const&); + PatchRecord &setUnitDimension(std::map const &); ~PatchRecord() override = default; private: PatchRecord() = default; - void flush_impl(std::string const&) override; + void flush_impl(std::string const &) override; void read() override; -}; //PatchRecord -} // openPMD +}; // PatchRecord +} // namespace openPMD diff --git a/include/openPMD/backend/PatchRecordComponent.hpp b/include/openPMD/backend/PatchRecordComponent.hpp index 084af60627..5f5ed91952 100644 --- a/include/openPMD/backend/PatchRecordComponent.hpp +++ b/include/openPMD/backend/PatchRecordComponent.hpp @@ -22,17 +22,16 @@ #include "openPMD/backend/BaseRecordComponent.hpp" -#include -#include #include #include +#include +#include // expose private and protected members for invasive testing #ifndef OPENPMD_private -# define OPENPMD_private private +#define OPENPMD_private private: #endif - namespace openPMD { namespace internal @@ -40,57 +39,59 @@ namespace internal class PatchRecordComponentData : public BaseRecordComponentData { public: - /** * Chunk reading/writing requests on the contained dataset. 
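// Usage sketch (not from the openPMD sources): MeshRecordComponent keeps its
// in-cell position in the "position" attribute and forwards makeConstant() to
// RecordComponent. The iteration variable and the mesh name are assumptions.
MeshRecordComponent rho =
    iteration.meshes["rho"][MeshRecordComponent::SCALAR];
rho.setPosition(std::vector<double>{0.5, 0.5, 0.5}); // cell-centered
rho.resetDataset(Dataset(Datatype::DOUBLE, {64, 64, 64}));
rho.makeConstant(0.0); // constant-valued dataset, no heavy data written
auto pos = rho.position<double>(); // reads the "position" attribute back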
*/ - std::queue< IOTask > m_chunks; + std::queue m_chunks; - PatchRecordComponentData( PatchRecordComponentData const & ) = delete; - PatchRecordComponentData( PatchRecordComponentData && ) = delete; + PatchRecordComponentData(PatchRecordComponentData const &) = delete; + PatchRecordComponentData(PatchRecordComponentData &&) = delete; - PatchRecordComponentData & operator=( - PatchRecordComponentData const & ) = delete; - PatchRecordComponentData & operator=( - PatchRecordComponentData && ) = delete; + PatchRecordComponentData & + operator=(PatchRecordComponentData const &) = delete; + PatchRecordComponentData & + operator=(PatchRecordComponentData &&) = delete; PatchRecordComponentData(); }; -} +} // namespace internal /** * @todo add support for constant patch record components */ class PatchRecordComponent : public BaseRecordComponent { - template< typename T, typename T_key, typename T_container > + template friend class Container; - template< typename > friend class BaseRecord; + template + friend class BaseRecord; friend class ParticlePatches; friend class PatchRecord; friend class ParticleSpecies; friend class internal::PatchRecordComponentData; public: - PatchRecordComponent& setUnitSI(double); + PatchRecordComponent &setUnitSI(double); - PatchRecordComponent& resetDataset(Dataset); + PatchRecordComponent &resetDataset(Dataset); uint8_t getDimensionality() const; Extent getExtent() const; - template< typename T > - std::shared_ptr< T > load(); + template + std::shared_ptr load(); - template< typename T > - void load(std::shared_ptr< T >); + template + void load(std::shared_ptr); - template< typename T > + template void store(uint64_t idx, T); -OPENPMD_private: + // clang-format off +OPENPMD_private + // clang-format on - void flush(std::string const&); + void flush(std::string const &); void read(); /** @@ -101,98 +102,97 @@ class PatchRecordComponent : public BaseRecordComponent * @return true If dirty. * @return false Otherwise. 
*/ - bool - dirtyRecursive() const; + bool dirtyRecursive() const; - std::shared_ptr< internal::PatchRecordComponentData > - m_patchRecordComponentData{ - new internal::PatchRecordComponentData() }; + std::shared_ptr + m_patchRecordComponentData{new internal::PatchRecordComponentData()}; PatchRecordComponent(); -OPENPMD_protected: - PatchRecordComponent( - std::shared_ptr< internal::PatchRecordComponentData > ); + // clang-format off +OPENPMD_protected + // clang-format on + + PatchRecordComponent(std::shared_ptr); - inline internal::PatchRecordComponentData const & get() const + inline internal::PatchRecordComponentData const &get() const { return *m_patchRecordComponentData; } - inline internal::PatchRecordComponentData & get() + inline internal::PatchRecordComponentData &get() { return *m_patchRecordComponentData; } - inline void setData( - std::shared_ptr< internal::PatchRecordComponentData > data ) + inline void + setData(std::shared_ptr data) { - m_patchRecordComponentData = std::move( data ); - BaseRecordComponent::setData( m_patchRecordComponentData ); + m_patchRecordComponentData = std::move(data); + BaseRecordComponent::setData(m_patchRecordComponentData); } }; // PatchRecordComponent -template< typename T > -inline std::shared_ptr< T > -PatchRecordComponent::load() +template +inline std::shared_ptr PatchRecordComponent::load() { uint64_t numPoints = getExtent()[0]; - auto newData = std::shared_ptr< T >(new T[numPoints], []( T *p ){ delete [] p; }); + auto newData = + std::shared_ptr(new T[numPoints], [](T *p) { delete[] p; }); load(newData); return newData; } -template< typename T > -inline void -PatchRecordComponent::load(std::shared_ptr< T > data) +template +inline void PatchRecordComponent::load(std::shared_ptr data) { - Datatype dtype = determineDatatype< T >(); - if( dtype != getDatatype() ) - throw std::runtime_error("Type conversion during particle patch loading not yet implemented"); + Datatype dtype = determineDatatype(); + if (dtype != getDatatype()) + throw std::runtime_error( + "Type conversion during particle patch loading not yet " + "implemented"); - if( !data ) - throw std::runtime_error("Unallocated pointer passed during ParticlePatch loading."); + if (!data) + throw std::runtime_error( + "Unallocated pointer passed during ParticlePatch loading."); uint64_t numPoints = getExtent()[0]; //! @todo add support for constant patch record components - Parameter< Operation::READ_DATASET > dRead; + Parameter dRead; dRead.offset = {0}; dRead.extent = {numPoints}; dRead.dtype = getDatatype(); - dRead.data = std::static_pointer_cast< void >(data); - auto & rc = get(); + dRead.data = std::static_pointer_cast(data); + auto &rc = get(); rc.m_chunks.push(IOTask(this, dRead)); } -template< typename T > -inline void -PatchRecordComponent::store(uint64_t idx, T data) +template +inline void PatchRecordComponent::store(uint64_t idx, T data) { - Datatype dtype = determineDatatype< T >(); - if( dtype != getDatatype() ) + Datatype dtype = determineDatatype(); + if (dtype != getDatatype()) { std::ostringstream oss; - oss << "Datatypes of patch data (" - << dtype - << ") and dataset (" - << getDatatype() - << ") do not match."; + oss << "Datatypes of patch data (" << dtype << ") and dataset (" + << getDatatype() << ") do not match."; throw std::runtime_error(oss.str()); } Extent dse = getExtent(); - if( dse[0] - 1u < idx ) - throw std::runtime_error("Index does not reside inside patch (no. 
patches: " + std::to_string(dse[0]) - + " - index: " + std::to_string(idx) + ")"); + if (dse[0] - 1u < idx) + throw std::runtime_error( + "Index does not reside inside patch (no. patches: " + + std::to_string(dse[0]) + " - index: " + std::to_string(idx) + ")"); - Parameter< Operation::WRITE_DATASET > dWrite; + Parameter dWrite; dWrite.offset = {idx}; dWrite.extent = {1}; dWrite.dtype = dtype; - dWrite.data = std::make_shared< T >(data); - auto & rc = get(); + dWrite.data = std::make_shared(data); + auto &rc = get(); rc.m_chunks.push(IOTask(this, dWrite)); } } // namespace openPMD diff --git a/include/openPMD/backend/Writable.hpp b/include/openPMD/backend/Writable.hpp index be1c6167bb..7006f3648d 100644 --- a/include/openPMD/backend/Writable.hpp +++ b/include/openPMD/backend/Writable.hpp @@ -22,37 +22,36 @@ #include "openPMD/IO/AbstractIOHandler.hpp" -#include #include +#include #include // expose private and protected members for invasive testing #ifndef OPENPMD_private -# define OPENPMD_private private +#define OPENPMD_private private: #endif - namespace openPMD { namespace test { -struct TestHelper; + struct TestHelper; } // namespace test class AbstractFilePosition; class AbstractIOHandler; struct ADIOS2FilePosition; template class AbstractIOHandlerImplCommon; -template +template class Span; namespace internal { -class AttributableData; + class AttributableData; } - -/** @brief Layer to mirror structure of logical data and persistent data in file. +/** @brief Layer to mirror structure of logical data and persistent data in + * file. * * Hierarchy of objects (datasets, groups, attributes, ...) in openPMD is * managed in this class. @@ -65,22 +64,19 @@ class Writable final { friend class internal::AttributableData; friend class Attributable; - template< typename T_elem > + template friend class BaseRecord; - template< typename T_elem > + template friend class BaseRecordInterface; - template< - typename T, - typename T_key, - typename T_container - > + template friend class Container; friend class Iteration; friend class Mesh; friend class ParticleSpecies; friend class Series; friend class Record; - template< typename > friend class CommonADIOS1IOHandlerImpl; + template + friend class CommonADIOS1IOHandlerImpl; friend class ADIOS1IOHandlerImpl; friend class ParallelADIOS1IOHandlerImpl; friend class ADIOS2IOHandlerImpl; @@ -89,21 +85,21 @@ class Writable final friend class AbstractIOHandlerImplCommon; friend class JSONIOHandlerImpl; friend struct test::TestHelper; - friend std::string concrete_h5_file_position(Writable*); - friend std::string concrete_bp1_file_position(Writable*); - template + friend std::string concrete_h5_file_position(Writable *); + friend std::string concrete_bp1_file_position(Writable *); + template friend class Span; private: - Writable( internal::AttributableData * ); + Writable(internal::AttributableData *); public: ~Writable() = default; - Writable( Writable const & other ) = delete; - Writable( Writable && other ) = delete; - Writable & operator=( Writable const & other ) = delete; - Writable & operator=( Writable && other ) = delete; + Writable(Writable const &other) = delete; + Writable(Writable &&other) = delete; + Writable &operator=(Writable const &other) = delete; + Writable &operator=(Writable &&other) = delete; /** Flush the corresponding Series object * @@ -114,16 +110,19 @@ class Writable final */ void seriesFlush(); -OPENPMD_private: - void seriesFlush( FlushLevel ); + // clang-format off +OPENPMD_private + // clang-format on + + void 
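// Usage sketch (not from the openPMD sources): PatchRecordComponent holds one
// value per particle patch. store() writes a single element at a patch index
// (with the bounds check shown above) and load() reads back all extent[0]
// elements. The series/iteration variables, the species name "e" and the
// record name "numParticles" are assumptions.
auto patches = iteration.particles["e"].particlePatches;
PatchRecordComponent numParticles =
    patches["numParticles"][RecordComponent::SCALAR];
numParticles.resetDataset(Dataset(Datatype::UINT64, {4})); // 4 patches
numParticles.store(0, static_cast<uint64_t>(1000)); // one value for patch 0
// numParticles.store(4, ...) would throw: index 4 lies outside extent {4}
series.flush();

auto loaded = numParticles.load<uint64_t>(); // shared_ptr to 4 elements
series.flush(); // executes the queued read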
seriesFlush(FlushLevel); /* * These members need to be shared pointers since distinct instances of * Writable may share them. */ - std::shared_ptr< AbstractFilePosition > abstractFilePosition; - std::shared_ptr< AbstractIOHandler > IOHandler; - internal::AttributableData* attributable; - Writable* parent; + std::shared_ptr abstractFilePosition; + std::shared_ptr IOHandler; + internal::AttributableData *attributable; + Writable *parent; bool dirty; /** * If parent is not null, then this is a vector of keys such that: @@ -131,7 +130,7 @@ class Writable final * (Notice that scalar record components do not link their direct parent, * but instead their parent's parent, hence a vector of keys) */ - std::vector< std::string > ownKeyWithinParent; + std::vector ownKeyWithinParent; /** * @brief Whether a Writable has been written to the backend. * diff --git a/include/openPMD/benchmark/MemoryProfiler.hpp b/include/openPMD/benchmark/MemoryProfiler.hpp index ea534c8f6a..783052cccf 100644 --- a/include/openPMD/benchmark/MemoryProfiler.hpp +++ b/include/openPMD/benchmark/MemoryProfiler.hpp @@ -25,7 +25,6 @@ #include #include - namespace openPMD { namespace benchmark @@ -42,13 +41,13 @@ namespace benchmark * @param[in] rank MPI rank * @param[in] tag item name to measure */ - MemoryProfiler( int rank, const std::string& tag ) - : m_Rank( rank ), m_Name( "" ) + MemoryProfiler(int rank, const std::string &tag) + : m_Rank(rank), m_Name("") { #if defined(__linux) - //m_Name = "/proc/meminfo"; + // m_Name = "/proc/meminfo"; m_Name = "/proc/self/status"; - Display( tag ); + Display(tag); #endif } @@ -59,33 +58,36 @@ namespace benchmark * * @param tag item name to measure */ - void Display(const std::string& tag){ + void Display(const std::string &tag) + { if (0 == m_Name.size()) return; if (m_Rank > 0) return; - std::cout<<" memory at: "< #include - namespace openPMD { namespace benchmark { /** The Timer class for profiling purpose * - * Simple Timer that measures time consumption btw constructor and destructor - * Reports at rank 0 at the console, for immediate convenience + * Simple Timer that measures time consumption btw constructor and + * destructor Reports at rank 0 at the console, for immediate convenience */ class Timer { public: using Clock = std::chrono::system_clock; - using TimePoint = std::chrono::time_point< Clock >; + using TimePoint = std::chrono::time_point; /** Simple Timer * @@ -50,31 +49,40 @@ namespace benchmark * @param rank MPI rank * @param progStart time point at program start */ - Timer( const std::string& tag, int rank, TimePoint progStart ) - : m_ProgStart( progStart ), - m_Start( std::chrono::system_clock::now() ), - m_Tag( tag ), - m_Rank( rank ) + Timer(const std::string &tag, int rank, TimePoint progStart) + : m_ProgStart(progStart) + , m_Start(std::chrono::system_clock::now()) + , m_Tag(tag) + , m_Rank(rank) { - MemoryProfiler( rank, tag ); + MemoryProfiler(rank, tag); } - ~Timer() { + ~Timer() + { std::string tt = "~" + m_Tag; - MemoryProfiler (m_Rank, tt.c_str()); + MemoryProfiler(m_Rank, tt.c_str()); m_End = Clock::now(); - double millis = std::chrono::duration_cast< std::chrono::milliseconds >( m_End - m_Start ).count(); - double secs = millis/1000.0; - if( m_Rank > 0 ) + double millis = + std::chrono::duration_cast( + m_End - m_Start) + .count(); + double secs = millis / 1000.0; + if (m_Rank > 0) return; std::cout << " [" << m_Tag << "] took:" << secs << " seconds\n"; - std::cout<<" " << m_Tag <<" From ProgStart in seconds "<< - std::chrono::duration_cast(m_End - 
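// Illustrative sketch (not from the openPMD sources): the profiler above reads
// /proc/self/status on Linux and prints the current memory footprint. The
// relevant lines in that file look like "VmRSS:   123456 kB"; a minimal,
// standalone parser for one such field could be:
#include <fstream>
#include <string>

long residentSetSizeInKiB()
{
    std::ifstream status("/proc/self/status");
    std::string token;
    while (status >> token)
    {
        if (token == "VmRSS:")
        {
            long kib = -1;
            status >> kib; // value is reported in kB (KiB)
            return kib;
        }
        std::getline(status, token); // skip the rest of this line
    }
    return -1; // field not found (e.g. non-Linux platform)
}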
m_ProgStart).count()/1000.0<( + m_End - m_ProgStart) + .count() / + 1000.0 + << std::endl; - std::cout< sliceBlock( - Extent & totalExtent, - int size, - int rank - ) = 0; + virtual std::pair + sliceBlock(Extent &totalExtent, int size, int rank) = 0; - /** This class will be derived from - */ - virtual ~BlockSlicer() = default; - }; -} + /** This class will be derived from + */ + virtual ~BlockSlicer() = default; +}; +} // namespace openPMD diff --git a/include/openPMD/benchmark/mpi/DatasetFiller.hpp b/include/openPMD/benchmark/mpi/DatasetFiller.hpp index 23dec8023a..762d171cbb 100644 --- a/include/openPMD/benchmark/mpi/DatasetFiller.hpp +++ b/include/openPMD/benchmark/mpi/DatasetFiller.hpp @@ -21,105 +21,92 @@ #pragma once - -#include #include "openPMD/Dataset.hpp" +#include #include - namespace openPMD { - /** - * An abstract class to create one iteration of data per thread. - * @tparam T The type of data to produce. - */ - template< typename T > - class DatasetFiller - { - protected: - Extent::value_type m_numberOfItems; - public: - using resultType = T; - - explicit DatasetFiller( Extent::value_type numberOfItems = 0 ); - - /** This class will be derived from - */ - virtual ~DatasetFiller() = default; - - /** - * Create a shared pointer of m_numberOfItems items of type T. - * Should take roughly the same amount of time per call as long as - * m_numberOfItems does not change. - * @return - */ - virtual std::shared_ptr< T > produceData( ) = 0; - - /** - * Set number of items to be produced. - * @param numberOfItems The number. - */ - virtual void setNumberOfItems( Extent::value_type numberOfItems ) = 0; - }; - +/** + * An abstract class to create one iteration of data per thread. + * @tparam T The type of data to produce. + */ +template +class DatasetFiller +{ +protected: + Extent::value_type m_numberOfItems; - template< typename T > - DatasetFiller< T >::DatasetFiller( Extent::value_type numberOfItems ) : - m_numberOfItems( numberOfItems ) - {} +public: + using resultType = T; + explicit DatasetFiller(Extent::value_type numberOfItems = 0); - template< typename DF > - class SimpleDatasetFillerProvider - { - public: - using resultType = typename DF::resultType; - private: - std::shared_ptr< DF > m_df; + /** This class will be derived from + */ + virtual ~DatasetFiller() = default; + /** + * Create a shared pointer of m_numberOfItems items of type T. + * Should take roughly the same amount of time per call as long as + * m_numberOfItems does not change. + * @return + */ + virtual std::shared_ptr produceData() = 0; - template< - typename T, - typename Dummy=void - > - struct Helper - { - std::shared_ptr< DatasetFiller< T>> operator()( std::shared_ptr & ) - { - throw std::runtime_error( - "Can only create data of type " + - datatypeToString( determineDatatype< resultType >( ) ) - ); - } - }; - - template< typename Dummy > - struct Helper< - resultType, - Dummy - > - { - std::shared_ptr< DatasetFiller< resultType>> operator()(std::shared_ptr &df ) - { - return df; - } - }; + /** + * Set number of items to be produced. + * @param numberOfItems The number. 
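// Illustrative sketch (not from the openPMD sources): BlockSlicer only
// requires sliceBlock() to map (total extent, number of ranks, own rank) to a
// local (offset, extent) pair. A minimal slicer cutting contiguous stripes
// along the first dimension might look like this; the class name and the
// remainder handling are assumptions, and at least a 1-D extent is assumed.
class FirstDimensionSlicer : public BlockSlicer
{
public:
    std::pair<Offset, Extent>
    sliceBlock(Extent &totalExtent, int size, int rank) override
    {
        using V = Extent::value_type;
        V const total = totalExtent[0];
        V const nRanks = static_cast<V>(size);
        V const r = static_cast<V>(rank);
        V const chunk = total / nRanks;
        V const rest = total % nRanks;

        Extent localExtent = totalExtent;
        localExtent[0] = chunk + (r < rest ? 1 : 0); // first `rest` ranks get one more
        Offset offset(totalExtent.size(), 0);
        offset[0] = r * chunk + (r < rest ? r : rest);
        return {offset, localExtent};
    }
};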
+ */ + virtual void setNumberOfItems(Extent::value_type numberOfItems) = 0; +}; - public: +template +DatasetFiller::DatasetFiller(Extent::value_type numberOfItems) + : m_numberOfItems(numberOfItems) +{} +template +class SimpleDatasetFillerProvider +{ +public: + using resultType = typename DF::resultType; - explicit SimpleDatasetFillerProvider( DF df ) : - m_df { std::make_shared< DF >( std::move( df ) ) } - {} +private: + std::shared_ptr m_df; + template + struct Helper + { + std::shared_ptr> operator()(std::shared_ptr &) + { + throw std::runtime_error( + "Can only create data of type " + + datatypeToString(determineDatatype())); + } + }; - template< typename T > - std::shared_ptr< DatasetFiller< T >> operator()( ) + template + struct Helper + { + std::shared_ptr> + operator()(std::shared_ptr &df) { - Helper< T > h; - return h( m_df ); + return df; } }; +public: + explicit SimpleDatasetFillerProvider(DF df) + : m_df{std::make_shared(std::move(df))} + {} + + template + std::shared_ptr> operator()() + { + Helper h; + return h(m_df); + } +}; -} +} // namespace openPMD diff --git a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp index 8f48f8879b..14a260496e 100644 --- a/include/openPMD/benchmark/mpi/MPIBenchmark.hpp +++ b/include/openPMD/benchmark/mpi/MPIBenchmark.hpp @@ -26,164 +26,155 @@ #include "RandomDatasetFiller.hpp" -#include "openPMD/openPMD.hpp" #include "openPMD/DatatypeHelpers.hpp" -#include "openPMD/benchmark/mpi/MPIBenchmarkReport.hpp" -#include "openPMD/benchmark/mpi/DatasetFiller.hpp" #include "openPMD/benchmark/mpi/BlockSlicer.hpp" +#include "openPMD/benchmark/mpi/DatasetFiller.hpp" +#include "openPMD/benchmark/mpi/MPIBenchmarkReport.hpp" +#include "openPMD/openPMD.hpp" #include #include #include #include +#include #include +#include #include #include -#include -#include - namespace openPMD { - /** - * Class representing a benchmark. - * Allows to configure a benchmark and execute it. - * @tparam DatasetFillerProvider Functor type to create a DatasetFiller with - * the requested type. Should have a templated operator()() returning a value - * that can be dynamically casted to a std::shared_ptr>. - */ - template< typename DatasetFillerProvider > - class MPIBenchmark - { - - public: - using extentT = Extent::value_type; - MPI_Comm communicator = MPI_COMM_WORLD; - - /** - * Total extent of the hypercuboid used in the benchmark. - */ - Extent totalExtent; +/** + * Class representing a benchmark. + * Allows to configure a benchmark and execute it. + * @tparam DatasetFillerProvider Functor type to create a DatasetFiller with + * the requested type. Should have a templated operator()() returning a value + * that can be dynamically casted to a + * std::shared_ptr>. + */ +template +class MPIBenchmark +{ - std::shared_ptr< BlockSlicer > m_blockSlicer; +public: + using extentT = Extent::value_type; + MPI_Comm communicator = MPI_COMM_WORLD; - DatasetFillerProvider m_dfp; + /** + * Total extent of the hypercuboid used in the benchmark. + */ + Extent totalExtent; + std::shared_ptr m_blockSlicer; - /** - * Construct an MPI benchmark manually. - * @param basePath The path to write to. Will be extended with the - * backends' filename endings. May be overwritten if performing several - * benchmarks with the same backend, e.g. when using different compression - * schemes. - * @param tExtent The total extent of the dataset. 
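// Illustrative sketch (not from the openPMD sources): a DatasetFiller only has
// to hand out m_numberOfItems values of type T per produceData() call. A
// trivial filler producing a constant value could look like this; the class
// name is an assumption (the RandomDatasetFiller included above is the
// shipped counterpart).
template <typename T>
class ConstantDatasetFiller : public DatasetFiller<T>
{
    T m_value;

public:
    explicit ConstantDatasetFiller(T value, Extent::value_type n = 0)
        : DatasetFiller<T>(n), m_value(value)
    {}

    std::shared_ptr<T> produceData() override
    {
        auto n = this->m_numberOfItems;
        std::shared_ptr<T> data(new T[n], [](T *p) { delete[] p; });
        for (Extent::value_type i = 0; i < n; ++i)
            data.get()[i] = m_value;
        return data;
    }

    void setNumberOfItems(Extent::value_type n) override
    {
        this->m_numberOfItems = n;
    }
};
// It can be handed to the benchmark via
//     SimpleDatasetFillerProvider<ConstantDatasetFiller<double>> dfp{filler};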
- * @param blockSlicer An implementation of BlockSlicer class, associating - * each thread with a portion of the dataset to write to. - * @param dfp DatasetFillerProvider, a templated functor returning a - * std::shared_ptr> or a value dynamically - * castable to one. - * @param comm MPI communicator. - */ - MPIBenchmark( - std::string basePath, - Extent tExtent, - std::shared_ptr< BlockSlicer > blockSlicer, - DatasetFillerProvider dfp, - MPI_Comm comm = MPI_COMM_WORLD - ); - - /** - * @param jsonConfig Backend-specific configuration. - * @param backend Backend to use, specified by filename extension (eg "bp" or "h5"). - * @param dt Type of data to write and read. - * @param iterations The number of iterations to write and read for each - * compression strategy. The DatasetFiller functor will be called for each - * iteration, so it should create sufficient data for one iteration. - * @param threadSize Number of threads to use. - */ - void addConfiguration( - std::string jsonConfig, - std::string backend, - Datatype dt, - typename decltype( Series::iterations )::key_type iterations, - int threadSize - ); + DatasetFillerProvider m_dfp; - /** - * Version of addConfiguration() that automatically sets the number of used - * threads to the MPI size. - * @param jsonConfig Backend-specific configuration. - * @param backend Backend to use, specified by filename extension (eg "bp" or "h5"). - * @param dt Type of data to write and read. - * @param iterations The number of iterations to write and read for each - * compression strategy. The DatasetFiller functor will be called for each - * iteration, so it should create sufficient data for one iteration. - */ - void addConfiguration( - std::string jsonConfig, - std::string backend, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations - ); + /** + * Construct an MPI benchmark manually. + * @param basePath The path to write to. Will be extended with the + * backends' filename endings. May be overwritten if performing several + * benchmarks with the same backend, e.g. when using different compression + * schemes. + * @param tExtent The total extent of the dataset. + * @param blockSlicer An implementation of BlockSlicer class, associating + * each thread with a portion of the dataset to write to. + * @param dfp DatasetFillerProvider, a templated functor returning a + * std::shared_ptr> or a value dynamically + * castable to one. + * @param comm MPI communicator. + */ + MPIBenchmark( + std::string basePath, + Extent tExtent, + std::shared_ptr blockSlicer, + DatasetFillerProvider dfp, + MPI_Comm comm = MPI_COMM_WORLD); - void resetConfigurations( ); + /** + * @param jsonConfig Backend-specific configuration. + * @param backend Backend to use, specified by filename extension (eg "bp" + * or "h5"). + * @param dt Type of data to write and read. + * @param iterations The number of iterations to write and read for each + * compression strategy. The DatasetFiller functor will be called for each + * iteration, so it should create sufficient data for one iteration. + * @param threadSize Number of threads to use. + */ + void addConfiguration( + std::string jsonConfig, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations, + int threadSize); + /** + * Version of addConfiguration() that automatically sets the number of used + * threads to the MPI size. + * @param jsonConfig Backend-specific configuration. + * @param backend Backend to use, specified by filename extension (eg "bp" + * or "h5"). 
+ * @param dt Type of data to write and read. + * @param iterations The number of iterations to write and read for each + * compression strategy. The DatasetFiller functor will be called for each + * iteration, so it should create sufficient data for one iteration. + */ + void addConfiguration( + std::string jsonConfig, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations); - /** - * Main function for running a benchmark. The benchmark is repeated for all - * previously requested compressions strategies, backends and thread sizes. - * @tparam Clock Clock type to use. - * @param rootThread Rank at which the report will be read. - * @return A report about the time needed for writing and reading under each - * compression strategy. - */ - template< typename Clock > - MPIBenchmarkReport< typename Clock::duration > runBenchmark( - int rootThread = 0 - ); - - private: - std::string m_basePath; - std::vector< - std::tuple< - std::string, - std::string, - int, - Datatype, - typename decltype( Series::iterations)::key_type>> - m_configurations; - - enum Config - { - JSON_CONFIG = 0, - BACKEND, - NRANKS, - DTYPE, - ITERATIONS - }; - - std::pair< - Offset, - Extent - > slice( int size ); + void resetConfigurations(); - /** - * @brief Struct used by MPIBenchmark::runBenchmark in switchType. - * Does the actual heavy lifting. - * - * @tparam Clock Clock type to use. - */ - template< typename Clock > - struct BenchmarkExecution - { - MPIBenchmark< DatasetFillerProvider > * m_benchmark; + /** + * Main function for running a benchmark. The benchmark is repeated for all + * previously requested compressions strategies, backends and thread sizes. + * @tparam Clock Clock type to use. + * @param rootThread Rank at which the report will be read. + * @return A report about the time needed for writing and reading under each + * compression strategy. + */ + template + MPIBenchmarkReport + runBenchmark(int rootThread = 0); + +private: + std::string m_basePath; + std::vector> + m_configurations; + + enum Config + { + JSON_CONFIG = 0, + BACKEND, + NRANKS, + DTYPE, + ITERATIONS + }; + std::pair slice(int size); - explicit BenchmarkExecution( MPIBenchmark< DatasetFillerProvider > * benchmark ) : - m_benchmark { benchmark } - {} + /** + * @brief Struct used by MPIBenchmark::runBenchmark in switchType. + * Does the actual heavy lifting. + * + * @tparam Clock Clock type to use. + */ + template + struct BenchmarkExecution + { + MPIBenchmark *m_benchmark; + explicit BenchmarkExecution( + MPIBenchmark *benchmark) + : m_benchmark{benchmark} + {} /** * Execute a single read benchmark. @@ -196,368 +187,264 @@ namespace openPMD * @param iterations The number of iterations to write. * @return The time passed. */ - template< - typename T - > - typename Clock::duration writeBenchmark( - std::string const & jsonConfig, - Offset & offset, - Extent & extent, - std::string const & extension, - std::shared_ptr< DatasetFiller< T >> datasetFiller, - typename decltype( Series::iterations)::key_type iterations - ); - - /** - * Execute a single read benchmark. - * @tparam T Type of the dataset to read. - * @param offset Local offset of the chunk to read. - * @param extent Local extent of the chunk to read. - * @param extension File extension to control the openPMD backend. - * @param iterations The number of iterations to read. - * @return The time passed. 
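// Usage sketch (not from the openPMD sources): putting the pieces together, a
// benchmark is configured and run roughly like this. The base path, extent,
// JSON configuration, iteration count and the filler/slicer types (taken from
// the sketches above) are assumptions.
Extent total{1000, 1000};
ConstantDatasetFiller<double> filler(1.0);
SimpleDatasetFillerProvider<ConstantDatasetFiller<double>> dfp(filler);

MPIBenchmark<decltype(dfp)> benchmark(
    "benchmark_tmp",                          // backend appends ".bp" / ".h5"
    total,
    std::make_shared<FirstDimensionSlicer>(), // see the slicer sketch above
    dfp);

benchmark.addConfiguration("{}", "bp", Datatype::DOUBLE, 10); // threads = MPI size
auto report = benchmark.runBenchmark<std::chrono::high_resolution_clock>(0);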
- */ - template< - typename T - > - typename Clock::duration readBenchmark( - Offset & offset, - Extent & extent, - std::string extension, - typename decltype( Series::iterations)::key_type iterations - ); - - template< typename T > - static void call( - BenchmarkExecution< Clock > &, - MPIBenchmarkReport< typename Clock::duration > & report, - int rootThread = 0 - ); - - static constexpr char const * errorMsg = "BenchmarkExecution"; - }; - }; - - - // Implementation - + template + typename Clock::duration writeBenchmark( + std::string const &jsonConfig, + Offset &offset, + Extent &extent, + std::string const &extension, + std::shared_ptr> datasetFiller, + typename decltype(Series::iterations)::key_type iterations); + /** + * Execute a single read benchmark. + * @tparam T Type of the dataset to read. + * @param offset Local offset of the chunk to read. + * @param extent Local extent of the chunk to read. + * @param extension File extension to control the openPMD backend. + * @param iterations The number of iterations to read. + * @return The time passed. + */ + template + typename Clock::duration readBenchmark( + Offset &offset, + Extent &extent, + std::string extension, + typename decltype(Series::iterations)::key_type iterations); + + template + static void call( + BenchmarkExecution &, + MPIBenchmarkReport &report, + int rootThread = 0); + + static constexpr char const *errorMsg = "BenchmarkExecution"; + }; +}; +// Implementation +template +template +MPIBenchmarkReport +MPIBenchmark::runBenchmark(int rootThread) +{ + MPIBenchmarkReport res{this->communicator}; + BenchmarkExecution exec{this}; - template< typename DatasetFillerProvider > - template< typename Clock > - MPIBenchmarkReport< typename Clock::duration > - MPIBenchmark< DatasetFillerProvider >::runBenchmark( - int rootThread - ) + std::set datatypes; + for (auto const &conf : m_configurations) { - MPIBenchmarkReport< typename Clock::duration > res{this->communicator}; - BenchmarkExecution< Clock > exec { this }; - - std::set< Datatype > datatypes; - for( auto const & conf: m_configurations ) - { - datatypes.insert( std::get< DTYPE >( conf ) ); - } - for( Datatype dt: datatypes ) - { - switchType< BenchmarkExecution< Clock > >( - dt, - exec, - res, - rootThread - ); - } - - return res; + datatypes.insert(std::get(conf)); } - - - template< typename DatasetFillerProvider > - MPIBenchmark< DatasetFillerProvider >::MPIBenchmark( - std::string basePath, - Extent tExtent, - std::shared_ptr< BlockSlicer > blockSlicer, - DatasetFillerProvider dfp, - MPI_Comm comm - ): - communicator { comm }, - totalExtent { std::move( tExtent ) }, - m_blockSlicer { std::move( blockSlicer ) }, - m_dfp { dfp }, - m_basePath { std::move( basePath ) } + for (Datatype dt : datatypes) { - if( m_blockSlicer == nullptr ) - throw std::runtime_error("Argument blockSlicer cannot be a nullptr!"); + switchType>(dt, exec, res, rootThread); } + return res; +} - template< typename DatasetFillerProvider > - std::pair< - Offset, - Extent - > MPIBenchmark< DatasetFillerProvider >::slice( int size ) - { - int actualSize; - MPI_Comm_size( - this->communicator, - &actualSize - ); - int rank; - MPI_Comm_rank( - this->communicator, - &rank - ); - size = std::min( - size, - actualSize - ); - return m_blockSlicer->sliceBlock( - totalExtent, - size, - rank - ); - } - +template +MPIBenchmark::MPIBenchmark( + std::string basePath, + Extent tExtent, + std::shared_ptr blockSlicer, + DatasetFillerProvider dfp, + MPI_Comm comm) + : communicator{comm} + , 
totalExtent{std::move(tExtent)} + , m_blockSlicer{std::move(blockSlicer)} + , m_dfp{dfp} + , m_basePath{std::move(basePath)} +{ + if (m_blockSlicer == nullptr) + throw std::runtime_error("Argument blockSlicer cannot be a nullptr!"); +} - template< typename DatasetFillerProvider > - void MPIBenchmark< DatasetFillerProvider >::addConfiguration( - std::string jsonConfig, - std::string backend, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations, - int threadSize - ) - { - this->m_configurations - .emplace_back( - std::move( jsonConfig ), - backend, - threadSize, - dt, - iterations - ); - } +template +std::pair MPIBenchmark::slice(int size) +{ + int actualSize; + MPI_Comm_size(this->communicator, &actualSize); + int rank; + MPI_Comm_rank(this->communicator, &rank); + size = std::min(size, actualSize); + return m_blockSlicer->sliceBlock(totalExtent, size, rank); +} +template +void MPIBenchmark::addConfiguration( + std::string jsonConfig, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations, + int threadSize) +{ + this->m_configurations.emplace_back( + std::move(jsonConfig), backend, threadSize, dt, iterations); +} - template< typename DatasetFillerProvider > - void MPIBenchmark< DatasetFillerProvider >::addConfiguration( - std::string jsonConfig, - std::string backend, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations - ) - { - int size; - MPI_Comm_size( - communicator, - &size - ); - addConfiguration( - std::move( jsonConfig ), - backend, - dt, - iterations, - size - ); - } +template +void MPIBenchmark::addConfiguration( + std::string jsonConfig, + std::string backend, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations) +{ + int size; + MPI_Comm_size(communicator, &size); + addConfiguration(std::move(jsonConfig), backend, dt, iterations, size); +} +template +void MPIBenchmark::resetConfigurations() +{ + this->m_compressions.clear(); +} - template< typename DatasetFillerProvider > - void MPIBenchmark< DatasetFillerProvider >::resetConfigurations( ) +template +template +template +typename Clock::duration +MPIBenchmark::BenchmarkExecution::writeBenchmark( + std::string const &jsonConfig, + Offset &offset, + Extent &extent, + std::string const &extension, + std::shared_ptr> datasetFiller, + typename decltype(Series::iterations)::key_type iterations) +{ + MPI_Barrier(m_benchmark->communicator); + auto start = Clock::now(); + + // open file for writing + Series series = Series( + m_benchmark->m_basePath + "." + extension, + Access::CREATE, + m_benchmark->communicator, + jsonConfig); + + for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; + i++) { - this->m_compressions - .clear( ); - } + auto writeData = datasetFiller->produceData(); + MeshRecordComponent id = + series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; - template< typename DatasetFillerProvider > - template< typename Clock > - template< typename T > - typename Clock::duration - MPIBenchmark< DatasetFillerProvider >::BenchmarkExecution< Clock >::writeBenchmark( - std::string const & jsonConfig, - Offset & offset, - Extent & extent, - std::string const & extension, - std::shared_ptr< DatasetFiller< T >> datasetFiller, - typename decltype( Series::iterations)::key_type iterations - ) - { - MPI_Barrier( m_benchmark->communicator ); - auto start = Clock::now( ); - - // open file for writing - Series series = Series( - m_benchmark->m_basePath + "." 
+ extension, - Access::CREATE, - m_benchmark->communicator, - jsonConfig - ); - - for( typename decltype( Series::iterations)::key_type i = 0; - i < iterations; - i++ ) - { - auto writeData = datasetFiller->produceData( ); + Datatype datatype = determineDatatype(writeData); + Dataset dataset = Dataset(datatype, m_benchmark->totalExtent); + id.resetDataset(dataset); - MeshRecordComponent - id = - series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; + series.flush(); - Datatype datatype = determineDatatype( writeData ); - Dataset dataset = Dataset( - datatype, - m_benchmark->totalExtent - ); + id.storeChunk(writeData, offset, extent); + series.flush(); + } - id.resetDataset( dataset ); + MPI_Barrier(m_benchmark->communicator); + auto end = Clock::now(); - series.flush( ); + // deduct the time needed for data generation + for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; + i++) + { + datasetFiller->produceData(); + } + auto deduct = Clock::now(); - id.storeChunk< T >( - writeData, - offset, - extent - ); - series.flush( ); - } + return end - start - (deduct - end); +} - MPI_Barrier( m_benchmark->communicator ); - auto end = Clock::now( ); +template +template +template +typename Clock::duration +MPIBenchmark::BenchmarkExecution::readBenchmark( + Offset &offset, + Extent &extent, + std::string extension, + typename decltype(Series::iterations)::key_type iterations) +{ + MPI_Barrier(m_benchmark->communicator); + // let every thread measure time + auto start = Clock::now(); - // deduct the time needed for data generation - for( typename decltype( Series::iterations)::key_type i = 0; - i < iterations; - i++ ) - { - datasetFiller->produceData( ); - } - auto deduct = Clock::now( ); + Series series = Series( + m_benchmark->m_basePath + "." + extension, + Access::READ_ONLY, + m_benchmark->communicator); - return end - start - ( deduct - end ); + for (typename decltype(Series::iterations)::key_type i = 0; i < iterations; + i++) + { + MeshRecordComponent id = + series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; + + auto chunk_data = id.loadChunk(offset, extent); + series.flush(); } + MPI_Barrier(m_benchmark->communicator); + auto end = Clock::now(); + return end - start; +} - template< typename DatasetFillerProvider > - template< typename Clock > - template< typename T > - typename Clock::duration - MPIBenchmark< DatasetFillerProvider >::BenchmarkExecution< Clock >::readBenchmark( - Offset & offset, - Extent & extent, - std::string extension, - typename decltype( Series::iterations)::key_type iterations - ) +template +template +template +void MPIBenchmark::BenchmarkExecution::call( + BenchmarkExecution &exec, + MPIBenchmarkReport &report, + int rootThread) +{ + Datatype dt = determineDatatype(); + auto dsf = std::dynamic_pointer_cast>( + exec.m_benchmark->m_dfp.template operator()()); + for (auto const &config : exec.m_benchmark->m_configurations) { - MPI_Barrier( m_benchmark->communicator ); - // let every thread measure time - auto start = Clock::now( ); - - Series series = Series( - m_benchmark->m_basePath + "." 
+ extension, - Access::READ_ONLY, - m_benchmark->communicator - ); - - for( typename decltype( Series::iterations)::key_type i = 0; - i < iterations; - i++ ) - { - MeshRecordComponent - id = - series.iterations[i].meshes["id"][MeshRecordComponent::SCALAR]; - + std::string jsonConfig; + std::string backend; + int size; + Datatype dt2; + typename decltype(Series::iterations)::key_type iterations; + std::tie(jsonConfig, backend, size, dt2, iterations) = config; - auto chunk_data = id.loadChunk< T >( - offset, - extent - ); - series.flush( ); + if (dt != dt2) + { + continue; } - MPI_Barrier( m_benchmark->communicator ); - auto end = Clock::now( ); - return end - start; - } + auto localCuboid = exec.m_benchmark->slice(size); - - template< typename DatasetFillerProvider > - template< typename Clock > - template< typename T > - void - MPIBenchmark< DatasetFillerProvider >::BenchmarkExecution< Clock >::call( - BenchmarkExecution< Clock > & exec, - MPIBenchmarkReport< typename Clock::duration > & report, - int rootThread - ) - { - Datatype dt = determineDatatype< T >( ); - auto dsf = std::dynamic_pointer_cast< DatasetFiller< T>>( - exec.m_benchmark->m_dfp - .template operator( - )< T >( ) - ); - for( auto const & config: exec.m_benchmark->m_configurations ) + extentT blockSize = 1; + for (auto ext : localCuboid.second) { - std::string jsonConfig; - std::string backend; - int size; - Datatype dt2; - typename decltype( Series::iterations)::key_type iterations; - std::tie( - jsonConfig, - backend, - size, - dt2, - iterations - ) = config; - - if( dt != dt2 ) - { - continue; - } - - auto localCuboid = exec.m_benchmark->slice( size ); - - extentT blockSize = 1; - for( auto ext: localCuboid.second ) - { - blockSize *= ext; - } - dsf->setNumberOfItems( blockSize ); - - auto writeTime = exec.writeBenchmark< T >( - jsonConfig, - localCuboid.first, - localCuboid.second, - backend, - dsf, - iterations - ); - auto readTime = exec.readBenchmark< T >( - localCuboid.first, - localCuboid.second, - backend, - iterations - ); - report.addReport( - rootThread, - jsonConfig, - backend, - size, - dt2, - iterations, - std::make_pair( - writeTime, - readTime - ) - ); - + blockSize *= ext; } + dsf->setNumberOfItems(blockSize); + + auto writeTime = exec.writeBenchmark( + jsonConfig, + localCuboid.first, + localCuboid.second, + backend, + dsf, + iterations); + auto readTime = exec.readBenchmark( + localCuboid.first, localCuboid.second, backend, iterations); + report.addReport( + rootThread, + jsonConfig, + backend, + size, + dt2, + iterations, + std::make_pair(writeTime, readTime)); } } +} // namespace openPMD #endif diff --git a/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp b/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp index 3ee01dd948..34ac57f8e1 100644 --- a/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp +++ b/include/openPMD/benchmark/mpi/MPIBenchmarkReport.hpp @@ -27,352 +27,252 @@ #include "openPMD/Datatype.hpp" #include "openPMD/Series.hpp" +#include "string.h" #include +#include #include #include -#include -#include "string.h" - namespace openPMD { - /** - * The report for a single benchmark produced by . - * @tparam Duration Datatype to be used for storing a time interval. - */ - template< typename Duration > - struct MPIBenchmarkReport - { - MPI_Comm communicator; - - MPIBenchmarkReport(MPI_Comm); - - /** - * Time needed for writing and reading per compression strategy and level. 
- */ - std::map< - std::tuple< - int, // rank - std::string, // jsonConfig - std::string, // extension - int, // thread size - Datatype, - typename decltype( Series::iterations )::key_type - >, - std::pair< - Duration, - Duration - > - > durations; - - enum Selector - { - RANK = 0, - COMPRESSION, - COMPRESSION_LEVEL, - BACKEND, - NRANKS, - DTYPE, - ITERATIONS - }; - - /** - * Add results for a certain compression strategy and level. - * - * @param rootThread The MPI rank which will collect the data. - * @param jsonConfig Compression strategy. - * @param extension The openPMD filename extension. - * @param threadSize The MPI size. - * @param dt The openPMD datatype. - * @param iterations The number of iterations per compression strategy. - * @param report A pair of write and read time measurements. - */ - void addReport( - int rootThread, - std::string jsonConfig, - std::string extension, - int threadSize, - Datatype dt, - typename decltype( Series::iterations )::key_type iterations, - std::pair< - Duration, - Duration - > const & report - ); - - /** Retrieve the time measured for a certain compression strategy. - * - * @param rank Which MPI rank's duration results to retrieve. - * @param jsonConfig Compression strategy. - * @param extension The openPMD filename extension. - * @param threadSize The MPI size. - * @param dt The openPMD datatype. - * @param iterations The number of iterations per compression strategy. - * @return A pair of write and read time measurements. - */ - std::pair< - Duration, - Duration - > getReport( - int rank, - std::string jsonConfig, - std::string extension, - int threadSize, - Datatype dt, - typename decltype( Series::iterations)::key_type iterations - ); - - private: - template< - typename D, - typename Dummy = D - > - struct MPIDatatype - { - }; +/** + * The report for a single benchmark produced by + * . + * @tparam Duration Datatype to be used for storing a time interval. + */ +template +struct MPIBenchmarkReport +{ + MPI_Comm communicator; + MPIBenchmarkReport(MPI_Comm); - template< typename Dummy > - struct MPIDatatype< - char, - Dummy - > - { - MPI_Datatype dt = MPI_CHAR; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned char, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED_CHAR; - }; - template< typename Dummy > - struct MPIDatatype< - short, - Dummy - > - { - MPI_Datatype dt = MPI_SHORT; - }; - template< typename Dummy > - struct MPIDatatype< - int, - Dummy - > - { - MPI_Datatype dt = MPI_INT; - }; - template< typename Dummy > - struct MPIDatatype< - long, - Dummy - > - { - MPI_Datatype dt = MPI_LONG; - }; - template< typename Dummy > - struct MPIDatatype< - float, - Dummy - > - { - MPI_Datatype dt = MPI_FLOAT; - }; - template< typename Dummy > - struct MPIDatatype< - double, - Dummy - > - { - MPI_Datatype dt = MPI_DOUBLE; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned short, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED_SHORT; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned int, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED; - }; - template< typename Dummy > - struct MPIDatatype< - unsigned long, - Dummy - > - { - MPI_Datatype dt = MPI_UNSIGNED_LONG; - }; - template< typename Dummy > - struct MPIDatatype< - long double, - Dummy - > - { - MPI_Datatype dt = MPI_LONG_DOUBLE; - }; - template< typename Dummy > - struct MPIDatatype< - long long, - Dummy - > - { - MPI_Datatype dt = MPI_LONG_LONG_INT; - }; + /** + * Time needed for writing and reading per compression strategy and level. 
+ */ + std::map< + std::tuple< + int, // rank + std::string, // jsonConfig + std::string, // extension + int, // thread size + Datatype, + typename decltype(Series::iterations)::key_type>, + std::pair> + durations; - MPIDatatype< typename Duration::rep > m_mpiDatatype; - MPI_Datatype mpiType = m_mpiDatatype.dt; + enum Selector + { + RANK = 0, + COMPRESSION, + COMPRESSION_LEVEL, + BACKEND, + NRANKS, + DTYPE, + ITERATIONS }; - // implementation - - - template< typename Duration > - void MPIBenchmarkReport< Duration >::addReport( + /** + * Add results for a certain compression strategy and level. + * + * @param rootThread The MPI rank which will collect the data. + * @param jsonConfig Compression strategy. + * @param extension The openPMD filename extension. + * @param threadSize The MPI size. + * @param dt The openPMD datatype. + * @param iterations The number of iterations per compression strategy. + * @param report A pair of write and read time measurements. + */ + void addReport( int rootThread, std::string jsonConfig, std::string extension, int threadSize, Datatype dt, - typename decltype( Series::iterations )::key_type iterations, - std::pair< - Duration, - Duration - > const & report - ) + typename decltype(Series::iterations)::key_type iterations, + std::pair const &report); + + /** Retrieve the time measured for a certain compression strategy. + * + * @param rank Which MPI rank's duration results to retrieve. + * @param jsonConfig Compression strategy. + * @param extension The openPMD filename extension. + * @param threadSize The MPI size. + * @param dt The openPMD datatype. + * @param iterations The number of iterations per compression strategy. + * @return A pair of write and read time measurements. + */ + std::pair getReport( + int rank, + std::string jsonConfig, + std::string extension, + int threadSize, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations); + +private: + template + struct MPIDatatype + {}; + + template + struct MPIDatatype { - using rep = typename Duration::rep; - //auto mpi_dt = MPIDatatype::dt; - int rank; - MPI_Comm_rank( - communicator, - &rank - ); - int size; - MPI_Comm_size( - communicator, - &size - ); - MPI_Comm restricted; - MPI_Comm_split( - communicator, - rank < threadSize ? 0 : MPI_UNDEFINED, - rank, - &restricted - ); - rep readWrite[2]; - if( rank < threadSize ) - { - readWrite[0] = - report.first - .count( ); - readWrite[1] = - report.second - .count( ); - } - rep * recv = nullptr; - if( rank == rootThread ) - { - recv = new rep[2 * threadSize]; - } + MPI_Datatype dt = MPI_CHAR; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED_CHAR; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_SHORT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_INT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_LONG; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_FLOAT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_DOUBLE; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED_SHORT; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_UNSIGNED_LONG; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_LONG_DOUBLE; + }; + template + struct MPIDatatype + { + MPI_Datatype dt = MPI_LONG_LONG_INT; + }; - if( restricted != MPI_COMM_NULL ) - { - MPI_Gather( - readWrite, - 2, // should be 2 but doesnt work then.. 
- this->mpiType, - recv, - 2, - this->mpiType, - rootThread, - restricted - ); - } + MPIDatatype m_mpiDatatype; + MPI_Datatype mpiType = m_mpiDatatype.dt; +}; +// implementation - if( rank == rootThread ) - { - for( int i = 0; i < threadSize; i++ ) - { - Duration dWrite { recv[2 * i] }; - Duration dRead { recv[2 * i + 1] }; - this->durations - .emplace( - std::make_tuple( - i, - jsonConfig, - extension, - threadSize, - dt, - iterations - ), - std::make_pair( - dWrite, - dRead - ) - ); - } - delete[] recv; - } - if( restricted != MPI_COMM_NULL ) - { - MPI_Comm_free( &restricted ); - } +template +void MPIBenchmarkReport::addReport( + int rootThread, + std::string jsonConfig, + std::string extension, + int threadSize, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations, + std::pair const &report) +{ + using rep = typename Duration::rep; + // auto mpi_dt = MPIDatatype::dt; + int rank; + MPI_Comm_rank(communicator, &rank); + int size; + MPI_Comm_size(communicator, &size); + MPI_Comm restricted; + MPI_Comm_split( + communicator, rank < threadSize ? 0 : MPI_UNDEFINED, rank, &restricted); + rep readWrite[2]; + if (rank < threadSize) + { + readWrite[0] = report.first.count(); + readWrite[1] = report.second.count(); + } + rep *recv = nullptr; + if (rank == rootThread) + { + recv = new rep[2 * threadSize]; } - template< typename Duration > - MPIBenchmarkReport< Duration >::MPIBenchmarkReport( MPI_Comm comm ): - communicator {comm} - {} + if (restricted != MPI_COMM_NULL) + { + MPI_Gather( + readWrite, + 2, // should be 2 but doesnt work then.. + this->mpiType, + recv, + 2, + this->mpiType, + rootThread, + restricted); + } - template< typename Duration > - std::pair< - Duration, - Duration - > MPIBenchmarkReport< Duration >::getReport( - int rank, - std::string jsonConfig, - std::string extension, - int threadSize, - Datatype dt, - typename decltype( Series::iterations )::key_type iterations - ) + if (rank == rootThread) { - auto - it = - this->durations - .find( - std::make_tuple( - rank, - jsonConfig, - extension, - threadSize, - dt, - iterations - ) - ); - if( it == - this->durations - .end( ) ) + for (int i = 0; i < threadSize; i++) { - throw std::runtime_error( "Requested report not found. (Reports are available on the root thread only)" ); - } - else - { - return it->second; + Duration dWrite{recv[2 * i]}; + Duration dRead{recv[2 * i + 1]}; + this->durations.emplace( + std::make_tuple( + i, jsonConfig, extension, threadSize, dt, iterations), + std::make_pair(dWrite, dRead)); } + delete[] recv; + } + if (restricted != MPI_COMM_NULL) + { + MPI_Comm_free(&restricted); } +} + +template +MPIBenchmarkReport::MPIBenchmarkReport(MPI_Comm comm) + : communicator{comm} +{} +template +std::pair MPIBenchmarkReport::getReport( + int rank, + std::string jsonConfig, + std::string extension, + int threadSize, + Datatype dt, + typename decltype(Series::iterations)::key_type iterations) +{ + auto it = this->durations.find(std::make_tuple( + rank, jsonConfig, extension, threadSize, dt, iterations)); + if (it == this->durations.end()) + { + throw std::runtime_error( + "Requested report not found. 
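Not part of the patch: a short sketch of how the gathered results might be queried afterwards. It assumes the benchmark ran with root rank 0 and that a configuration matching the key below was registered; every key value here is illustrative.

    // Editorial sketch -- not part of the diff. The key passed to getReport()
    // must match a configuration given to addConfiguration(); values here are
    // illustrative, and the root thread is assumed to be rank 0.
    #include <openPMD/benchmark/mpi/MPIBenchmarkReport.hpp>
    #include <chrono>
    #include <iostream>
    #include <mpi.h>

    using Duration = std::chrono::high_resolution_clock::duration;

    void printRootReport(openPMD::MPIBenchmarkReport<Duration> &report, int nRanks)
    {
        int rank;
        MPI_Comm_rank(report.communicator, &rank);
        if (rank != 0)
            return; // results live on the root thread only

        auto times = report.getReport(
            /* rank       */ 0,
            /* jsonConfig */ "{}",
            /* extension  */ "bp",
            /* threadSize */ nRanks,
            openPMD::Datatype::ULONG,
            /* iterations */ 10);
        std::cout << "write: " << times.first.count()
                  << "  read: " << times.second.count() << '\n';
    }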
(Reports are available on the root " + "thread only)"); + } + else + { + return it->second; + } } +} // namespace openPMD + #endif diff --git a/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp b/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp index 9569fd16b7..78f955524b 100644 --- a/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp +++ b/include/openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp @@ -24,24 +24,16 @@ #include "openPMD/Dataset.hpp" #include "openPMD/benchmark/mpi/BlockSlicer.hpp" - namespace openPMD { - class OneDimensionalBlockSlicer : - public BlockSlicer - { - public: - Extent::value_type m_dim; +class OneDimensionalBlockSlicer : public BlockSlicer +{ +public: + Extent::value_type m_dim; - explicit OneDimensionalBlockSlicer( Extent::value_type dim = 0 ); + explicit OneDimensionalBlockSlicer(Extent::value_type dim = 0); - std::pair< - Offset, - Extent - > sliceBlock( - Extent & totalExtent, - int size, - int rank - ) override; - }; -} + std::pair + sliceBlock(Extent &totalExtent, int size, int rank) override; +}; +} // namespace openPMD diff --git a/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp b/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp index c384eeb5aa..786d4a134d 100644 --- a/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp +++ b/include/openPMD/benchmark/mpi/RandomDatasetFiller.hpp @@ -21,132 +21,98 @@ #pragma once - -#include "openPMD/benchmark/mpi/DatasetFiller.hpp" #include "openPMD/Dataset.hpp" -#include +#include "openPMD/benchmark/mpi/DatasetFiller.hpp" #include - +#include namespace openPMD { - template< - typename Distr, - typename T = typename Distr::result_type - > - class RandomDatasetFiller : - public DatasetFiller< T > - { - - private: - Distr distr; - std::default_random_engine engine; - std::shared_ptr< T > buffered; - public: - using resultType = T; - - - explicit RandomDatasetFiller( - Distr distribution, - Extent::value_type numOfItems = 0 - ) : - DatasetFiller< T >( numOfItems ), - distr( distribution ) - {} - - - std::shared_ptr< T > produceData( ) override - { - if( this->buffered ) - { - return buffered; - } - auto res = std::shared_ptr< T > { - new T[this->m_numberOfItems], - []( T * d ) - { - delete[] d; - } - }; - auto ptr = res.get( ); - for( typename Extent::value_type i = 0; - i < this->m_numberOfItems; - i++ ) - { - ptr[i] = this->distr( this->engine ); - } - return res; - } +template +class RandomDatasetFiller : public DatasetFiller +{ +private: + Distr distr; + std::default_random_engine engine; + std::shared_ptr buffered; - /** - * - * @tparam X Dummy template parameter such that the RandomDatasetFiller is - * usable also when this function's implementation does not work on the - * distribution's concrete type. - * @param numberOfItems Number of items to be produced per call of - * produceData. - * @param lower Lower bound for the random values to be generated. - * @param upper Upper bound for the random values to be generated. - * @return An instance of RandomDatasetFiller matching the given parameters. 
- */ - template< typename X = Distr > - static RandomDatasetFiller< - X, - T - > makeRandomDatasetFiller( - Extent::value_type numberOfItems, - typename X::result_type lower, - typename X::result_type upper - ) - { - return RandomDatasetFiller< X >( - X( - lower, - upper - ), - numberOfItems - ); - } +public: + using resultType = T; + explicit RandomDatasetFiller( + Distr distribution, Extent::value_type numOfItems = 0) + : DatasetFiller(numOfItems), distr(distribution) + {} - void setSeed( std::default_random_engine::result_type seed ) + std::shared_ptr produceData() override + { + if (this->buffered) { - this->engine = std::default_random_engine( seed ); + return buffered; } - - - void randomSeed( ) + auto res = std::shared_ptr{ + new T[this->m_numberOfItems], [](T *d) { delete[] d; }}; + auto ptr = res.get(); + for (typename Extent::value_type i = 0; i < this->m_numberOfItems; i++) { - std::random_device rd; - this->engine = std::default_random_engine( rd( ) ); + ptr[i] = this->distr(this->engine); } + return res; + } + + /** + * + * @tparam X Dummy template parameter such that the RandomDatasetFiller is + * usable also when this function's implementation does not work on the + * distribution's concrete type. + * @param numberOfItems Number of items to be produced per call of + * produceData. + * @param lower Lower bound for the random values to be generated. + * @param upper Upper bound for the random values to be generated. + * @return An instance of RandomDatasetFiller matching the given parameters. + */ + template + static RandomDatasetFiller makeRandomDatasetFiller( + Extent::value_type numberOfItems, + typename X::result_type lower, + typename X::result_type upper) + { + return RandomDatasetFiller(X(lower, upper), numberOfItems); + } + void setSeed(std::default_random_engine::result_type seed) + { + this->engine = std::default_random_engine(seed); + } - /** - * Activate buffer mode. Create a bunch of data to write (instantly) - * and return that upon calling (). - */ - void bufferMode( ) + void randomSeed() + { + std::random_device rd; + this->engine = std::default_random_engine(rd()); + } + + /** + * Activate buffer mode. Create a bunch of data to write (instantly) + * and return that upon calling (). 
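Not part of the patch: a usage sketch for the filler reformatted above; the distribution type, bounds and item count are illustrative.

    // Editorial sketch -- not part of the diff.
    #include <openPMD/benchmark/mpi/RandomDatasetFiller.hpp>
    #include <memory>
    #include <random>

    void fillerSketch()
    {
        using Distr = std::uniform_int_distribution<unsigned long>;
        // 1000 items per produceData() call, drawn uniformly from [0, 100]
        auto filler =
            openPMD::RandomDatasetFiller<Distr>::makeRandomDatasetFiller(
                1000, 0, 100);
        filler.setSeed(42);  // reproducible sequence
        filler.bufferMode(); // generate once up front, reuse the same buffer
        std::shared_ptr<unsigned long> data = filler.produceData();
        (void)data;
    }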
+ */ + void bufferMode() + { + if (!this->buffered) { - if( !this->buffered ) - { - this->buffered = this->produceData( ); - } + this->buffered = this->produceData(); } + } - - void setNumberOfItems( Extent::value_type numItems ) override + void setNumberOfItems(Extent::value_type numItems) override + { + this->m_numberOfItems = numItems; + if (this->buffered) { - this->m_numberOfItems = numItems; - if( this->buffered ) - { - this->buffered - .reset( ); - this->buffered = this->produceData( ); - } + this->buffered.reset(); + this->buffered = this->produceData(); } + } +}; - }; - -} +} // namespace openPMD diff --git a/include/openPMD/binding/python/Numpy.hpp b/include/openPMD/binding/python/Numpy.hpp index 291aa97a66..f0fc7c1b84 100644 --- a/include/openPMD/binding/python/Numpy.hpp +++ b/include/openPMD/binding/python/Numpy.hpp @@ -22,196 +22,197 @@ #include "openPMD/Datatype.hpp" +#include #include #include -#include -#include #include - +#include namespace openPMD { - inline Datatype - dtype_from_numpy( pybind11::dtype const dt ) +inline Datatype dtype_from_numpy(pybind11::dtype const dt) +{ + // ref: https://docs.scipy.org/doc/numpy/user/basics.types.html + // ref: https://github.com/numpy/numpy/issues/10678#issuecomment-369363551 + if (dt.is(pybind11::dtype("b"))) + return Datatype::CHAR; + else if (dt.is(pybind11::dtype("B"))) + return Datatype::UCHAR; + else if (dt.is(pybind11::dtype("short"))) + return Datatype::SHORT; + else if (dt.is(pybind11::dtype("intc"))) + return Datatype::INT; + else if (dt.is(pybind11::dtype("int_"))) + return Datatype::LONG; + else if (dt.is(pybind11::dtype("longlong"))) + return Datatype::LONGLONG; + else if (dt.is(pybind11::dtype("ushort"))) + return Datatype::USHORT; + else if (dt.is(pybind11::dtype("uintc"))) + return Datatype::UINT; + else if (dt.is(pybind11::dtype("uint"))) + return Datatype::ULONG; + else if (dt.is(pybind11::dtype("ulonglong"))) + return Datatype::ULONGLONG; + else if (dt.is(pybind11::dtype("clongdouble"))) + return Datatype::CLONG_DOUBLE; + else if (dt.is(pybind11::dtype("cdouble"))) + return Datatype::CDOUBLE; + else if (dt.is(pybind11::dtype("csingle"))) + return Datatype::CFLOAT; + else if (dt.is(pybind11::dtype("longdouble"))) + return Datatype::LONG_DOUBLE; + else if (dt.is(pybind11::dtype("double"))) + return Datatype::DOUBLE; + else if (dt.is(pybind11::dtype("single"))) + return Datatype::FLOAT; + else if (dt.is(pybind11::dtype("bool"))) + return Datatype::BOOL; + else { - // ref: https://docs.scipy.org/doc/numpy/user/basics.types.html - // ref: https://github.com/numpy/numpy/issues/10678#issuecomment-369363551 - if( dt.is(pybind11::dtype("b")) ) - return Datatype::CHAR; - else if( dt.is(pybind11::dtype("B")) ) - return Datatype::UCHAR; - else if( dt.is(pybind11::dtype("short")) ) - return Datatype::SHORT; - else if( dt.is(pybind11::dtype("intc")) ) - return Datatype::INT; - else if( dt.is(pybind11::dtype("int_")) ) - return Datatype::LONG; - else if( dt.is(pybind11::dtype("longlong")) ) - return Datatype::LONGLONG; - else if( dt.is(pybind11::dtype("ushort")) ) - return Datatype::USHORT; - else if( dt.is(pybind11::dtype("uintc")) ) - return Datatype::UINT; - else if( dt.is(pybind11::dtype("uint")) ) - return Datatype::ULONG; - else if( dt.is(pybind11::dtype("ulonglong")) ) - return Datatype::ULONGLONG; - else if( dt.is(pybind11::dtype("clongdouble")) ) - return Datatype::CLONG_DOUBLE; - else if( dt.is(pybind11::dtype("cdouble")) ) - return Datatype::CDOUBLE; - else if( dt.is(pybind11::dtype("csingle")) ) - return 
Datatype::CFLOAT; - else if( dt.is(pybind11::dtype("longdouble")) ) - return Datatype::LONG_DOUBLE; - else if( dt.is(pybind11::dtype("double")) ) - return Datatype::DOUBLE; - else if( dt.is(pybind11::dtype("single")) ) - return Datatype::FLOAT; - else if( dt.is(pybind11::dtype("bool")) ) - return Datatype::BOOL; - else { - pybind11::print(dt); - throw std::runtime_error("Datatype '...' not known in 'dtype_from_numpy'!"); // _s.format(dt) - } + pybind11::print(dt); + throw std::runtime_error( + "Datatype '...' not known in 'dtype_from_numpy'!"); // _s.format(dt) } +} - /** Return openPMD::Datatype from py::buffer_info::format - */ - inline Datatype - dtype_from_bufferformat( std::string const & fmt ) - { - using DT = Datatype; +/** Return openPMD::Datatype from py::buffer_info::format + */ +inline Datatype dtype_from_bufferformat(std::string const &fmt) +{ + using DT = Datatype; - // refs: - // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html - // https://docs.python.org/3/library/struct.html#format-characters - // std::cout << " scalar type '" << fmt << "'" << std::endl; - // typestring: encoding + type + number of bytes - if( fmt.find("?") != std::string::npos ) - return DT::BOOL; - else if( fmt.find("b") != std::string::npos ) - return DT::CHAR; - else if( fmt.find("h") != std::string::npos ) - return DT::SHORT; - else if( fmt.find("i") != std::string::npos ) - return DT::INT; - else if( fmt.find("l") != std::string::npos ) - return DT::LONG; - else if( fmt.find("q") != std::string::npos ) - return DT::LONGLONG; - else if( fmt.find("B") != std::string::npos ) - return DT::UCHAR; - else if( fmt.find("H") != std::string::npos ) - return DT::USHORT; - else if( fmt.find("I") != std::string::npos ) - return DT::UINT; - else if( fmt.find("L") != std::string::npos ) - return DT::ULONG; - else if( fmt.find("Q") != std::string::npos ) - return DT::ULONGLONG; - else if( fmt.find("Zf") != std::string::npos ) - return DT::CFLOAT; - else if( fmt.find("Zd") != std::string::npos ) - return DT::CDOUBLE; - else if( fmt.find("Zg") != std::string::npos ) - return DT::CLONG_DOUBLE; - else if( fmt.find("f") != std::string::npos ) - return DT::FLOAT; - else if( fmt.find("d") != std::string::npos ) - return DT::DOUBLE; - else if( fmt.find("g") != std::string::npos ) - return DT::LONG_DOUBLE; - else - throw std::runtime_error("dtype_from_bufferformat: Unknown " - "Python type '" + fmt + "'"); - } + // refs: + // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html + // https://docs.python.org/3/library/struct.html#format-characters + // std::cout << " scalar type '" << fmt << "'" << std::endl; + // typestring: encoding + type + number of bytes + if (fmt.find("?") != std::string::npos) + return DT::BOOL; + else if (fmt.find("b") != std::string::npos) + return DT::CHAR; + else if (fmt.find("h") != std::string::npos) + return DT::SHORT; + else if (fmt.find("i") != std::string::npos) + return DT::INT; + else if (fmt.find("l") != std::string::npos) + return DT::LONG; + else if (fmt.find("q") != std::string::npos) + return DT::LONGLONG; + else if (fmt.find("B") != std::string::npos) + return DT::UCHAR; + else if (fmt.find("H") != std::string::npos) + return DT::USHORT; + else if (fmt.find("I") != std::string::npos) + return DT::UINT; + else if (fmt.find("L") != std::string::npos) + return DT::ULONG; + else if (fmt.find("Q") != std::string::npos) + return DT::ULONGLONG; + else if (fmt.find("Zf") != std::string::npos) + return DT::CFLOAT; + else if (fmt.find("Zd") != 
std::string::npos) + return DT::CDOUBLE; + else if (fmt.find("Zg") != std::string::npos) + return DT::CLONG_DOUBLE; + else if (fmt.find("f") != std::string::npos) + return DT::FLOAT; + else if (fmt.find("d") != std::string::npos) + return DT::DOUBLE; + else if (fmt.find("g") != std::string::npos) + return DT::LONG_DOUBLE; + else + throw std::runtime_error( + "dtype_from_bufferformat: Unknown " + "Python type '" + + fmt + "'"); +} - inline pybind11::dtype - dtype_to_numpy( Datatype const dt ) +inline pybind11::dtype dtype_to_numpy(Datatype const dt) +{ + using DT = Datatype; + switch (dt) { - using DT = Datatype; - switch( dt ) - { - case DT::CHAR: - case DT::VEC_CHAR: - case DT::STRING: - case DT::VEC_STRING: - return pybind11::dtype("b"); - break; - case DT::UCHAR: - case DT::VEC_UCHAR: - return pybind11::dtype("B"); - break; - // case DT::SCHAR: - // case DT::VEC_SCHAR: - // pybind11::dtype("b"); - // break; - case DT::SHORT: - case DT::VEC_SHORT: - return pybind11::dtype("short"); - break; - case DT::INT: - case DT::VEC_INT: - return pybind11::dtype("intc"); - break; - case DT::LONG: - case DT::VEC_LONG: - return pybind11::dtype("int_"); - break; - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return pybind11::dtype("longlong"); - break; - case DT::USHORT: - case DT::VEC_USHORT: - return pybind11::dtype("ushort"); - break; - case DT::UINT: - case DT::VEC_UINT: - return pybind11::dtype("uintc"); - break; - case DT::ULONG: - case DT::VEC_ULONG: - return pybind11::dtype("uint"); - break; - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return pybind11::dtype("ulonglong"); - break; - case DT::FLOAT: - case DT::VEC_FLOAT: - return pybind11::dtype("single"); - break; - case DT::DOUBLE: - case DT::VEC_DOUBLE: - case DT::ARR_DBL_7: - return pybind11::dtype("double"); - break; - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return pybind11::dtype("longdouble"); - break; - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return pybind11::dtype("csingle"); - break; - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return pybind11::dtype("cdouble"); - break; - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return pybind11::dtype("clongdouble"); - break; - case DT::BOOL: - return pybind11::dtype("bool"); // also "?" 
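Not part of the patch: a sketch exercising the dtype mapping helpers above. pybind11::dtype requires a live Python interpreter, so the sketch embeds one; the specific dtypes are illustrative.

    // Editorial sketch -- not part of the diff. pybind11::dtype needs an
    // active Python interpreter, hence the embedded one below.
    #include <openPMD/binding/python/Numpy.hpp>
    #include <pybind11/embed.h>
    #include <cassert>

    void dtypeMappingSketch()
    {
        pybind11::scoped_interpreter guard{};
        using openPMD::Datatype;

        // numpy dtype -> openPMD::Datatype
        assert(
            openPMD::dtype_from_numpy(pybind11::dtype("double")) ==
            Datatype::DOUBLE);

        // buffer-protocol format string -> openPMD::Datatype ("d" is double)
        assert(openPMD::dtype_from_bufferformat("d") == Datatype::DOUBLE);

        // openPMD::Datatype -> numpy dtype; vector types map to their scalar
        assert(openPMD::dtype_to_numpy(Datatype::VEC_DOUBLE)
                   .is(pybind11::dtype("double")));
    }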
- break; - case DT::UNDEFINED: - default: - throw std::runtime_error("dtype_to_numpy: Invalid Datatype '{...}'!"); // _s.format(dt) - break; - } + case DT::CHAR: + case DT::VEC_CHAR: + case DT::STRING: + case DT::VEC_STRING: + return pybind11::dtype("b"); + break; + case DT::UCHAR: + case DT::VEC_UCHAR: + return pybind11::dtype("B"); + break; + // case DT::SCHAR: + // case DT::VEC_SCHAR: + // pybind11::dtype("b"); + // break; + case DT::SHORT: + case DT::VEC_SHORT: + return pybind11::dtype("short"); + break; + case DT::INT: + case DT::VEC_INT: + return pybind11::dtype("intc"); + break; + case DT::LONG: + case DT::VEC_LONG: + return pybind11::dtype("int_"); + break; + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return pybind11::dtype("longlong"); + break; + case DT::USHORT: + case DT::VEC_USHORT: + return pybind11::dtype("ushort"); + break; + case DT::UINT: + case DT::VEC_UINT: + return pybind11::dtype("uintc"); + break; + case DT::ULONG: + case DT::VEC_ULONG: + return pybind11::dtype("uint"); + break; + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return pybind11::dtype("ulonglong"); + break; + case DT::FLOAT: + case DT::VEC_FLOAT: + return pybind11::dtype("single"); + break; + case DT::DOUBLE: + case DT::VEC_DOUBLE: + case DT::ARR_DBL_7: + return pybind11::dtype("double"); + break; + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return pybind11::dtype("longdouble"); + break; + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return pybind11::dtype("csingle"); + break; + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return pybind11::dtype("cdouble"); + break; + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return pybind11::dtype("clongdouble"); + break; + case DT::BOOL: + return pybind11::dtype("bool"); // also "?" + break; + case DT::UNDEFINED: + default: + throw std::runtime_error( + "dtype_to_numpy: Invalid Datatype '{...}'!"); // _s.format(dt) + break; } +} } // namespace openPMD diff --git a/include/openPMD/binding/python/Pickle.hpp b/include/openPMD/binding/python/Pickle.hpp index d455ded75c..b3a07252ba 100644 --- a/include/openPMD/binding/python/Pickle.hpp +++ b/include/openPMD/binding/python/Pickle.hpp @@ -20,9 +20,9 @@ */ #pragma once -#include "openPMD/backend/Attributable.hpp" #include "openPMD/IO/Access.hpp" #include "openPMD/Series.hpp" +#include "openPMD/backend/Attributable.hpp" #include #include @@ -32,60 +32,50 @@ #include #include - namespace openPMD { - /** Helper to Pickle Attributable Classes - * - * @tparam T_Args the types in pybind11::class_ - the first type will be pickled - * @tparam T_SeriesAccessor During unpickle, this accesses the object inside - * a newly constructed series - * @param cl the pybind11 class that gets the pickle methods defined - * @param seriesAccessor accessor from series to object during unpickling - */ - template< typename... T_Args, typename T_SeriesAccessor > - inline void - add_pickle( - pybind11::class_< T_Args... 
> & cl, - T_SeriesAccessor && seriesAccessor - ) - { - namespace py = pybind11; +/** Helper to Pickle Attributable Classes + * + * @tparam T_Args the types in pybind11::class_ - the first type will be pickled + * @tparam T_SeriesAccessor During unpickle, this accesses the object inside + * a newly constructed series + * @param cl the pybind11 class that gets the pickle methods defined + * @param seriesAccessor accessor from series to object during unpickling + */ +template +inline void +add_pickle(pybind11::class_ &cl, T_SeriesAccessor &&seriesAccessor) +{ + namespace py = pybind11; - // helper: get first class in py::class_ - that's the type we pickle - using PickledClass = typename std::tuple_element< - 0, - std::tuple< T_Args... > - >::type; + // helper: get first class in py::class_ - that's the type we pickle + using PickledClass = + typename std::tuple_element<0, std::tuple>::type; - cl.def(py::pickle( - // __getstate__ - []( const PickledClass &a ) { - // Return a tuple that fully encodes the state of the object - Attributable::MyPath const myPath = a.myPath(); - return py::make_tuple( myPath.filePath(), myPath.group ); - }, + cl.def(py::pickle( + // __getstate__ + [](const PickledClass &a) { + // Return a tuple that fully encodes the state of the object + Attributable::MyPath const myPath = a.myPath(); + return py::make_tuple(myPath.filePath(), myPath.group); + }, - // __setstate__ - [&seriesAccessor]( py::tuple t ) { - // our tuple has exactly two elements: filePath & group - if (t.size() != 2) - throw std::runtime_error("Invalid state!"); + // __setstate__ + [&seriesAccessor](py::tuple t) { + // our tuple has exactly two elements: filePath & group + if (t.size() != 2) + throw std::runtime_error("Invalid state!"); - std::string const filename = t[0].cast< std::string >(); - std::vector< std::string > const group = - t[1].cast< std::vector< std::string > >(); + std::string const filename = t[0].cast(); + std::vector const group = + t[1].cast>(); - // Create a new openPMD Series and keep it alive. - // This is a big hack for now, but it works for our use - // case, which is spinning up remote serial read series - // for DASK. - static auto series = openPMD::Series( - filename, - Access::READ_ONLY - ); - return seriesAccessor( series, group ); - } - )); - } + // Create a new openPMD Series and keep it alive. + // This is a big hack for now, but it works for our use + // case, which is spinning up remote serial read series + // for DASK. 
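Not part of the patch: a sketch of how add_pickle() might be attached to a binding. The way `group` is indexed below is purely a guess for illustration; the real layout is whatever Attributable::MyPath::group yields for the pickled object.

    // Editorial sketch -- not part of the diff. The group indexing below is
    // hypothetical; the real layout comes from Attributable::MyPath.
    #include <openPMD/openPMD.hpp>
    #include <openPMD/binding/python/Pickle.hpp>
    #include <pybind11/pybind11.h>
    #include <string>
    #include <vector>

    void bindIterationSketch(pybind11::module &m)
    {
        namespace py = pybind11;
        py::class_<openPMD::Iteration> cl(m, "Iteration");
        openPMD::add_pickle(
            cl,
            [](openPMD::Series &series,
               std::vector<std::string> const &group) -> openPMD::Iteration {
                // assumption: group looks like {"iterations", "<index>", ...}
                auto const index = std::stoull(group.at(1));
                return series.iterations[index];
            });
    }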
+ static auto series = openPMD::Series(filename, Access::READ_ONLY); + return seriesAccessor(series, group); + })); +} } // namespace openPMD diff --git a/include/openPMD/cli/ls.hpp b/include/openPMD/cli/ls.hpp index cefe36682d..1d2313e250 100644 --- a/include/openPMD/cli/ls.hpp +++ b/include/openPMD/cli/ls.hpp @@ -28,90 +28,103 @@ #include #include - namespace openPMD { namespace cli { -namespace ls -{ - inline void - print_help( std::string const program_name ) - { - std::cout << "Usage: " << program_name << " openPMD-series\n"; - std::cout << "List information about an openPMD data series.\n\n"; - std::cout << "Options:\n"; - std::cout << " -h, --help display this help and exit\n"; - std::cout << " -v, --version output version information and exit\n"; - std::cout << "\n"; - std::cout << "Examples:\n"; - std::cout << " " << program_name << " ./samples/git-sample/data%T.h5\n"; - std::cout << " " << program_name << " ./samples/git-sample/data%08T.h5\n"; - std::cout << " " << program_name << " ./samples/serial_write.json\n"; - std::cout << " " << program_name << " ./samples/serial_patch.bp\n"; - } - - inline void - print_version( std::string const program_name ) - { - std::cout << program_name << " (openPMD-api) " - << getVersion() << "\n"; - std::cout << "Copyright 2017-2021 openPMD contributors\n"; - std::cout << "Authors: Axel Huebl et al.\n"; - std::cout << "License: LGPLv3+\n"; - std::cout << "This is free software: you are free to change and redistribute it.\n" - "There is NO WARRANTY, to the extent permitted by law.\n"; - } - - /** Run the openpmd-ls command line tool - * - * @param argv command line arguments 1-N - * @return exit code (zero for success) - */ - inline int - run( std::vector< std::string > const & argv ) + namespace ls { - using namespace openPMD; - auto const argc = argv.size(); + inline void print_help(std::string const program_name) + { + std::cout << "Usage: " << program_name << " openPMD-series\n"; + std::cout << "List information about an openPMD data series.\n\n"; + std::cout << "Options:\n"; + std::cout << " -h, --help display this help and exit\n"; + std::cout + << " -v, --version output version information and exit\n"; + std::cout << "\n"; + std::cout << "Examples:\n"; + std::cout << " " << program_name + << " ./samples/git-sample/data%T.h5\n"; + std::cout << " " << program_name + << " ./samples/git-sample/data%08T.h5\n"; + std::cout << " " << program_name + << " ./samples/serial_write.json\n"; + std::cout << " " << program_name + << " ./samples/serial_patch.bp\n"; + } - if (argc < 2) { - print_help(argv[0]); - return 0; + inline void print_version(std::string const program_name) + { + std::cout << program_name << " (openPMD-api) " << getVersion() + << "\n"; + std::cout << "Copyright 2017-2021 openPMD contributors\n"; + std::cout << "Authors: Axel Huebl et al.\n"; + std::cout << "License: LGPLv3+\n"; + std::cout + << "This is free software: you are free to change and " + "redistribute it.\n" + "There is NO WARRANTY, to the extent permitted by law.\n"; } - for (int c = 1; c < int(argc); c++) { - if (std::string("--help") == argv[c] || std::string("-h") == argv[c]) { + /** Run the openpmd-ls command line tool + * + * @param argv command line arguments 1-N + * @return exit code (zero for success) + */ + inline int run(std::vector const &argv) + { + using namespace openPMD; + auto const argc = argv.size(); + + if (argc < 2) + { print_help(argv[0]); return 0; } - if (std::string("--version") == argv[c] || std::string("-v") == argv[c]) { - print_version(argv[0]); 
- return 0; + + for (int c = 1; c < int(argc); c++) + { + if (std::string("--help") == argv[c] || + std::string("-h") == argv[c]) + { + print_help(argv[0]); + return 0; + } + if (std::string("--version") == argv[c] || + std::string("-v") == argv[c]) + { + print_version(argv[0]); + return 0; + } } - } - if (argc > 2) { - std::cerr << "Too many arguments! See: " << argv[0] << " --help\n"; - return 1; - } + if (argc > 2) + { + std::cerr << "Too many arguments! See: " << argv[0] + << " --help\n"; + return 1; + } - try { - auto s = Series( - argv[1], - Access::READ_ONLY, - R"({"defer_iteration_parsing": true})" - ); + try + { + auto s = Series( + argv[1], + Access::READ_ONLY, + R"({"defer_iteration_parsing": true})"); - helper::listSeries(s, true, std::cout); - } - catch (std::exception const &e) { - std::cerr << "An error occurred while opening the specified openPMD series!\n"; - std::cerr << e.what() << std::endl; - return 2; - } + helper::listSeries(s, true, std::cout); + } + catch (std::exception const &e) + { + std::cerr << "An error occurred while opening the specified " + "openPMD series!\n"; + std::cerr << e.what() << std::endl; + return 2; + } - return 0; - } -} // namespace ls + return 0; + } + } // namespace ls } // namespace cli } // namespace openPMD diff --git a/include/openPMD/config.hpp.in b/include/openPMD/config.hpp.in index 63be511355..fd4e461869 100644 --- a/include/openPMD/config.hpp.in +++ b/include/openPMD/config.hpp.in @@ -21,23 +21,23 @@ #pragma once #ifndef openPMD_HAS_CXX17 -# cmakedefine01 openPMD_HAS_CXX17 +#cmakedefine01 openPMD_HAS_CXX17 #endif #ifndef openPMD_HAVE_MPI -# cmakedefine01 openPMD_HAVE_MPI +#cmakedefine01 openPMD_HAVE_MPI #endif #define openPMD_HAVE_JSON 1 #ifndef openPMD_HAVE_HDF5 -# cmakedefine01 openPMD_HAVE_HDF5 +#cmakedefine01 openPMD_HAVE_HDF5 #endif #ifndef openPMD_HAVE_ADIOS1 -# cmakedefine01 openPMD_HAVE_ADIOS1 +#cmakedefine01 openPMD_HAVE_ADIOS1 #endif #ifndef openPMD_HAVE_ADIOS2 -# cmakedefine01 openPMD_HAVE_ADIOS2 +#cmakedefine01 openPMD_HAVE_ADIOS2 #endif diff --git a/include/openPMD/helper/list_series.hpp b/include/openPMD/helper/list_series.hpp index 1aba8a7b37..de0d5aca40 100644 --- a/include/openPMD/helper/list_series.hpp +++ b/include/openPMD/helper/list_series.hpp @@ -22,9 +22,8 @@ #include "openPMD/Series.hpp" -#include #include - +#include namespace openPMD { @@ -35,13 +34,12 @@ namespace helper * @param series a openPMD data path as in Series::Series * @param longer write more information * @param out an output stream to write textual information to - * @return reference to out as output stream, e.g. to pass the stream on via `operator<<` + * @return reference to out as output stream, e.g. 
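Not part of the patch: the run() entry point reformatted above is what the openpmd-ls binary wraps, and it can also be called directly; the sample path is taken from the help text.

    // Editorial sketch -- not part of the diff.
    #include <openPMD/cli/ls.hpp>
    #include <string>
    #include <vector>

    int main()
    {
        std::vector<std::string> const args{
            "openpmd-ls", "./samples/git-sample/data%T.h5"};
        return openPMD::cli::ls::run(args); // 0 on success, 1/2 on error
    }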
to pass the stream on via + * `operator<<` */ - std::ostream & - listSeries( - Series & series, + std::ostream &listSeries( + Series &series, bool const longer = false, - std::ostream & out = std::cout - ); -} // helper -} // openPMD + std::ostream &out = std::cout); +} // namespace helper +} // namespace openPMD diff --git a/include/openPMD/openPMD.hpp b/include/openPMD/openPMD.hpp index 82403491e1..08853b4d1e 100644 --- a/include/openPMD/openPMD.hpp +++ b/include/openPMD/openPMD.hpp @@ -22,28 +22,29 @@ /** Public definitions of openPMD-api */ -namespace openPMD {} +namespace openPMD +{} // IWYU pragma: begin_exports #include "openPMD/Dataset.hpp" #include "openPMD/Datatype.hpp" #include "openPMD/Error.hpp" -#include "openPMD/IterationEncoding.hpp" #include "openPMD/Iteration.hpp" +#include "openPMD/IterationEncoding.hpp" #include "openPMD/Mesh.hpp" #include "openPMD/ParticlePatches.hpp" #include "openPMD/ParticleSpecies.hpp" #include "openPMD/ReadIterations.hpp" -#include "openPMD/RecordComponent.hpp" #include "openPMD/Record.hpp" +#include "openPMD/RecordComponent.hpp" #include "openPMD/Series.hpp" #include "openPMD/UnitDimension.hpp" #include "openPMD/WriteIterations.hpp" #include "openPMD/backend/Attributable.hpp" #include "openPMD/backend/Attribute.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/Container.hpp" #include "openPMD/backend/MeshRecordComponent.hpp" #include "openPMD/backend/PatchRecord.hpp" diff --git a/include/openPMD/version.hpp b/include/openPMD/version.hpp index 8398e22254..031c5085e7 100644 --- a/include/openPMD/version.hpp +++ b/include/openPMD/version.hpp @@ -33,7 +33,8 @@ #define OPENPMDAPI_VERSION_LABEL "dev" /** @} */ -/** maximum supported version of the openPMD standard (read & write, compile-time) +/** maximum supported version of the openPMD standard (read & write, + * compile-time) * @{ */ #define OPENPMD_STANDARD_MAJOR 1 @@ -51,49 +52,51 @@ /** convert major, minor, patch version into a 1000th-interleaved number */ -#define OPENPMDAPI_VERSIONIFY(major,minor,patch) (major * 1000000 + minor * 1000 + patch) +#define OPENPMDAPI_VERSIONIFY(major, minor, patch) \ + (major * 1000000 + minor * 1000 + patch) /** Compare if the library version is greater or equal than major,minor,patch */ -#define OPENPMDAPI_VERSION_GE(major,minor,patch) \ - (OPENPMDAPI_VERSIONIFY(OPENPMDAPI_VERSION_MAJOR,OPENPMDAPI_VERSION_MINOR,OPENPMDAPI_VERSION_PATCH) >= \ - OPENPMDAPI_VERSIONIFY(major,minor,patch)) +#define OPENPMDAPI_VERSION_GE(major, minor, patch) \ + (OPENPMDAPI_VERSIONIFY( \ + OPENPMDAPI_VERSION_MAJOR, \ + OPENPMDAPI_VERSION_MINOR, \ + OPENPMDAPI_VERSION_PATCH) >= \ + OPENPMDAPI_VERSIONIFY(major, minor, patch)) namespace openPMD { - /** Return the version of the openPMD-api library (run-time) - * - * @return std::string API version (dot separated) - */ - std::string - getVersion( ); +/** Return the version of the openPMD-api library (run-time) + * + * @return std::string API version (dot separated) + */ +std::string getVersion(); - /** Return the maximum supported version of the openPMD standard (read & write, run-time) - * - * @return std::string openPMD standard version (dot separated) - */ - std::string - getStandard( ); +/** Return the maximum supported version of the openPMD standard (read & write, + * run-time) + * + * @return std::string openPMD standard version (dot separated) + */ +std::string getStandard(); - /** Return the minimum 
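Not part of the patch: the compile-time version macro and the run-time queries reformatted above, exercised together.

    // Editorial sketch -- not part of the diff.
    #include <openPMD/version.hpp>
    #include <iostream>

    void versionSketch()
    {
    #if OPENPMDAPI_VERSION_GE(0, 14, 0)
        // compiled against openPMD-api 0.14.0 or newer
    #endif
        std::cout << "openPMD-api " << openPMD::getVersion()
                  << ", max. openPMD standard " << openPMD::getStandard()
                  << "\n";
    }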
supported version of the openPMD standard (read, run-time) - * - * @return std::string minimum openPMD standard version (dot separated) - */ - std::string - getStandardMinimum( ); +/** Return the minimum supported version of the openPMD standard (read, + * run-time) + * + * @return std::string minimum openPMD standard version (dot separated) + */ +std::string getStandardMinimum(); - /** Return the feature variants of the openPMD-api library (run-time) - * - * @return std::map< std::string, bool > with variants such as backends - */ - std::map< std::string, bool > - getVariants( ); +/** Return the feature variants of the openPMD-api library (run-time) + * + * @return std::map< std::string, bool > with variants such as backends + */ +std::map getVariants(); - /** Return the file extensions supported in this variant of the openPMD-api library (run-time) - * - * @return std::vector< std::string > with file extensions - */ - std::vector< std::string > - getFileExtensions( ); +/** Return the file extensions supported in this variant of the openPMD-api + * library (run-time) + * + * @return std::vector< std::string > with file extensions + */ +std::vector getFileExtensions(); } // namespace openPMD diff --git a/src/ChunkInfo.cpp b/src/ChunkInfo.cpp index 56299f02e1..3c01b7b681 100644 --- a/src/ChunkInfo.cpp +++ b/src/ChunkInfo.cpp @@ -22,38 +22,30 @@ #include - namespace openPMD { -ChunkInfo::ChunkInfo( Offset offset_in, Extent extent_in ) - : offset( std::move( offset_in ) ), extent( std::move( extent_in ) ) -{ -} +ChunkInfo::ChunkInfo(Offset offset_in, Extent extent_in) + : offset(std::move(offset_in)), extent(std::move(extent_in)) +{} -bool -ChunkInfo::operator==( ChunkInfo const & other ) const +bool ChunkInfo::operator==(ChunkInfo const &other) const { return this->offset == other.offset && this->extent == other.extent; } WrittenChunkInfo::WrittenChunkInfo( - Offset offset_in, - Extent extent_in, - int sourceID_in ) - : ChunkInfo( std::move( offset_in ), std::move( extent_in ) ) - , sourceID( sourceID_in < 0 ? 0 : sourceID_in ) -{ -} + Offset offset_in, Extent extent_in, int sourceID_in) + : ChunkInfo(std::move(offset_in), std::move(extent_in)) + , sourceID(sourceID_in < 0 ? 
0 : sourceID_in) +{} -WrittenChunkInfo::WrittenChunkInfo( Offset offset_in, Extent extent_in ) - : WrittenChunkInfo( std::move( offset_in ), std::move( extent_in ), 0 ) -{ -} +WrittenChunkInfo::WrittenChunkInfo(Offset offset_in, Extent extent_in) + : WrittenChunkInfo(std::move(offset_in), std::move(extent_in), 0) +{} -bool -WrittenChunkInfo::operator==( WrittenChunkInfo const & other ) const +bool WrittenChunkInfo::operator==(WrittenChunkInfo const &other) const { return this->sourceID == other.sourceID && - this->ChunkInfo::operator==( other ); + this->ChunkInfo::operator==(other); } } // namespace openPMD diff --git a/src/Dataset.cpp b/src/Dataset.cpp index bb89f76423..587598db63 100644 --- a/src/Dataset.cpp +++ b/src/Dataset.cpp @@ -20,33 +20,33 @@ */ #include "openPMD/Dataset.hpp" -#include #include - +#include namespace openPMD { Dataset::Dataset(Datatype d, Extent e, std::string options_in) - : extent{e}, - dtype{d}, - rank{static_cast(e.size())}, - options{std::move(options_in)} -{ } + : extent{e} + , dtype{d} + , rank{static_cast(e.size())} + , options{std::move(options_in)} +{} -Dataset::Dataset( Extent e ) : Dataset( Datatype::UNDEFINED, std::move( e ) ) -{ -} +Dataset::Dataset(Extent e) : Dataset(Datatype::UNDEFINED, std::move(e)) +{} -Dataset & -Dataset::extend( Extent newExtents ) +Dataset &Dataset::extend(Extent newExtents) { - if( newExtents.size() != rank ) - throw std::runtime_error("Dimensionality of extended Dataset must match the original dimensionality"); - for( size_t i = 0; i < newExtents.size(); ++i ) - if( newExtents[i] < extent[i] ) - throw std::runtime_error("New Extent must be equal or greater than previous Extent"); + if (newExtents.size() != rank) + throw std::runtime_error( + "Dimensionality of extended Dataset must match the original " + "dimensionality"); + for (size_t i = 0; i < newExtents.size(); ++i) + if (newExtents[i] < extent[i]) + throw std::runtime_error( + "New Extent must be equal or greater than previous Extent"); extent = newExtents; return *this; } -} // openPMD +} // namespace openPMD diff --git a/src/Datatype.cpp b/src/Datatype.cpp index 34a40ba15a..683cfbb06f 100644 --- a/src/Datatype.cpp +++ b/src/Datatype.cpp @@ -21,407 +21,262 @@ #include "openPMD/Datatype.hpp" #include "openPMD/DatatypeHelpers.hpp" -#include #include -#include #include - +#include +#include namespace openPMD { -void warnWrongDtype(std::string const& key, - Datatype store, - Datatype request) +void warnWrongDtype(std::string const &key, Datatype store, Datatype request) { - std::cerr << "Warning: Attribute '" << key - << "' stored as " << store + std::cerr << "Warning: Attribute '" << key << "' stored as " << store << ", requested as " << request << ". 
Casting unconditionally with possible loss of precision.\n"; } -std::ostream& -operator<<(std::ostream& os, openPMD::Datatype const & d) +std::ostream &operator<<(std::ostream &os, openPMD::Datatype const &d) { using DT = openPMD::Datatype; - switch( d ) + switch (d) { - case DT::CHAR: - os << "CHAR"; - break; - case DT::UCHAR: - os << "UCHAR"; - break; - case DT::SHORT: - os << "SHORT"; - break; - case DT::INT: - os << "INT"; - break; - case DT::LONG: - os << "LONG"; - break; - case DT::LONGLONG: - os << "LONGLONG"; - break; - case DT::USHORT: - os << "USHORT"; - break; - case DT::UINT: - os << "UINT"; - break; - case DT::ULONG: - os << "ULONG"; - break; - case DT::ULONGLONG: - os << "ULONGLONG"; - break; - case DT::FLOAT: - os << "FLOAT"; - break; - case DT::DOUBLE: - os << "DOUBLE"; - break; - case DT::LONG_DOUBLE: - os << "LONG_DOUBLE"; - break; - case DT::CFLOAT: - os << "CFLOAT"; - break; - case DT::CDOUBLE: - os << "CDOUBLE"; - break; - case DT::CLONG_DOUBLE: - os << "CLONG_DOUBLE"; - break; - case DT::STRING: - os << "STRING"; - break; - case DT::VEC_CHAR: - os << "VEC_CHAR"; - break; - case DT::VEC_SHORT: - os << "VEC_SHORT"; - break; - case DT::VEC_INT: - os << "VEC_INT"; - break; - case DT::VEC_LONG: - os << "VEC_LONG"; - break; - case DT::VEC_LONGLONG: - os << "VEC_LONGLONG"; - break; - case DT::VEC_UCHAR: - os << "VEC_UCHAR"; - break; - case DT::VEC_USHORT: - os << "VEC_USHORT"; - break; - case DT::VEC_UINT: - os << "VEC_UINT"; - break; - case DT::VEC_ULONG: - os << "VEC_ULONG"; - break; - case DT::VEC_ULONGLONG: - os << "VEC_ULONGLONG"; - break; - case DT::VEC_FLOAT: - os << "VEC_FLOAT"; - break; - case DT::VEC_DOUBLE: - os << "VEC_DOUBLE"; - break; - case DT::VEC_LONG_DOUBLE: - os << "VEC_LONG_DOUBLE"; - break; - case DT::VEC_CFLOAT: - os << "VEC_CFLOAT"; - break; - case DT::VEC_CDOUBLE: - os << "VEC_CDOUBLE"; - break; - case DT::VEC_CLONG_DOUBLE: - os << "VEC_CLONG_DOUBLE"; - break; - case DT::VEC_STRING: - os << "VEC_STRING"; - break; - case DT::ARR_DBL_7: - os << "ARR_DBL_7"; - break; - case DT::BOOL: - os << "BOOL"; - break; - case DT::UNDEFINED: - os << "UNDEFINED"; - break; + case DT::CHAR: + os << "CHAR"; + break; + case DT::UCHAR: + os << "UCHAR"; + break; + case DT::SHORT: + os << "SHORT"; + break; + case DT::INT: + os << "INT"; + break; + case DT::LONG: + os << "LONG"; + break; + case DT::LONGLONG: + os << "LONGLONG"; + break; + case DT::USHORT: + os << "USHORT"; + break; + case DT::UINT: + os << "UINT"; + break; + case DT::ULONG: + os << "ULONG"; + break; + case DT::ULONGLONG: + os << "ULONGLONG"; + break; + case DT::FLOAT: + os << "FLOAT"; + break; + case DT::DOUBLE: + os << "DOUBLE"; + break; + case DT::LONG_DOUBLE: + os << "LONG_DOUBLE"; + break; + case DT::CFLOAT: + os << "CFLOAT"; + break; + case DT::CDOUBLE: + os << "CDOUBLE"; + break; + case DT::CLONG_DOUBLE: + os << "CLONG_DOUBLE"; + break; + case DT::STRING: + os << "STRING"; + break; + case DT::VEC_CHAR: + os << "VEC_CHAR"; + break; + case DT::VEC_SHORT: + os << "VEC_SHORT"; + break; + case DT::VEC_INT: + os << "VEC_INT"; + break; + case DT::VEC_LONG: + os << "VEC_LONG"; + break; + case DT::VEC_LONGLONG: + os << "VEC_LONGLONG"; + break; + case DT::VEC_UCHAR: + os << "VEC_UCHAR"; + break; + case DT::VEC_USHORT: + os << "VEC_USHORT"; + break; + case DT::VEC_UINT: + os << "VEC_UINT"; + break; + case DT::VEC_ULONG: + os << "VEC_ULONG"; + break; + case DT::VEC_ULONGLONG: + os << "VEC_ULONGLONG"; + break; + case DT::VEC_FLOAT: + os << "VEC_FLOAT"; + break; + case DT::VEC_DOUBLE: + os << "VEC_DOUBLE"; + break; 
+ case DT::VEC_LONG_DOUBLE: + os << "VEC_LONG_DOUBLE"; + break; + case DT::VEC_CFLOAT: + os << "VEC_CFLOAT"; + break; + case DT::VEC_CDOUBLE: + os << "VEC_CDOUBLE"; + break; + case DT::VEC_CLONG_DOUBLE: + os << "VEC_CLONG_DOUBLE"; + break; + case DT::VEC_STRING: + os << "VEC_STRING"; + break; + case DT::ARR_DBL_7: + os << "ARR_DBL_7"; + break; + case DT::BOOL: + os << "BOOL"; + break; + case DT::UNDEFINED: + os << "UNDEFINED"; + break; } return os; } - Datatype stringToDatatype( std::string s ) +Datatype stringToDatatype(std::string s) +{ + static std::unordered_map m{ + {"CHAR", Datatype::CHAR}, + {"UCHAR", Datatype::UCHAR}, + {"SHORT", Datatype::SHORT}, + {"INT", Datatype::INT}, + {"LONG", Datatype::LONG}, + {"LONGLONG", Datatype::LONGLONG}, + {"USHORT", Datatype::USHORT}, + {"UINT", Datatype::UINT}, + {"ULONG", Datatype::ULONG}, + {"ULONGLONG", Datatype::ULONGLONG}, + {"FLOAT", Datatype::FLOAT}, + {"DOUBLE", Datatype::DOUBLE}, + {"LONG_DOUBLE", Datatype::LONG_DOUBLE}, + {"CFLOAT", Datatype::CFLOAT}, + {"CDOUBLE", Datatype::CDOUBLE}, + {"CLONG_DOUBLE", Datatype::CLONG_DOUBLE}, + {"STRING", Datatype::STRING}, + {"VEC_CHAR", Datatype::VEC_CHAR}, + {"VEC_SHORT", Datatype::VEC_SHORT}, + {"VEC_INT", Datatype::VEC_INT}, + {"VEC_LONG", Datatype::VEC_LONG}, + {"VEC_LONGLONG", Datatype::VEC_LONGLONG}, + {"VEC_UCHAR", Datatype::VEC_UCHAR}, + {"VEC_USHORT", Datatype::VEC_USHORT}, + {"VEC_UINT", Datatype::VEC_UINT}, + {"VEC_ULONG", Datatype::VEC_ULONG}, + {"VEC_ULONGLONG", Datatype::VEC_ULONGLONG}, + {"VEC_FLOAT", Datatype::VEC_FLOAT}, + {"VEC_DOUBLE", Datatype::VEC_DOUBLE}, + {"VEC_LONG_DOUBLE", Datatype::VEC_LONG_DOUBLE}, + {"VEC_CFLOAT", Datatype::VEC_CFLOAT}, + {"VEC_CDOUBLE", Datatype::VEC_CDOUBLE}, + {"VEC_CLONG_DOUBLE", Datatype::VEC_CLONG_DOUBLE}, + {"VEC_STRING", Datatype::VEC_STRING}, + {"ARR_DBL_7", Datatype::ARR_DBL_7}, + {"BOOL", Datatype::BOOL}, + {"UNDEFINED", Datatype::UNDEFINED}}; + auto it = m.find(s); + if (it != m.end()) { - static std::unordered_map< - std::string, - Datatype - > m { - { - "CHAR", - Datatype::CHAR - }, - { - "UCHAR", - Datatype::UCHAR - }, - { - "SHORT", - Datatype::SHORT - }, - { - "INT", - Datatype::INT - }, - { - "LONG", - Datatype::LONG - }, - { - "LONGLONG", - Datatype::LONGLONG - }, - { - "USHORT", - Datatype::USHORT - }, - { - "UINT", - Datatype::UINT - }, - { - "ULONG", - Datatype::ULONG - }, - { - "ULONGLONG", - Datatype::ULONGLONG - }, - { - "FLOAT", - Datatype::FLOAT - }, - { - "DOUBLE", - Datatype::DOUBLE - }, - { - "LONG_DOUBLE", - Datatype::LONG_DOUBLE - }, - { - "CFLOAT", - Datatype::CFLOAT - }, - { - "CDOUBLE", - Datatype::CDOUBLE - }, - { - "CLONG_DOUBLE", - Datatype::CLONG_DOUBLE - }, - { - "STRING", - Datatype::STRING - }, - { - "VEC_CHAR", - Datatype::VEC_CHAR - }, - { - "VEC_SHORT", - Datatype::VEC_SHORT - }, - { - "VEC_INT", - Datatype::VEC_INT - }, - { - "VEC_LONG", - Datatype::VEC_LONG - }, - { - "VEC_LONGLONG", - Datatype::VEC_LONGLONG - }, - { - "VEC_UCHAR", - Datatype::VEC_UCHAR - }, - { - "VEC_USHORT", - Datatype::VEC_USHORT - }, - { - "VEC_UINT", - Datatype::VEC_UINT - }, - { - "VEC_ULONG", - Datatype::VEC_ULONG - }, - { - "VEC_ULONGLONG", - Datatype::VEC_ULONGLONG - }, - { - "VEC_FLOAT", - Datatype::VEC_FLOAT - }, - { - "VEC_DOUBLE", - Datatype::VEC_DOUBLE - }, - { - "VEC_LONG_DOUBLE", - Datatype::VEC_LONG_DOUBLE - }, - { - "VEC_CFLOAT", - Datatype::VEC_CFLOAT - }, - { - "VEC_CDOUBLE", - Datatype::VEC_CDOUBLE - }, - { - "VEC_CLONG_DOUBLE", - Datatype::VEC_CLONG_DOUBLE - }, - { - "VEC_STRING", - Datatype::VEC_STRING - }, - { - 
"ARR_DBL_7", - Datatype::ARR_DBL_7 - }, - { - "BOOL", - Datatype::BOOL - }, - { - "UNDEFINED", - Datatype::UNDEFINED - } - }; - auto it = m.find( s ); - if( it != m.end( ) ) - { - return it->second; - } - else - { - throw std::runtime_error( "Unknown datatype in string deserialization." ); - } + return it->second; } - - - std::string datatypeToString( openPMD::Datatype dt ) + else { - std::stringbuf buf; - std::ostream os(&buf); - os << dt; - return buf.str(); + throw std::runtime_error("Unknown datatype in string deserialization."); } +} - std::vector openPMD_Datatypes{ - Datatype::CHAR , - Datatype::UCHAR, - Datatype::SHORT, - Datatype::INT, - Datatype::LONG, - Datatype::LONGLONG, - Datatype::USHORT, - Datatype::UINT, - Datatype::ULONG, - Datatype::ULONGLONG, - Datatype::FLOAT, - Datatype::DOUBLE, - Datatype::LONG_DOUBLE, - Datatype::CFLOAT, - Datatype::CDOUBLE, - Datatype::CLONG_DOUBLE, - Datatype::STRING, - Datatype::VEC_CHAR, - Datatype::VEC_SHORT, - Datatype::VEC_INT, - Datatype::VEC_LONG, - Datatype::VEC_LONGLONG, - Datatype::VEC_UCHAR, - Datatype::VEC_USHORT, - Datatype::VEC_UINT, - Datatype::VEC_ULONG, - Datatype::VEC_ULONGLONG, - Datatype::VEC_FLOAT, - Datatype::VEC_DOUBLE, - Datatype::VEC_LONG_DOUBLE, - Datatype::VEC_CFLOAT, - Datatype::VEC_CDOUBLE, - Datatype::VEC_CLONG_DOUBLE, - Datatype::VEC_STRING, - Datatype::ARR_DBL_7, - Datatype::BOOL, - Datatype::UNDEFINED - }; +std::string datatypeToString(openPMD::Datatype dt) +{ + std::stringbuf buf; + std::ostream os(&buf); + os << dt; + return buf.str(); +} + +std::vector openPMD_Datatypes{ + Datatype::CHAR, Datatype::UCHAR, Datatype::SHORT, + Datatype::INT, Datatype::LONG, Datatype::LONGLONG, + Datatype::USHORT, Datatype::UINT, Datatype::ULONG, + Datatype::ULONGLONG, Datatype::FLOAT, Datatype::DOUBLE, + Datatype::LONG_DOUBLE, Datatype::CFLOAT, Datatype::CDOUBLE, + Datatype::CLONG_DOUBLE, Datatype::STRING, Datatype::VEC_CHAR, + Datatype::VEC_SHORT, Datatype::VEC_INT, Datatype::VEC_LONG, + Datatype::VEC_LONGLONG, Datatype::VEC_UCHAR, Datatype::VEC_USHORT, + Datatype::VEC_UINT, Datatype::VEC_ULONG, Datatype::VEC_ULONGLONG, + Datatype::VEC_FLOAT, Datatype::VEC_DOUBLE, Datatype::VEC_LONG_DOUBLE, + Datatype::VEC_CFLOAT, Datatype::VEC_CDOUBLE, Datatype::VEC_CLONG_DOUBLE, + Datatype::VEC_STRING, Datatype::ARR_DBL_7, Datatype::BOOL, + Datatype::UNDEFINED}; +Datatype basicDatatype(Datatype dt) +{ + return switchType(dt); +} - Datatype basicDatatype( Datatype dt ) +Datatype toVectorType(Datatype dt) +{ + auto initializer = []() { + std::map res; + for (Datatype d : openPMD_Datatypes) + { + if (d == Datatype::ARR_DBL_7 || d == Datatype::UNDEFINED) + continue; + Datatype basic = basicDatatype(d); + if (basic == d) + continue; + res[basic] = d; + } + return res; + }; + static auto map(initializer()); + auto it = map.find(dt); + if (it != map.end()) { - return switchType< detail::BasicDatatype >( dt ); + return it->second; } - - Datatype toVectorType( Datatype dt ) + else { - auto initializer = []() { - std::map res; - for (Datatype d: openPMD_Datatypes) { - if (d == Datatype::ARR_DBL_7 - || d == Datatype::UNDEFINED) - continue; - Datatype basic = basicDatatype(d); - if (basic == d) - continue; - res[basic] = d; - } - return res; - }; - static auto map (initializer()); - auto it = map.find(dt); - if (it != map.end()) { - return it->second; - } else { - std::cerr << "Encountered non-basic type " << dt << ", aborting." 
-                << std::endl;
-            throw std::runtime_error("toVectorType: passed non-basic type.");
-        }
+        std::cerr << "Encountered non-basic type " << dt << ", aborting."
+                  << std::endl;
+        throw std::runtime_error("toVectorType: passed non-basic type.");
     }
+}

+namespace detail
+{
+    template <typename T>
+    Datatype BasicDatatype::call()
+    {
+        static auto res = BasicDatatypeHelper<T>{}.m_dt;
+        return res;
+    }

-    namespace detail {
-        template< typename T >
-        Datatype BasicDatatype::call()
-        {
-            static auto res = BasicDatatypeHelper<T>{}.m_dt;
-            return res;
-        }
-
-
-        template< int n >
-        Datatype BasicDatatype::call()
-        {
-            throw std::runtime_error( "basicDatatype: received unknown datatype." );
-        }
+    template <int n>
+    Datatype BasicDatatype::call()
+    {
+        throw std::runtime_error("basicDatatype: received unknown datatype.");
     }
-}
+} // namespace detail
+} // namespace openPMD
diff --git a/src/Error.cpp b/src/Error.cpp
index 5492c79859..aa6723e63f 100644
--- a/src/Error.cpp
+++ b/src/Error.cpp
@@ -4,7 +4,7 @@

 namespace openPMD
 {
-const char * Error::what() const noexcept
+const char *Error::what() const noexcept
 {
     return m_what.c_str();
 }
@@ -12,41 +12,38 @@ const char * Error::what() const noexcept
 namespace error
 {
     OperationUnsupportedInBackend::OperationUnsupportedInBackend(
-        std::string backend_in, std::string what )
-        : Error( "Operation unsupported in " + backend_in + ": " + what )
-        , backend{ std::move( backend_in ) }
-    {
-    }
+        std::string backend_in, std::string what)
+        : Error("Operation unsupported in " + backend_in + ": " + what)
+        , backend{std::move(backend_in)}
+    {}

-    WrongAPIUsage::WrongAPIUsage( std::string what )
-        : Error( "Wrong API usage: " + what )
-    {
-    }
+    WrongAPIUsage::WrongAPIUsage(std::string what)
+        : Error("Wrong API usage: " + what)
+    {}

     static std::string concatVector(
-        std::vector< std::string > const & vec,
-        std::string const & intersperse = "." )
+        std::vector<std::string> const &vec,
+        std::string const &intersperse = ".")
     {
-        if( vec.empty() )
+        if (vec.empty())
         {
             return "";
         }
         std::stringstream res;
-        res << vec[ 0 ];
-        for( size_t i = 1; i < vec.size(); ++i )
+        res << vec[0];
+        for (size_t i = 1; i < vec.size(); ++i)
         {
-            res << intersperse << vec[ i ];
+            res << intersperse << vec[i];
         }
         return res.str();
     }

     BackendConfigSchema::BackendConfigSchema(
-        std::vector< std::string > errorLocation_in, std::string what )
+        std::vector<std::string> errorLocation_in, std::string what)
         : Error(
             "Wrong JSON/TOML schema at index '" +
-            concatVector( errorLocation_in ) + "': " + std::move( what ) )
-        , errorLocation( std::move( errorLocation_in ) )
-    {
-    }
-}
-}
+            concatVector(errorLocation_in) + "': " + std::move(what))
+        , errorLocation(std::move(errorLocation_in))
+    {}
+} // namespace error
+} // namespace openPMD
diff --git a/src/Format.cpp b/src/Format.cpp
index 04d68cb2bf..b92c0c8cf5 100644
--- a/src/Format.cpp
+++ b/src/Format.cpp
@@ -18,68 +18,70 @@
  * and the GNU Lesser General Public License along with openPMD-api.
  * If not, see <http://www.gnu.org/licenses/>.
  */
-#include "openPMD/config.hpp"
 #include "openPMD/IO/Format.hpp"
-#include "openPMD/auxiliary/StringManip.hpp"
 #include "openPMD/auxiliary/Environment.hpp"
+#include "openPMD/auxiliary/StringManip.hpp"
+#include "openPMD/config.hpp"

 #include <stdexcept>
-
-namespace openPMD {
-    Format
-    determineFormat(std::string const &filename) {
-        if (auxiliary::ends_with(filename, ".h5"))
-            return Format::HDF5;
-        if (auxiliary::ends_with(filename, ".bp")) {
-            auto const bp_backend = auxiliary::getEnvString(
-                "OPENPMD_BP_BACKEND",
+namespace openPMD
+{
+Format determineFormat(std::string const &filename)
+{
+    if (auxiliary::ends_with(filename, ".h5"))
+        return Format::HDF5;
+    if (auxiliary::ends_with(filename, ".bp"))
+    {
+        auto const bp_backend = auxiliary::getEnvString(
+            "OPENPMD_BP_BACKEND",
 #if openPMD_HAVE_ADIOS2
-                "ADIOS2"
+            "ADIOS2"
 #elif openPMD_HAVE_ADIOS1
-                "ADIOS1"
+            "ADIOS1"
 #else
-                "ADIOS2"
+            "ADIOS2"
 #endif
-            );
-
-            if (bp_backend == "ADIOS2")
-                return Format::ADIOS2;
-            else if (bp_backend == "ADIOS1")
-                return Format::ADIOS1;
-            else
-                throw std::runtime_error(
-                    "Environment variable OPENPMD_BP_BACKEND for .bp backend is neither ADIOS1 nor ADIOS2: " +
-                    bp_backend
-                );
-        }
-        if (auxiliary::ends_with(filename, ".sst"))
-            return Format::ADIOS2_SST;
-        if (auxiliary::ends_with(filename, ".ssc"))
-            return Format::ADIOS2_SSC;
-        if (auxiliary::ends_with(filename, ".json"))
-            return Format::JSON;
+        );

-        // Format might still be specified via JSON
-        return Format::DUMMY;
+        if (bp_backend == "ADIOS2")
+            return Format::ADIOS2;
+        else if (bp_backend == "ADIOS1")
+            return Format::ADIOS1;
+        else
+            throw std::runtime_error(
+                "Environment variable OPENPMD_BP_BACKEND for .bp backend is "
+                "neither ADIOS1 nor ADIOS2: " +
+                bp_backend);
     }
+    if (auxiliary::ends_with(filename, ".sst"))
+        return Format::ADIOS2_SST;
+    if (auxiliary::ends_with(filename, ".ssc"))
+        return Format::ADIOS2_SSC;
+    if (auxiliary::ends_with(filename, ".json"))
+        return Format::JSON;
+
+    // Format might still be specified via JSON
+    return Format::DUMMY;
+}

-    std::string
-    suffix(Format f) {
-        switch (f) {
-            case Format::HDF5:
-                return ".h5";
-            case Format::ADIOS1:
-            case Format::ADIOS2:
-                return ".bp";
-            case Format::ADIOS2_SST:
-                return ".sst";
-            case Format::ADIOS2_SSC:
-                return ".ssc";
-            case Format::JSON:
-                return ".json";
-            default:
-                return "";
-        }
+std::string suffix(Format f)
+{
+    switch (f)
+    {
+    case Format::HDF5:
+        return ".h5";
+    case Format::ADIOS1:
+    case Format::ADIOS2:
+        return ".bp";
+    case Format::ADIOS2_SST:
+        return ".sst";
+    case Format::ADIOS2_SSC:
+        return ".ssc";
+    case Format::JSON:
+        return ".json";
+    default:
+        return "";
     }
+}
 } // namespace openPMD
diff --git a/src/IO/ADIOS/ADIOS1IOHandler.cpp b/src/IO/ADIOS/ADIOS1IOHandler.cpp
index c0380517fd..af7270bc5c 100644
--- a/src/IO/ADIOS/ADIOS1IOHandler.cpp
+++ b/src/IO/ADIOS/ADIOS1IOHandler.cpp
@@ -22,182 +22,264 @@
 #include "openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp"

 #if openPMD_HAVE_ADIOS1
-# include "openPMD/IO/AbstractIOHandlerImpl.hpp"
-
-# include "openPMD/IO/IOTask.hpp"
-# include
-# include
-# include
-# include
-# include
-# include
+#include "openPMD/IO/AbstractIOHandlerImpl.hpp"
+
+#include "openPMD/IO/IOTask.hpp"
+#include
+#include
+#include
+#include
+#include
+#include
 #endif
-
 namespace openPMD
 {
 #if openPMD_HAVE_ADIOS1
-# if openPMD_USE_VERIFY
-# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); }
-# else
-# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 )
-# endif
-
-ADIOS1IOHandlerImpl::ADIOS1IOHandlerImpl(AbstractIOHandler* handler, json::TracingJSON json) - : Base_t(handler) +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif + +ADIOS1IOHandlerImpl::ADIOS1IOHandlerImpl( + AbstractIOHandler *handler, json::TracingJSON json) + : Base_t(handler) { - initJson( std::move( json ) ); + initJson(std::move(json)); } ADIOS1IOHandlerImpl::~ADIOS1IOHandlerImpl() { - for( auto& f : m_openReadFileHandles ) + for (auto &f : m_openReadFileHandles) close(f.second); m_openReadFileHandles.clear(); - if( this->m_handler->m_backendAccess != Access::READ_ONLY ) + if (this->m_handler->m_backendAccess != Access::READ_ONLY) { - for( auto& group : m_attributeWrites ) - for( auto& att : group.second ) + for (auto &group : m_attributeWrites) + for (auto &att : group.second) flush_attribute(group.first, att.first, att.second); - for( auto& f : m_openWriteFileHandles ) + for (auto &f : m_openWriteFileHandles) close(f.second); m_openWriteFileHandles.clear(); } int status; status = adios_read_finalize_method(m_readMethod); - if( status != err_no_error ) - std::cerr << "Internal error: Failed to finalize ADIOS reading method (serial)\n"; + if (status != err_no_error) + std::cerr << "Internal error: Failed to finalize ADIOS reading method " + "(serial)\n"; status = adios_finalize(0); - if( status != err_no_error ) + if (status != err_no_error) std::cerr << "Internal error: Failed to finalize ADIOS (serial)\n"; } -std::future< void > -ADIOS1IOHandlerImpl::flush() +std::future ADIOS1IOHandlerImpl::flush() { using namespace auxiliary; - auto handler = dynamic_cast< ADIOS1IOHandler* >(m_handler); - while( !handler->m_setup.empty() ) + auto handler = dynamic_cast(m_handler); + while (!handler->m_setup.empty()) { - IOTask& i = handler->m_setup.front(); + IOTask &i = handler->m_setup.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::CREATE_FILE: - createFile(i.writable, deref_dynamic_cast< Parameter< Operation::CREATE_FILE > >(i.parameter.get())); - break; - case O::CREATE_PATH: - createPath(i.writable, deref_dynamic_cast< Parameter< O::CREATE_PATH > >(i.parameter.get())); - break; - case O::OPEN_PATH: - openPath(i.writable, deref_dynamic_cast< Parameter< O::OPEN_PATH > >(i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset(i.writable, deref_dynamic_cast< Parameter< O::CREATE_DATASET > >(i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute(i.writable, deref_dynamic_cast< Parameter< O::WRITE_ATT > >(i.parameter.get())); - break; - case O::OPEN_FILE: - openFile(i.writable, deref_dynamic_cast< Parameter< O::OPEN_FILE > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS setup queue"); + case O::CREATE_FILE: + createFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CREATE_PATH: + createPath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_PATH: + openPath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CREATE_DATASET: + createDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::WRITE_ATT: + writeAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_FILE: + openFile( + i.writable, + deref_dynamic_cast>( 
+ i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS setup " + "queue"); } - } catch (...) + } + catch (...) { - std::cerr - << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString( i.operation ) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." - << std::endl; + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." + << std::endl; handler->m_setup.pop(); throw; } handler->m_setup.pop(); } - - while( !handler->m_work.empty() ) + while (!handler->m_work.empty()) { using namespace auxiliary; - IOTask& i = handler->m_work.front(); + IOTask &i = handler->m_work.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::EXTEND_DATASET: - extendDataset(i.writable, deref_dynamic_cast< Parameter< O::EXTEND_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_PATH > >(i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset(i.writable, deref_dynamic_cast< Parameter< O::OPEN_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_FILE: - closeFile(i.writable, *dynamic_cast< Parameter< O::CLOSE_FILE >* >(i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile(i.writable, deref_dynamic_cast< Parameter< O::DELETE_FILE > >(i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath(i.writable, deref_dynamic_cast< Parameter< O::DELETE_PATH > >(i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset(i.writable, deref_dynamic_cast< Parameter< O::DELETE_DATASET > >(i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute(i.writable, deref_dynamic_cast< Parameter< O::DELETE_ATT > >(i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset(i.writable, deref_dynamic_cast< Parameter< O::WRITE_DATASET > >(i.parameter.get())); - break; - case O::READ_DATASET: - readDataset(i.writable, deref_dynamic_cast< Parameter< O::READ_DATASET > >(i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView(i.writable, deref_dynamic_cast< Parameter< O::GET_BUFFER_VIEW > >(i.parameter.get())); - break; - case O::READ_ATT: - readAttribute(i.writable, deref_dynamic_cast< Parameter< O::READ_ATT > >(i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths(i.writable, deref_dynamic_cast< Parameter< O::LIST_PATHS > >(i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets(i.writable, deref_dynamic_cast< Parameter< O::LIST_DATASETS > >(i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes(i.writable, deref_dynamic_cast< Parameter< O::LIST_ATTS > >(i.parameter.get())); - break; - case O::ADVANCE: - advance(i.writable, deref_dynamic_cast< Parameter< O::ADVANCE > >(i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks(i.writable, deref_dynamic_cast< Parameter< O::AVAILABLE_CHUNKS > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS work queue"); + case O::EXTEND_DATASET: + extendDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CLOSE_PATH: + closePath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_DATASET: + openDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case 
O::CLOSE_FILE: + closeFile( + i.writable, + *dynamic_cast *>( + i.parameter.get())); + break; + case O::DELETE_FILE: + deleteFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_PATH: + deletePath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_DATASET: + deleteDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_ATT: + deleteAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::WRITE_DATASET: + writeDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::READ_DATASET: + readDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::GET_BUFFER_VIEW: + getBufferView( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::READ_ATT: + readAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_PATHS: + listPaths( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_DATASETS: + listDatasets( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_ATTS: + listAttributes( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::ADVANCE: + advance( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::AVAILABLE_CHUNKS: + availableChunks( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS work " + "queue"); } - } catch (...) + } + catch (...) { - std::cerr - << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString( i.operation ) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." - << std::endl; + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." 
+ << std::endl; m_handler->m_work.pop(); throw; } @@ -205,82 +287,85 @@ ADIOS1IOHandlerImpl::flush() } int status; - for( auto& file : m_scheduledReads ) + for (auto &file : m_scheduledReads) { - status = adios_perform_reads(file.first, - 1); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to perform ADIOS reads during dataset reading"); + status = adios_perform_reads(file.first, 1); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to perform ADIOS reads during " + "dataset reading"); - for( auto& sel : file.second ) + for (auto &sel : file.second) adios_selection_delete(sel); } m_scheduledReads.clear(); - return std::future< void >(); + return std::future(); } -void -ADIOS1IOHandlerImpl::init() +void ADIOS1IOHandlerImpl::init() { int status; status = adios_init_noxml(MPI_COMM_NULL); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS"); m_readMethod = ADIOS_READ_METHOD_BP; status = adios_read_init_method(m_readMethod, MPI_COMM_NULL, ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); - + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); } #endif #if openPMD_HAVE_ADIOS1 -ADIOS1IOHandler::ADIOS1IOHandler(std::string path, Access at, json::TracingJSON json) - : AbstractIOHandler(std::move(path), at), - m_impl{new ADIOS1IOHandlerImpl(this, std::move(json))} +ADIOS1IOHandler::ADIOS1IOHandler( + std::string path, Access at, json::TracingJSON json) + : AbstractIOHandler(std::move(path), at) + , m_impl{new ADIOS1IOHandlerImpl(this, std::move(json))} { m_impl->init(); } ADIOS1IOHandler::~ADIOS1IOHandler() = default; -std::future< void > -ADIOS1IOHandler::flush() +std::future ADIOS1IOHandler::flush() { return m_impl->flush(); } -void -ADIOS1IOHandler::enqueue(IOTask const& i) +void ADIOS1IOHandler::enqueue(IOTask const &i) { - switch( i.operation ) + switch (i.operation) { - case Operation::CREATE_FILE: - case Operation::CREATE_PATH: - case Operation::OPEN_PATH: - case Operation::CREATE_DATASET: - case Operation::OPEN_FILE: - case Operation::WRITE_ATT: - m_setup.push(i); - return; - default: - m_work.push(i); - return; + case Operation::CREATE_FILE: + case Operation::CREATE_PATH: + case Operation::OPEN_PATH: + case Operation::CREATE_DATASET: + case Operation::OPEN_FILE: + case Operation::WRITE_ATT: + m_setup.push(i); + return; + default: + m_work.push(i); + return; } } -int64_t -ADIOS1IOHandlerImpl::open_write(Writable* writable) +int64_t ADIOS1IOHandlerImpl::open_write(Writable *writable) { auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); std::string mode; - if( m_existsOnDisk[res->second] ) + if (m_existsOnDisk[res->second]) { mode = "u"; /* close the handle that corresponds to the file we want to append to */ - if( m_openReadFileHandles.find(res->second) != m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(res->second) != + m_openReadFileHandles.end()) { close(m_openReadFileHandles[res->second]); m_openReadFileHandles.erase(res->second); @@ -294,55 +379,61 @@ ADIOS1IOHandlerImpl::open_write(Writable* writable) int64_t fd = -1; int status; - status = adios_open(&fd, - res->second->c_str(), - res->second->c_str(), - mode.c_str(), - MPI_COMM_NULL); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed 
to open_write ADIOS file"); + status = adios_open( + &fd, + res->second->c_str(), + res->second->c_str(), + mode.c_str(), + MPI_COMM_NULL); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to open_write ADIOS file"); return fd; } -ADIOS_FILE* -ADIOS1IOHandlerImpl::open_read(std::string const & name) +ADIOS_FILE *ADIOS1IOHandlerImpl::open_read(std::string const &name) { ADIOS_FILE *f = nullptr; - f = adios_read_open_file(name.c_str(), - m_readMethod, - MPI_COMM_NULL); - VERIFY(adios_errno != err_file_not_found, "[ADIOS1] Internal error: ADIOS file not found"); - VERIFY(f != nullptr, "[ADIOS1] Internal error: Failed to open_read ADIOS file"); + f = adios_read_open_file(name.c_str(), m_readMethod, MPI_COMM_NULL); + VERIFY( + adios_errno != err_file_not_found, + "[ADIOS1] Internal error: ADIOS file not found"); + VERIFY( + f != nullptr, + "[ADIOS1] Internal error: Failed to open_read ADIOS file"); return f; } -int64_t -ADIOS1IOHandlerImpl::initialize_group(std::string const &name) +int64_t ADIOS1IOHandlerImpl::initialize_group(std::string const &name) { int status; int64_t group; ADIOS_STATISTICS_FLAG noStatistics = adios_stat_no; status = adios_declare_group(&group, name.c_str(), "", noStatistics); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to declare ADIOS group"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to declare ADIOS group"); status = adios_select_method(group, "POSIX", "", ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to select ADIOS method"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to select ADIOS method"); return group; } #else ADIOS1IOHandler::ADIOS1IOHandler(std::string path, Access at, json::TracingJSON) - : AbstractIOHandler(std::move(path), at) + : AbstractIOHandler(std::move(path), at) { throw std::runtime_error("openPMD-api built without ADIOS1 support"); } ADIOS1IOHandler::~ADIOS1IOHandler() = default; -std::future< void > -ADIOS1IOHandler::flush() +std::future ADIOS1IOHandler::flush() { - return std::future< void >(); + return std::future(); } #endif -} // openPMD +} // namespace openPMD diff --git a/src/IO/ADIOS/ADIOS2Auxiliary.cpp b/src/IO/ADIOS/ADIOS2Auxiliary.cpp index 40cedc00c6..1c4cd9b7f4 100644 --- a/src/IO/ADIOS/ADIOS2Auxiliary.cpp +++ b/src/IO/ADIOS/ADIOS2Auxiliary.cpp @@ -21,240 +21,222 @@ #include "openPMD/config.hpp" #if openPMD_HAVE_ADIOS2 -#include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/IO/ADIOS/ADIOS2Auxiliary.hpp" #include namespace openPMD::detail { - template< typename T > - std::string - ToDatatypeHelper< T >::type() - { - return adios2::GetType< T >(); - } +template +std::string ToDatatypeHelper::type() +{ + return adios2::GetType(); +} - template< typename T > - std::string - ToDatatypeHelper< std::vector< T > >::type() - { - return +template +std::string ToDatatypeHelper>::type() +{ + return - adios2::GetType< T >(); - } + adios2::GetType(); +} - template< typename T, size_t n > - std::string - ToDatatypeHelper< std::array< T, n > >::type() - { - return +template +std::string ToDatatypeHelper>::type() +{ + return - adios2::GetType< T >(); - } + adios2::GetType(); +} - std::string - ToDatatypeHelper< bool >::type() - { - return ToDatatypeHelper< bool_representation >::type(); - } +std::string ToDatatypeHelper::type() +{ + return ToDatatypeHelper::type(); +} - template< typename T > - std::string - ToDatatype::operator()() +template +std::string ToDatatype::operator()() +{ + 
return ToDatatypeHelper::type(); +} + +template +std::string ToDatatype::operator()() +{ + return ""; +} + +Datatype fromADIOS2Type(std::string const &dt, bool verbose) +{ + static std::map map{ + {"string", Datatype::STRING}, + {"char", Datatype::CHAR}, + {"signed char", Datatype::CHAR}, + {"unsigned char", Datatype::UCHAR}, + {"short", Datatype::SHORT}, + {"unsigned short", Datatype::USHORT}, + {"int", Datatype::INT}, + {"unsigned int", Datatype::UINT}, + {"long int", Datatype::LONG}, + {"unsigned long int", Datatype::ULONG}, + {"long long int", Datatype::LONGLONG}, + {"unsigned long long int", Datatype::ULONGLONG}, + {"float", Datatype::FLOAT}, + {"double", Datatype::DOUBLE}, + {"long double", Datatype::LONG_DOUBLE}, + {"float complex", Datatype::CFLOAT}, + {"double complex", Datatype::CDOUBLE}, + {"long double complex", + Datatype::CLONG_DOUBLE}, // does not exist as of 2.7.0 but might come + // later + {"uint8_t", Datatype::UCHAR}, + {"int8_t", Datatype::CHAR}, + {"uint16_t", determineDatatype()}, + {"int16_t", determineDatatype()}, + {"uint32_t", determineDatatype()}, + {"int32_t", determineDatatype()}, + {"uint64_t", determineDatatype()}, + {"int64_t", determineDatatype()}}; + auto it = map.find(dt); + if (it != map.end()) { - return ToDatatypeHelper< T >::type(); + return it->second; } - - template< int n > - std::string - ToDatatype::operator()() + else { - return ""; + if (verbose) + { + std::cerr + << "[ADIOS2] Warning: Encountered unknown ADIOS2 datatype," + " defaulting to UNDEFINED." + << std::endl; + } + return Datatype::UNDEFINED; } +} - Datatype fromADIOS2Type( std::string const & dt, bool verbose ) +template +Extent AttributeInfo::call( + adios2::IO &IO, std::string const &attributeName, VariableOrAttribute voa) +{ + switch (voa) { - static std::map< std::string, Datatype > map{ - { "string", Datatype::STRING }, - { "char", Datatype::CHAR }, - { "signed char", Datatype::CHAR }, - { "unsigned char", Datatype::UCHAR }, - { "short", Datatype::SHORT }, - { "unsigned short", Datatype::USHORT }, - { "int", Datatype::INT }, - { "unsigned int", Datatype::UINT }, - { "long int", Datatype::LONG }, - { "unsigned long int", Datatype::ULONG }, - { "long long int", Datatype::LONGLONG }, - { "unsigned long long int", Datatype::ULONGLONG }, - { "float", Datatype::FLOAT }, - { "double", Datatype::DOUBLE }, - { "long double", Datatype::LONG_DOUBLE }, - { "float complex", Datatype::CFLOAT }, - { "double complex", Datatype::CDOUBLE }, - { "long double complex", Datatype::CLONG_DOUBLE }, // does not exist as of 2.7.0 but might come later - { "uint8_t", Datatype::UCHAR }, - { "int8_t", Datatype::CHAR }, - { "uint16_t", determineDatatype< uint16_t >() }, - { "int16_t", determineDatatype< int16_t >() }, - { "uint32_t", determineDatatype< uint32_t >() }, - { "int32_t", determineDatatype< int32_t >() }, - { "uint64_t", determineDatatype< uint64_t >() }, - { "int64_t", determineDatatype< int64_t >() } - }; - auto it = map.find( dt ); - if( it != map.end() ) + case VariableOrAttribute::Attribute: { + auto attribute = IO.InquireAttribute(attributeName); + if (!attribute) { - return it->second; + throw std::runtime_error( + "[ADIOS2] Internal error: Attribute not present."); } - else + return {attribute.Data().size()}; + } + case VariableOrAttribute::Variable: { + auto variable = IO.InquireVariable(attributeName); + if (!variable) { - if( verbose ) - { - std::cerr - << "[ADIOS2] Warning: Encountered unknown ADIOS2 datatype," - " defaulting to UNDEFINED." 
- << std::endl; - } - return Datatype::UNDEFINED; + throw std::runtime_error( + "[ADIOS2] Internal error: Variable not present."); } - } - - template< typename T > - Extent - AttributeInfo::call( - adios2::IO & IO, - std::string const & attributeName, - VariableOrAttribute voa ) - { - switch( voa ) + auto shape = variable.Shape(); + Extent res; + res.reserve(shape.size()); + for (auto val : shape) { - case VariableOrAttribute::Attribute: - { - auto attribute = IO.InquireAttribute< T >( attributeName ); - if( !attribute ) - { - throw std::runtime_error( - "[ADIOS2] Internal error: Attribute not present." ); - } - return { attribute.Data().size() }; - } - case VariableOrAttribute::Variable: - { - auto variable = IO.InquireVariable< T >( attributeName ); - if( !variable ) - { - throw std::runtime_error( - "[ADIOS2] Internal error: Variable not present." ); - } - auto shape = variable.Shape(); - Extent res; - res.reserve( shape.size() ); - for( auto val : shape ) - { - res.push_back( val ); - } - return res; - } - default: - throw std::runtime_error( "[ADIOS2] Unreachable!" ); + res.push_back(val); } + return res; } + default: + throw std::runtime_error("[ADIOS2] Unreachable!"); + } +} - template< int n, typename... Params > - Extent - AttributeInfo::call( Params &&... ) +template +Extent AttributeInfo::call(Params &&...) +{ + return {0}; +} + +Datatype attributeInfo( + adios2::IO &IO, + std::string const &attributeName, + bool verbose, + VariableOrAttribute voa) +{ + std::string type; + switch (voa) { - return { 0 }; + case VariableOrAttribute::Attribute: + type = IO.AttributeType(attributeName); + break; + case VariableOrAttribute::Variable: + type = IO.VariableType(attributeName); + break; } - - Datatype - attributeInfo( - adios2::IO & IO, - std::string const & attributeName, - bool verbose, - VariableOrAttribute voa ) + if (type.empty()) { - std::string type; - switch( voa ) + if (verbose) { - case VariableOrAttribute::Attribute: - type = IO.AttributeType( attributeName ); - break; - case VariableOrAttribute::Variable: - type = IO.VariableType( attributeName ); - break; + std::cerr << "[ADIOS2] Warning: Attribute with name " + << attributeName << " has no type in backend." + << std::endl; } - if( type.empty() ) + return Datatype::UNDEFINED; + } + else + { + Datatype basicType = fromADIOS2Type(type); + Extent shape = switchAdios2AttributeType( + basicType, IO, attributeName, voa); + + switch (voa) { - if( verbose ) + case VariableOrAttribute::Attribute: { + auto size = shape[0]; + Datatype openPmdType = size == 1 ? basicType + : size == 7 && basicType == Datatype::DOUBLE + ? Datatype::ARR_DBL_7 + : toVectorType(basicType); + return openPmdType; + } + case VariableOrAttribute::Variable: { + if (shape.size() == 0 || (shape.size() == 1 && shape[0] == 1)) { - std::cerr << "[ADIOS2] Warning: Attribute with name " - << attributeName << " has no type in backend." - << std::endl; + // global single value variable + return basicType; } - return Datatype::UNDEFINED; - } - else - { - Datatype basicType = fromADIOS2Type( type ); - Extent shape = switchAdios2AttributeType< AttributeInfo >( - basicType, IO, attributeName, voa ); - - switch( voa ) + else if (shape.size() == 1) { - case VariableOrAttribute::Attribute: - { - auto size = shape[ 0 ]; - Datatype openPmdType = size == 1 - ? basicType - : size == 7 && basicType == Datatype::DOUBLE - ? 
Datatype::ARR_DBL_7 - : toVectorType( basicType ); - return openPmdType; - } - case VariableOrAttribute::Variable: + auto size = shape[0]; + Datatype openPmdType = + size == 7 && basicType == Datatype::DOUBLE + ? Datatype::ARR_DBL_7 + : toVectorType(basicType); + return openPmdType; + } + else if ( + shape.size() == 2 && + (basicType == Datatype::CHAR || basicType == Datatype::UCHAR)) + { + return Datatype::VEC_STRING; + } + else + { + std::stringstream errorMsg; + errorMsg << "[ADIOS2] Unexpected shape for " << attributeName + << ": ["; + for (auto const ext : shape) { - if( shape.size() == 0 || - ( shape.size() == 1 && shape[ 0 ] == 1 ) ) - { - // global single value variable - return basicType; - } - else if( shape.size() == 1 ) - { - auto size = shape[ 0 ]; - Datatype openPmdType = - size == 7 && basicType == Datatype::DOUBLE - ? Datatype::ARR_DBL_7 - : toVectorType( basicType ); - return openPmdType; - } - else if( - shape.size() == 2 && - ( basicType == Datatype::CHAR || - basicType == Datatype::UCHAR ) ) - { - return Datatype::VEC_STRING; - } - else - { - std::stringstream errorMsg; - errorMsg << "[ADIOS2] Unexpected shape for " - << attributeName << ": ["; - for( auto const ext : shape ) - { - errorMsg << std::to_string( ext ) << ", "; - } - errorMsg << "] of type " - << datatypeToString( basicType ); - throw std::runtime_error( errorMsg.str() ); - } + errorMsg << std::to_string(ext) << ", "; } + errorMsg << "] of type " << datatypeToString(basicType); + throw std::runtime_error(errorMsg.str()); } - throw std::runtime_error( "Unreachable!" ); } + } + throw std::runtime_error("Unreachable!"); } +} } // namespace openPMD::detail #endif diff --git a/src/IO/ADIOS/ADIOS2IOHandler.cpp b/src/IO/ADIOS/ADIOS2IOHandler.cpp index a51ef9a9f8..5c0e4c4d9b 100644 --- a/src/IO/ADIOS/ADIOS2IOHandler.cpp +++ b/src/IO/ADIOS/ADIOS2IOHandler.cpp @@ -39,56 +39,53 @@ #include #include - namespace openPMD { #if openPMD_USE_VERIFY -#define VERIFY( CONDITION, TEXT ) \ +#define VERIFY(CONDITION, TEXT) \ { \ - if ( !( CONDITION ) ) \ - throw std::runtime_error( ( TEXT ) ); \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ } #else -#define VERIFY( CONDITION, TEXT ) \ +#define VERIFY(CONDITION, TEXT) \ do \ { \ - (void)sizeof( CONDITION ); \ - } while ( 0 ); + (void)sizeof(CONDITION); \ + } while (0); #endif -#define VERIFY_ALWAYS( CONDITION, TEXT ) \ +#define VERIFY_ALWAYS(CONDITION, TEXT) \ { \ - if ( !( CONDITION ) ) \ - throw std::runtime_error( ( TEXT ) ); \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ } #if openPMD_HAVE_ADIOS2 -# if openPMD_HAVE_MPI +#if openPMD_HAVE_MPI ADIOS2IOHandlerImpl::ADIOS2IOHandlerImpl( - AbstractIOHandler * handler, + AbstractIOHandler *handler, MPI_Comm communicator, json::TracingJSON cfg, - std::string engineType ) - : AbstractIOHandlerImplCommon( handler ) - , m_ADIOS{ communicator, ADIOS2_DEBUG_MODE } - , m_engineType( std::move( engineType ) ) + std::string engineType) + : AbstractIOHandlerImplCommon(handler) + , m_ADIOS{communicator, ADIOS2_DEBUG_MODE} + , m_engineType(std::move(engineType)) { - init( std::move( cfg ) ); + init(std::move(cfg)); } -# endif // openPMD_HAVE_MPI +#endif // openPMD_HAVE_MPI ADIOS2IOHandlerImpl::ADIOS2IOHandlerImpl( - AbstractIOHandler * handler, - json::TracingJSON cfg, - std::string engineType ) - : AbstractIOHandlerImplCommon( handler ) - , m_ADIOS{ ADIOS2_DEBUG_MODE } - , m_engineType( std::move( engineType ) ) + AbstractIOHandler *handler, json::TracingJSON cfg, std::string engineType) + : 
AbstractIOHandlerImplCommon(handler) + , m_ADIOS{ADIOS2_DEBUG_MODE} + , m_engineType(std::move(engineType)) { - init( std::move( cfg ) ); + init(std::move(cfg)); } ADIOS2IOHandlerImpl::~ADIOS2IOHandlerImpl() @@ -99,159 +96,154 @@ ADIOS2IOHandlerImpl::~ADIOS2IOHandlerImpl() * This means that destruction order is nondeterministic. * Let's determinize it (necessary if computing in parallel). */ - using file_t = std::unique_ptr< detail::BufferedActions >; - std::vector< file_t > sorted; - sorted.reserve( m_fileData.size() ); - for( auto & pair : m_fileData ) + using file_t = std::unique_ptr; + std::vector sorted; + sorted.reserve(m_fileData.size()); + for (auto &pair : m_fileData) { - sorted.push_back( std::move( pair.second ) ); + sorted.push_back(std::move(pair.second)); } m_fileData.clear(); std::sort( - sorted.begin(), - sorted.end(), - []( auto const & left, auto const & right ) { + sorted.begin(), sorted.end(), [](auto const &left, auto const &right) { return left->m_file <= right->m_file; - } ); + }); // run the destructors - for( auto & file : sorted ) + for (auto &file : sorted) { // std::unique_ptr interface file.reset(); } } -void -ADIOS2IOHandlerImpl::init( json::TracingJSON cfg ) +void ADIOS2IOHandlerImpl::init(json::TracingJSON cfg) { - if( cfg.json().contains( "adios2" ) ) + if (cfg.json().contains("adios2")) { - m_config = cfg[ "adios2" ]; + m_config = cfg["adios2"]; - if( m_config.json().contains( "schema" ) ) + if (m_config.json().contains("schema")) { - m_schema = - m_config[ "schema" ].json().get< ADIOS2Schema::schema_t >(); + m_schema = m_config["schema"].json().get(); } - if( m_config.json().contains( "use_span_based_put" ) ) + if (m_config.json().contains("use_span_based_put")) { m_useSpanBasedPutByDefault = - m_config[ "use_span_based_put" ].json().get< bool >() - ? UseSpan::Yes - : UseSpan::No; + m_config["use_span_based_put"].json().get() ? UseSpan::Yes + : UseSpan::No; } - auto engineConfig = config( ADIOS2Defaults::str_engine ); - if( !engineConfig.json().is_null() ) + auto engineConfig = config(ADIOS2Defaults::str_engine); + if (!engineConfig.json().is_null()) { auto engineTypeConfig = - config( ADIOS2Defaults::str_type, engineConfig ).json(); - if( !engineTypeConfig.is_null() ) + config(ADIOS2Defaults::str_type, engineConfig).json(); + if (!engineTypeConfig.is_null()) { // convert to string auto maybeEngine = - json::asLowerCaseStringDynamic( engineTypeConfig ); - if( maybeEngine.has_value() ) + json::asLowerCaseStringDynamic(engineTypeConfig); + if (maybeEngine.has_value()) { - m_engineType = std::move( maybeEngine.value() ); + m_engineType = std::move(maybeEngine.value()); } else { throw error::BackendConfigSchema( {"adios2", "engine", "type"}, - "Must be convertible to string type." 
); + "Must be convertible to string type."); } } } auto operators = getOperators(); - if( operators ) + if (operators) { - defaultOperators = std::move( operators.value() ); + defaultOperators = std::move(operators.value()); } } // environment-variable based configuration - m_schema = auxiliary::getEnvNum( "OPENPMD2_ADIOS2_SCHEMA", m_schema ); + m_schema = auxiliary::getEnvNum("OPENPMD2_ADIOS2_SCHEMA", m_schema); } -std::optional< std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > > -ADIOS2IOHandlerImpl::getOperators( json::TracingJSON cfg ) +std::optional> +ADIOS2IOHandlerImpl::getOperators(json::TracingJSON cfg) { - using ret_t = std::optional< std::vector< ParameterizedOperator > >; - std::vector< ParameterizedOperator > res; - if( !cfg.json().contains( "dataset" ) ) + using ret_t = std::optional>; + std::vector res; + if (!cfg.json().contains("dataset")) { return ret_t(); } - auto datasetConfig = cfg[ "dataset" ]; - if( !datasetConfig.json().contains( "operators" ) ) + auto datasetConfig = cfg["dataset"]; + if (!datasetConfig.json().contains("operators")) { return ret_t(); } - auto _operators = datasetConfig[ "operators" ]; - nlohmann::json const & operators = _operators.json(); - for( auto operatorIterator = operators.begin(); + auto _operators = datasetConfig["operators"]; + nlohmann::json const &operators = _operators.json(); + for (auto operatorIterator = operators.begin(); operatorIterator != operators.end(); - ++operatorIterator ) + ++operatorIterator) { - nlohmann::json const & op = operatorIterator.value(); - std::string const & type = op[ "type" ]; + nlohmann::json const &op = operatorIterator.value(); + std::string const &type = op["type"]; adios2::Params adiosParams; - if( op.contains( "parameters" ) ) + if (op.contains("parameters")) { - nlohmann::json const & params = op[ "parameters" ]; - for( auto paramIterator = params.begin(); + nlohmann::json const ¶ms = op["parameters"]; + for (auto paramIterator = params.begin(); paramIterator != params.end(); - ++paramIterator ) + ++paramIterator) { - auto maybeString = - json::asStringDynamic( paramIterator.value() ); - if( maybeString.has_value() ) + auto maybeString = json::asStringDynamic(paramIterator.value()); + if (maybeString.has_value()) { - adiosParams[ paramIterator.key() ] = - std::move( maybeString.value() ); + adiosParams[paramIterator.key()] = + std::move(maybeString.value()); } else { throw error::BackendConfigSchema( - { "adios2", - "dataset", - "operators", - paramIterator.key() }, - "Must be convertible to string type." 
); + {"adios2", "dataset", "operators", paramIterator.key()}, + "Must be convertible to string type."); } } } - std::optional< adios2::Operator > adiosOperator = - getCompressionOperator( type ); - if( adiosOperator ) + std::optional adiosOperator = + getCompressionOperator(type); + if (adiosOperator) { - res.emplace_back( ParameterizedOperator{ - adiosOperator.value(), std::move( adiosParams ) } ); + res.emplace_back(ParameterizedOperator{ + adiosOperator.value(), std::move(adiosParams)}); } } _operators.declareFullyRead(); - return std::make_optional( std::move( res ) ); + return std::make_optional(std::move(res)); } -std::optional< std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > > +std::optional> ADIOS2IOHandlerImpl::getOperators() { - return getOperators( m_config ); + return getOperators(m_config); } -std::string -ADIOS2IOHandlerImpl::fileSuffix() const +std::string ADIOS2IOHandlerImpl::fileSuffix() const { // SST engine adds its suffix unconditionally // so we don't add it - static std::map< std::string, std::string > endings{ - { "sst", "" }, { "staging", "" }, { "bp4", ".bp" }, { "bp5", ".bp" }, - { "bp3", ".bp" }, { "file", ".bp" }, { "hdf5", ".h5" }, - { "nullcore", ".nullcore" }, { "ssc", ".ssc" } - }; - auto it = endings.find( m_engineType ); - if( it != endings.end() ) + static std::map endings{ + {"sst", ""}, + {"staging", ""}, + {"bp4", ".bp"}, + {"bp5", ".bp"}, + {"bp3", ".bp"}, + {"file", ".bp"}, + {"hdf5", ".h5"}, + {"nullcore", ".nullcore"}, + {"ssc", ".ssc"}}; + auto it = endings.find(m_engineType); + if (it != endings.end()) { return it->second; } @@ -261,135 +253,133 @@ ADIOS2IOHandlerImpl::fileSuffix() const } } -std::future< void > -ADIOS2IOHandlerImpl::flush() +std::future ADIOS2IOHandlerImpl::flush() { auto res = AbstractIOHandlerImpl::flush(); - for ( auto & p : m_fileData ) + for (auto &p : m_fileData) { - if ( m_dirty.find( p.first ) != m_dirty.end( ) ) + if (m_dirty.find(p.first) != m_dirty.end()) { - p.second->flush( m_handler->m_flushLevel, /* writeAttributes = */ false ); + p.second->flush( + m_handler->m_flushLevel, /* writeAttributes = */ false); } else { - p.second->drop( ); + p.second->drop(); } } return res; } void ADIOS2IOHandlerImpl::createFile( - Writable * writable, - Parameter< Operation::CREATE_FILE > const & parameters ) + Writable *writable, Parameter const ¶meters) { - VERIFY_ALWAYS( m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Creating a file in read-only mode is not possible." ); + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[ADIOS2] Creating a file in read-only mode is not possible."); - if ( !writable->written ) + if (!writable->written) { std::string name = parameters.name; - std::string suffix( fileSuffix() ); - if( !auxiliary::ends_with( name, suffix ) ) + std::string suffix(fileSuffix()); + if (!auxiliary::ends_with(name, suffix)) { name += suffix; } - auto res_pair = getPossiblyExisting( name ); - InvalidatableFile shared_name = InvalidatableFile( name ); + auto res_pair = getPossiblyExisting(name); + InvalidatableFile shared_name = InvalidatableFile(name); VERIFY_ALWAYS( - !( m_handler->m_backendAccess == Access::READ_WRITE && - ( !std::get< PE_NewlyCreated >( res_pair ) || - auxiliary::file_exists( fullPath( - std::get< PE_InvalidatableFile >( res_pair ) ) ) ) ), - "[ADIOS2] Can only overwrite existing file in CREATE mode." 
); + !(m_handler->m_backendAccess == Access::READ_WRITE && + (!std::get(res_pair) || + auxiliary::file_exists( + fullPath(std::get(res_pair))))), + "[ADIOS2] Can only overwrite existing file in CREATE mode."); - if ( !std::get< PE_NewlyCreated >( res_pair ) ) + if (!std::get(res_pair)) { - auto file = std::get< PE_InvalidatableFile >( res_pair ); - m_dirty.erase( file ); - dropFileData( file ); - file.invalidate( ); + auto file = std::get(res_pair); + m_dirty.erase(file); + dropFileData(file); + file.invalidate(); } - std::string const dir( m_handler->directory ); - if ( !auxiliary::directory_exists( dir ) ) + std::string const dir(m_handler->directory); + if (!auxiliary::directory_exists(dir)) { - auto success = auxiliary::create_directories( dir ); - VERIFY( success, "[ADIOS2] Could not create directory." ); + auto success = auxiliary::create_directories(dir); + VERIFY(success, "[ADIOS2] Could not create directory."); } m_iterationEncoding = parameters.encoding; - associateWithFile( writable, shared_name ); - this->m_dirty.emplace( shared_name ); - getFileData( shared_name, IfFileNotOpen::OpenImplicitly ).m_mode = + associateWithFile(writable, shared_name); + this->m_dirty.emplace(shared_name); + getFileData(shared_name, IfFileNotOpen::OpenImplicitly).m_mode = adios2::Mode::Write; // WORKAROUND // ADIOS2 does not yet implement ReadWrite Mode writable->written = true; - writable->abstractFilePosition = - std::make_shared< ADIOS2FilePosition >( ); + writable->abstractFilePosition = std::make_shared(); // enforce opening the file // lazy opening is deathly in parallel situations - getFileData( shared_name, IfFileNotOpen::OpenImplicitly ); + getFileData(shared_name, IfFileNotOpen::OpenImplicitly); } } void ADIOS2IOHandlerImpl::createPath( - Writable * writable, - const Parameter< Operation::CREATE_PATH > & parameters ) + Writable *writable, const Parameter ¶meters) { std::string path; - refreshFileFromParent( writable, /* preferParentFile = */ true ); + refreshFileFromParent(writable, /* preferParentFile = */ true); /* Sanitize path */ - if ( !auxiliary::starts_with( parameters.path, '/' ) ) + if (!auxiliary::starts_with(parameters.path, '/')) { - path = filePositionToString( setAndGetFilePosition( writable ) ) + "/" + - auxiliary::removeSlashes( parameters.path ); + path = filePositionToString(setAndGetFilePosition(writable)) + "/" + + auxiliary::removeSlashes(parameters.path); } else { - path = "/" + auxiliary::removeSlashes( parameters.path ); + path = "/" + auxiliary::removeSlashes(parameters.path); } /* ADIOS has no concept for explicitly creating paths. * They are implicitly created with the paths of variables/attributes. */ writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS2FilePosition >( - path, ADIOS2FilePosition::GD::GROUP ); + writable->abstractFilePosition = std::make_shared( + path, ADIOS2FilePosition::GD::GROUP); } void ADIOS2IOHandlerImpl::createDataset( - Writable * writable, - const Parameter< Operation::CREATE_DATASET > & parameters ) + Writable *writable, const Parameter ¶meters) { - if ( m_handler->m_backendAccess == Access::READ_ONLY ) + if (m_handler->m_backendAccess == Access::READ_ONLY) { - throw std::runtime_error( "[ADIOS2] Creating a dataset in a file opened as read " - "only is not possible." 
); + throw std::runtime_error( + "[ADIOS2] Creating a dataset in a file opened as read " + "only is not possible."); } - if ( !writable->written ) + if (!writable->written) { /* Sanitize name */ - std::string name = auxiliary::removeSlashes( parameters.name ); + std::string name = auxiliary::removeSlashes(parameters.name); auto const file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto filePos = setAndGetFilePosition( writable, name ); + refreshFileFromParent(writable, /* preferParentFile = */ false); + auto filePos = setAndGetFilePosition(writable, name); filePos->gd = ADIOS2FilePosition::GD::DATASET; - auto const varName = nameOfVariable( writable ); + auto const varName = nameOfVariable(writable); - std::vector< ParameterizedOperator > operators; - json::TracingJSON options = json::parseOptions( - parameters.options, /* considerFiles = */ false ); - if( options.json().contains( "adios2" ) ) + std::vector operators; + json::TracingJSON options = + json::parseOptions(parameters.options, /* considerFiles = */ false); + if (options.json().contains("adios2")) { - json::TracingJSON datasetConfig( options[ "adios2" ] ); - auto datasetOperators = getOperators( datasetConfig ); + json::TracingJSON datasetConfig(options["adios2"]); + auto datasetOperators = getOperators(datasetConfig); - operators = datasetOperators ? std::move( datasetOperators.value() ) + operators = datasetOperators ? std::move(datasetOperators.value()) : defaultOperators; } else @@ -400,17 +390,18 @@ void ADIOS2IOHandlerImpl::createDataset( options, "adios2", "Warning: parts of the backend configuration for ADIOS2 dataset '" + - varName + "' remain unused:\n" ); + varName + "' remain unused:\n"); // cast from openPMD::Extent to adios2::Dims - adios2::Dims const shape( parameters.extent.begin(), parameters.extent.end() ); + adios2::Dims const shape( + parameters.extent.begin(), parameters.extent.end()); - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); - switchAdios2VariableType< detail::VariableDefiner >( - parameters.dtype, fileData.m_IO, varName, operators, shape ); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); + switchAdios2VariableType( + parameters.dtype, fileData.m_IO, varName, operators, shape); fileData.invalidateVariablesMap(); writable->written = true; - m_dirty.emplace( file ); + m_dirty.emplace(file); } } @@ -418,93 +409,85 @@ namespace detail { struct DatasetExtender { - template< typename T, typename... Args > + template static void call( - adios2::IO & IO, - std::string const & variable, - Extent const & newShape ) + adios2::IO &IO, std::string const &variable, Extent const &newShape) { - auto var = IO.InquireVariable< T >( variable ); - if( !var ) + auto var = IO.InquireVariable(variable); + if (!var) { throw std::runtime_error( "[ADIOS2] Unable to retrieve variable for resizing: '" + - variable + "'." 
); + variable + "'."); } adios2::Dims dims; - dims.reserve( newShape.size() ); - for( auto ext : newShape ) + dims.reserve(newShape.size()); + for (auto ext : newShape) { - dims.push_back( ext ); + dims.push_back(ext); } - var.SetShape( dims ); + var.SetShape(dims); } - static constexpr char const * errorMsg = "ADIOS2: extendDataset()"; + static constexpr char const *errorMsg = "ADIOS2: extendDataset()"; }; } // namespace detail -void -ADIOS2IOHandlerImpl::extendDataset( - Writable * writable, - const Parameter< Operation::EXTEND_DATASET > & parameters ) +void ADIOS2IOHandlerImpl::extendDataset( + Writable *writable, const Parameter ¶meters) { VERIFY_ALWAYS( m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot extend datasets in read-only mode." ); - setAndGetFilePosition( writable ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - std::string name = nameOfVariable( writable ); - auto & filedata = getFileData( file, IfFileNotOpen::ThrowError ); - Datatype dt = detail::fromADIOS2Type( filedata.m_IO.VariableType( name ) ); - switchAdios2VariableType< detail::DatasetExtender >( - dt, filedata.m_IO, name, parameters.extent ); + "[ADIOS2] Cannot extend datasets in read-only mode."); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + std::string name = nameOfVariable(writable); + auto &filedata = getFileData(file, IfFileNotOpen::ThrowError); + Datatype dt = detail::fromADIOS2Type(filedata.m_IO.VariableType(name)); + switchAdios2VariableType( + dt, filedata.m_IO, name, parameters.extent); } -void -ADIOS2IOHandlerImpl::openFile( - Writable * writable, - const Parameter< Operation::OPEN_FILE > & parameters ) +void ADIOS2IOHandlerImpl::openFile( + Writable *writable, const Parameter ¶meters) { - if ( !auxiliary::directory_exists( m_handler->directory ) ) + if (!auxiliary::directory_exists(m_handler->directory)) { - throw no_such_file_error( "[ADIOS2] Supplied directory is not valid: " + - m_handler->directory ); + throw no_such_file_error( + "[ADIOS2] Supplied directory is not valid: " + + m_handler->directory); } std::string name = parameters.name; - std::string suffix( fileSuffix() ); - if( !auxiliary::ends_with( name, suffix ) ) + std::string suffix(fileSuffix()); + if (!auxiliary::ends_with(name, suffix)) { name += suffix; } - auto file = std::get< PE_InvalidatableFile >( getPossiblyExisting( name ) ); + auto file = std::get(getPossiblyExisting(name)); - associateWithFile( writable, file ); + associateWithFile(writable, file); writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS2FilePosition >( ); + writable->abstractFilePosition = std::make_shared(); m_iterationEncoding = parameters.encoding; // enforce opening the file // lazy opening is deathly in parallel situations - getFileData( file, IfFileNotOpen::OpenImplicitly ); + getFileData(file, IfFileNotOpen::OpenImplicitly); } -void -ADIOS2IOHandlerImpl::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & ) +void ADIOS2IOHandlerImpl::closeFile( + Writable *writable, Parameter const &) { - auto fileIterator = m_files.find( writable ); - if ( fileIterator != m_files.end( ) ) + auto fileIterator = m_files.find(writable); + if (fileIterator != m_files.end()) { // do not invalidate the file // it still exists, it is just not open - auto it = m_fileData.find( fileIterator->second ); - if ( it != m_fileData.end( ) ) + auto it = m_fileData.find(fileIterator->second); + if (it != 
m_fileData.end()) { /* * No need to finalize unconditionally, destructor will take care @@ -512,250 +495,231 @@ ADIOS2IOHandlerImpl::closeFile( */ it->second->flush( FlushLevel::UserFlush, - []( detail::BufferedActions & ba, adios2::Engine & ) { + [](detail::BufferedActions &ba, adios2::Engine &) { ba.finalize(); }, /* writeAttributes = */ true, - /* flushUnconditionally = */ false ); - m_fileData.erase( it ); + /* flushUnconditionally = */ false); + m_fileData.erase(it); } } } void ADIOS2IOHandlerImpl::openPath( - Writable * writable, const Parameter< Operation::OPEN_PATH > & parameters ) + Writable *writable, const Parameter ¶meters) { /* Sanitize path */ - refreshFileFromParent( writable, /* preferParentFile = */ true ); + refreshFileFromParent(writable, /* preferParentFile = */ true); std::string prefix = - filePositionToString( setAndGetFilePosition( writable->parent ) ); - std::string suffix = auxiliary::removeSlashes( parameters.path ); - std::string infix = suffix.empty() || auxiliary::ends_with( prefix, '/' ) - ? "" - : "/"; + filePositionToString(setAndGetFilePosition(writable->parent)); + std::string suffix = auxiliary::removeSlashes(parameters.path); + std::string infix = + suffix.empty() || auxiliary::ends_with(prefix, '/') ? "" : "/"; /* ADIOS has no concept for explicitly creating paths. * They are implicitly created with the paths of variables/attributes. */ - writable->abstractFilePosition = std::make_shared< ADIOS2FilePosition >( - prefix + infix + suffix, ADIOS2FilePosition::GD::GROUP ); + writable->abstractFilePosition = std::make_shared( + prefix + infix + suffix, ADIOS2FilePosition::GD::GROUP); writable->written = true; } void ADIOS2IOHandlerImpl::openDataset( - Writable * writable, Parameter< Operation::OPEN_DATASET > & parameters ) + Writable *writable, Parameter ¶meters) { - auto name = auxiliary::removeSlashes( parameters.name ); + auto name = auxiliary::removeSlashes(parameters.name); writable->abstractFilePosition.reset(); - auto pos = setAndGetFilePosition( writable, name ); + auto pos = setAndGetFilePosition(writable, name); pos->gd = ADIOS2FilePosition::GD::DATASET; - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto varName = nameOfVariable( writable ); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto varName = nameOfVariable(writable); *parameters.dtype = - detail::fromADIOS2Type( getFileData( file, IfFileNotOpen::ThrowError ) - .m_IO.VariableType( varName ) ); - switchAdios2VariableType< detail::DatasetOpener >( - *parameters.dtype, - this, - file, - varName, - parameters ); + detail::fromADIOS2Type(getFileData(file, IfFileNotOpen::ThrowError) + .m_IO.VariableType(varName)); + switchAdios2VariableType( + *parameters.dtype, this, file, varName, parameters); writable->written = true; } void ADIOS2IOHandlerImpl::deleteFile( - Writable *, const Parameter< Operation::DELETE_FILE > & ) + Writable *, const Parameter &) { - throw std::runtime_error( "[ADIOS2] Backend does not support deletion." ); + throw std::runtime_error("[ADIOS2] Backend does not support deletion."); } void ADIOS2IOHandlerImpl::deletePath( - Writable *, const Parameter< Operation::DELETE_PATH > & ) + Writable *, const Parameter &) { - throw std::runtime_error( "[ADIOS2] Backend does not support deletion." 
); + throw std::runtime_error("[ADIOS2] Backend does not support deletion."); } -void -ADIOS2IOHandlerImpl::deleteDataset( - Writable *, - const Parameter< Operation::DELETE_DATASET > & ) +void ADIOS2IOHandlerImpl::deleteDataset( + Writable *, const Parameter &) { // call filedata.invalidateVariablesMap - throw std::runtime_error( "[ADIOS2] Backend does not support deletion." ); + throw std::runtime_error("[ADIOS2] Backend does not support deletion."); } void ADIOS2IOHandlerImpl::deleteAttribute( - Writable *, const Parameter< Operation::DELETE_ATT > & ) + Writable *, const Parameter &) { // call filedata.invalidateAttributesMap - throw std::runtime_error( "[ADIOS2] Backend does not support deletion." ); + throw std::runtime_error("[ADIOS2] Backend does not support deletion."); } void ADIOS2IOHandlerImpl::writeDataset( - Writable * writable, - const Parameter< Operation::WRITE_DATASET > & parameters ) + Writable *writable, const Parameter ¶meters) { VERIFY_ALWAYS( m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot write data in read-only mode." ); - setAndGetFilePosition( writable ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); + "[ADIOS2] Cannot write data in read-only mode."); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); detail::BufferedPut bp; - bp.name = nameOfVariable( writable ); + bp.name = nameOfVariable(writable); bp.param = parameters; - ba.enqueue( std::move( bp ) ); - m_dirty.emplace( std::move( file ) ); + ba.enqueue(std::move(bp)); + m_dirty.emplace(std::move(file)); writable->written = true; // TODO erst nach dem Schreiben? } void ADIOS2IOHandlerImpl::writeAttribute( - Writable * writable, const Parameter< Operation::WRITE_ATT > & parameters ) + Writable *writable, const Parameter ¶meters) { - switch( attributeLayout() ) - { - case AttributeLayout::ByAdiosAttributes: - switchType< detail::OldAttributeWriter >( - parameters.dtype, - this, - writable, - parameters ); - break; - case AttributeLayout::ByAdiosVariables: { - VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot write attribute in read-only mode." ); - auto pos = setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( - writable, /* preferParentFile = */ false ); - auto fullName = nameOfAttribute( writable, parameters.name ); - auto prefix = filePositionToString( pos ); - - auto & filedata = getFileData( file, IfFileNotOpen::ThrowError ); - filedata.invalidateAttributesMap(); - m_dirty.emplace( std::move( file ) ); - - // this intentionally overwrites previous writes - auto & bufferedWrite = filedata.m_attributeWrites[ fullName ]; - bufferedWrite.name = fullName; - bufferedWrite.dtype = parameters.dtype; - bufferedWrite.resource = parameters.resource; - break; - } - default: - throw std::runtime_error( "Unreachable!" 
); + switch (attributeLayout()) + { + case AttributeLayout::ByAdiosAttributes: + switchType( + parameters.dtype, this, writable, parameters); + break; + case AttributeLayout::ByAdiosVariables: { + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[ADIOS2] Cannot write attribute in read-only mode."); + auto pos = setAndGetFilePosition(writable); + auto file = + refreshFileFromParent(writable, /* preferParentFile = */ false); + auto fullName = nameOfAttribute(writable, parameters.name); + auto prefix = filePositionToString(pos); + + auto &filedata = getFileData(file, IfFileNotOpen::ThrowError); + filedata.invalidateAttributesMap(); + m_dirty.emplace(std::move(file)); + + // this intentionally overwrites previous writes + auto &bufferedWrite = filedata.m_attributeWrites[fullName]; + bufferedWrite.name = fullName; + bufferedWrite.dtype = parameters.dtype; + bufferedWrite.resource = parameters.resource; + break; + } + default: + throw std::runtime_error("Unreachable!"); } } void ADIOS2IOHandlerImpl::readDataset( - Writable * writable, Parameter< Operation::READ_DATASET > & parameters ) + Writable *writable, Parameter ¶meters) { - setAndGetFilePosition( writable ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); detail::BufferedGet bg; - bg.name = nameOfVariable( writable ); + bg.name = nameOfVariable(writable); bg.param = parameters; - ba.enqueue( std::move( bg ) ); - m_dirty.emplace( std::move( file ) ); + ba.enqueue(std::move(bg)); + m_dirty.emplace(std::move(file)); } namespace detail { -struct GetSpan -{ - template< typename T, typename... Args > - static void call( - ADIOS2IOHandlerImpl * impl, - Parameter< Operation::GET_BUFFER_VIEW > & params, - detail::BufferedActions & ba, - std::string const & varName ) - { - auto & IO = ba.m_IO; - auto & engine = ba.getEngine(); - adios2::Variable< T > variable = impl->verifyDataset< T >( - params.offset, params.extent, IO, varName ); - adios2::Dims offset( params.offset.begin(), params.offset.end() ); - adios2::Dims extent( params.extent.begin(), params.extent.end() ); - variable.SetSelection( { std::move( offset ), std::move( extent ) } ); - typename adios2::Variable< T >::Span span = engine.Put( variable ); - params.out->backendManagedBuffer = true; - /* - * SIC! - * Do not emplace span.data() yet. - * Only call span.data() as soon as the user needs the pointer - * (will always be propagated to the backend with parameters.update - * = true). - * This avoids repeated resizing of ADIOS2 internal buffers if calling - * multiple spans. 
- */ - // params.out->ptr = span.data(); - unsigned nextIndex; - if( ba.m_updateSpans.empty() ) - { - nextIndex = 0; - } - else - { - nextIndex = ba.m_updateSpans.rbegin()->first + 1; + struct GetSpan + { + template + static void call( + ADIOS2IOHandlerImpl *impl, + Parameter ¶ms, + detail::BufferedActions &ba, + std::string const &varName) + { + auto &IO = ba.m_IO; + auto &engine = ba.getEngine(); + adios2::Variable variable = impl->verifyDataset( + params.offset, params.extent, IO, varName); + adios2::Dims offset(params.offset.begin(), params.offset.end()); + adios2::Dims extent(params.extent.begin(), params.extent.end()); + variable.SetSelection({std::move(offset), std::move(extent)}); + typename adios2::Variable::Span span = engine.Put(variable); + params.out->backendManagedBuffer = true; + /* + * SIC! + * Do not emplace span.data() yet. + * Only call span.data() as soon as the user needs the pointer + * (will always be propagated to the backend with parameters.update + * = true). + * This avoids repeated resizing of ADIOS2 internal buffers if + * calling multiple spans. + */ + // params.out->ptr = span.data(); + unsigned nextIndex; + if (ba.m_updateSpans.empty()) + { + nextIndex = 0; + } + else + { + nextIndex = ba.m_updateSpans.rbegin()->first + 1; + } + params.out->viewIndex = nextIndex; + std::unique_ptr updateSpan{ + new UpdateSpan{std::move(span)}}; + ba.m_updateSpans.emplace_hint( + ba.m_updateSpans.end(), nextIndex, std::move(updateSpan)); } - params.out->viewIndex = nextIndex; - std::unique_ptr< I_UpdateSpan > updateSpan{ - new UpdateSpan< T >{ std::move( span ) } }; - ba.m_updateSpans.emplace_hint( - ba.m_updateSpans.end(), nextIndex, std::move( updateSpan ) ); - } - static constexpr char const * errorMsg = "ADIOS2: getBufferView()"; -}; + static constexpr char const *errorMsg = "ADIOS2: getBufferView()"; + }; -struct HasOperators -{ - template< typename T > - static bool call( std::string const & name, adios2::IO & IO ) + struct HasOperators { - adios2::Variable< T > variable = IO.InquireVariable< T >( name ); - if( !variable ) + template + static bool call(std::string const &name, adios2::IO &IO) { - return false; + adios2::Variable variable = IO.InquireVariable(name); + if (!variable) + { + return false; + } + return !variable.Operations().empty(); } - return !variable.Operations().empty(); - } - static constexpr char const * errorMsg = "ADIOS2: getBufferView()"; -}; + static constexpr char const *errorMsg = "ADIOS2: getBufferView()"; + }; } // namespace detail -void -ADIOS2IOHandlerImpl::getBufferView( - Writable * writable, - Parameter< Operation::GET_BUFFER_VIEW > & parameters ) +void ADIOS2IOHandlerImpl::getBufferView( + Writable *writable, Parameter ¶meters) { // @todo check access mode - if( m_engineType != "bp4" ) + if (m_engineType != "bp4") { parameters.out->backendManagedBuffer = false; return; } - setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); - std::string name = nameOfVariable( writable ); - switch( m_useSpanBasedPutByDefault ) + std::string name = nameOfVariable(writable); + switch (m_useSpanBasedPutByDefault) { case UseSpan::No: parameters.out->backendManagedBuffer = false; return; case UseSpan::Auto: - if( 
switchAdios2VariableType< detail::HasOperators >( - parameters.dtype, name, ba.m_IO ) ) + if (switchAdios2VariableType( + parameters.dtype, name, ba.m_IO)) { parameters.out->backendManagedBuffer = false; return; @@ -765,79 +729,74 @@ ADIOS2IOHandlerImpl::getBufferView( break; } - if( parameters.update ) + if (parameters.update) { detail::I_UpdateSpan &updater = - *ba.m_updateSpans.at( parameters.out->viewIndex ); + *ba.m_updateSpans.at(parameters.out->viewIndex); parameters.out->ptr = updater.update(); parameters.out->backendManagedBuffer = true; } else { - switchAdios2VariableType< detail::GetSpan >( - parameters.dtype, this, parameters, ba, name ); + switchAdios2VariableType( + parameters.dtype, this, parameters, ba, name); } } namespace detail { -template< typename T > -UpdateSpan< T >::UpdateSpan( adios2::detail::Span< T > span_in ) : - span( std::move( span_in ) ) -{ -} + template + UpdateSpan::UpdateSpan(adios2::detail::Span span_in) + : span(std::move(span_in)) + {} -template< typename T > -void *UpdateSpan< T >::update() -{ - return span.data(); -} + template + void *UpdateSpan::update() + { + return span.data(); + } } // namespace detail void ADIOS2IOHandlerImpl::readAttribute( - Writable * writable, Parameter< Operation::READ_ATT > & parameters ) + Writable *writable, Parameter ¶meters) { - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); - switch( attributeLayout() ) + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); + switch (attributeLayout()) { using AL = AttributeLayout; - case AL::ByAdiosAttributes: - { - detail::OldBufferedAttributeRead bar; - bar.name = nameOfAttribute( writable, parameters.name ); - bar.param = parameters; - ba.enqueue( std::move( bar ) ); - break; - } - case AL::ByAdiosVariables: - { - detail::BufferedAttributeRead bar; - bar.name = nameOfAttribute( writable, parameters.name ); - bar.param = parameters; - ba.m_attributeReads.push_back( std::move( bar ) ); - break; - } - default: - throw std::runtime_error( "Unreachable!" 
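Aside (not part of the patch): the getBufferView()/GetSpan/UpdateSpan code above is a wrapper around ADIOS2's span-based Put API, which hands the caller a pointer into the engine's internal buffer so one host-side copy can be skipped. The standalone sketch below shows that underlying API with made-up file and variable names; it assumes an ADIOS2 build with the BP4 engine, mirroring the m_engineType != "bp4" guard above.

#include <adios2.h>
#include <cstddef>
#include <numeric>

int main()
{
    adios2::ADIOS adios;
    adios2::IO io = adios.DeclareIO("spanExample");
    io.SetEngine("BP4"); // getBufferView() above only engages for BP4
    std::size_t const n = 10;
    adios2::Variable<double> var =
        io.DefineVariable<double>("data", {n}, {0}, {n});

    adios2::Engine engine = io.Open("span_example.bp", adios2::Mode::Write);
    engine.BeginStep();
    // Put() without a data pointer returns a Span into ADIOS2's own buffer.
    adios2::Variable<double>::Span span = engine.Put(var);
    std::iota(span.data(), span.data() + span.size(), 0.0);
    engine.EndStep(); // the buffer contents are consumed here
    engine.Close();
    return 0;
}

Because later definitions may reallocate that internal buffer, the handler re-queries span.data() lazily through UpdateSpan::update() instead of caching the pointer, which is what the long comment in GetSpan::call() explains.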
); + case AL::ByAdiosAttributes: { + detail::OldBufferedAttributeRead bar; + bar.name = nameOfAttribute(writable, parameters.name); + bar.param = parameters; + ba.enqueue(std::move(bar)); + break; + } + case AL::ByAdiosVariables: { + detail::BufferedAttributeRead bar; + bar.name = nameOfAttribute(writable, parameters.name); + bar.param = parameters; + ba.m_attributeReads.push_back(std::move(bar)); + break; } - m_dirty.emplace( std::move( file ) ); + default: + throw std::runtime_error("Unreachable!"); + } + m_dirty.emplace(std::move(file)); } void ADIOS2IOHandlerImpl::listPaths( - Writable * writable, Parameter< Operation::LIST_PATHS > & parameters ) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( writable->written, "[ADIOS2] Internal error: Writable not marked written during path " - "listing" ); - auto file = - refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); - std::string myName = filePositionToString( pos ); - if ( !auxiliary::ends_with( myName, '/' ) ) + "listing"); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); + std::string myName = filePositionToString(pos); + if (!auxiliary::ends_with(myName, '/')) { myName = myName + '/'; } @@ -846,10 +805,10 @@ void ADIOS2IOHandlerImpl::listPaths( * since ADIOS does not have a concept of paths, restore them * from variables and attributes. */ - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); fileData.requireActiveStep(); - std::unordered_set< std::string > subdirs; + std::unordered_set subdirs; /* * When reading an attribute, we cannot distinguish * whether its containing "folder" is a group or a @@ -862,101 +821,99 @@ void ADIOS2IOHandlerImpl::listPaths( * from variables – attributes don't even need to be * inspected. 
*/ - std::vector< std::string > delete_me; + std::vector delete_me; - switch( attributeLayout() ) + switch (attributeLayout()) { using AL = AttributeLayout; - case AL::ByAdiosVariables: + case AL::ByAdiosVariables: { + std::vector vars = + fileData.availableVariablesPrefixed(myName); + for (auto var : vars) { - std::vector< std::string > vars = - fileData.availableVariablesPrefixed( myName ); - for( auto var : vars ) + // since current Writable is a group and no dataset, + // var == "__data__" is not possible + if (auxiliary::ends_with(var, "/__data__")) { - // since current Writable is a group and no dataset, - // var == "__data__" is not possible - if( auxiliary::ends_with( var, "/__data__" ) ) + // here be datasets + var = auxiliary::replace_last(var, "/__data__", ""); + auto firstSlash = var.find_first_of('/'); + if (firstSlash != std::string::npos) { - // here be datasets - var = auxiliary::replace_last( var, "/__data__", "" ); - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - var = var.substr( 0, firstSlash ); - subdirs.emplace( std::move( var ) ); - } - else - { // var is a dataset at the current level - delete_me.push_back( std::move( var ) ); - } + var = var.substr(0, firstSlash); + subdirs.emplace(std::move(var)); } else + { // var is a dataset at the current level + delete_me.push_back(std::move(var)); + } + } + else + { + // here be attributes + auto firstSlash = var.find_first_of('/'); + if (firstSlash != std::string::npos) { - // here be attributes - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - var = var.substr( 0, firstSlash ); - subdirs.emplace( std::move( var ) ); - } + var = var.substr(0, firstSlash); + subdirs.emplace(std::move(var)); } } - break; } - case AL::ByAdiosAttributes: + break; + } + case AL::ByAdiosAttributes: { + std::vector vars = + fileData.availableVariablesPrefixed(myName); + for (auto var : vars) { - std::vector< std::string > vars = - fileData.availableVariablesPrefixed( myName ); - for( auto var : vars ) + auto firstSlash = var.find_first_of('/'); + if (firstSlash != std::string::npos) { - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - var = var.substr( 0, firstSlash ); - subdirs.emplace( std::move( var ) ); - } - else - { // var is a dataset at the current level - delete_me.push_back( std::move( var ) ); - } + var = var.substr(0, firstSlash); + subdirs.emplace(std::move(var)); } - std::vector< std::string > attributes = - fileData.availableAttributesPrefixed( myName ); - for( auto attr : attributes ) + else + { // var is a dataset at the current level + delete_me.push_back(std::move(var)); + } + } + std::vector attributes = + fileData.availableAttributesPrefixed(myName); + for (auto attr : attributes) + { + auto firstSlash = attr.find_first_of('/'); + if (firstSlash != std::string::npos) { - auto firstSlash = attr.find_first_of( '/' ); - if( firstSlash != std::string::npos ) - { - attr = attr.substr( 0, firstSlash ); - subdirs.emplace( std::move( attr ) ); - } + attr = attr.substr(0, firstSlash); + subdirs.emplace(std::move(attr)); } - break; } + break; + } } - for ( auto & d : delete_me ) + for (auto &d : delete_me) { - subdirs.erase( d ); + subdirs.erase(d); } - for ( auto & path : subdirs ) + for (auto &path : subdirs) { - parameters.paths->emplace_back( std::move( path ) ); + parameters.paths->emplace_back(std::move(path)); } } void ADIOS2IOHandlerImpl::listDatasets( - Writable * writable, Parameter< 
Operation::LIST_DATASETS > & parameters ) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( writable->written, "[ADIOS2] Internal error: Writable not marked written during path " - "listing" ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); + "listing"); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); // adios2::Engine & engine = getEngine( file ); - std::string myName = filePositionToString( pos ); - if ( !auxiliary::ends_with( myName, '/' ) ) + std::string myName = filePositionToString(pos); + if (!auxiliary::ends_with(myName, '/')) { myName = myName + '/'; } @@ -966,152 +923,146 @@ void ADIOS2IOHandlerImpl::listDatasets( * from variables and attributes. */ - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); fileData.requireActiveStep(); - std::unordered_set< std::string > subdirs; - for( auto var : fileData.availableVariablesPrefixed( myName ) ) + std::unordered_set subdirs; + for (auto var : fileData.availableVariablesPrefixed(myName)) { - if( attributeLayout() == AttributeLayout::ByAdiosVariables ) + if (attributeLayout() == AttributeLayout::ByAdiosVariables) { // since current Writable is a group and no dataset, // var == "__data__" is not possible - if( !auxiliary::ends_with( var, "/__data__" ) ) + if (!auxiliary::ends_with(var, "/__data__")) { continue; } // variable is now definitely a dataset, let's strip the suffix - var = auxiliary::replace_last( var, "/__data__", "" ); + var = auxiliary::replace_last(var, "/__data__", ""); } // if string still contains a slash, variable is a dataset below the // current group // we only want datasets contained directly within the current group // let's ensure that - auto firstSlash = var.find_first_of( '/' ); - if( firstSlash == std::string::npos ) + auto firstSlash = var.find_first_of('/'); + if (firstSlash == std::string::npos) { - subdirs.emplace( std::move( var ) ); + subdirs.emplace(std::move(var)); } } - for( auto & dataset : subdirs ) + for (auto &dataset : subdirs) { - parameters.datasets->emplace_back( std::move( dataset ) ); + parameters.datasets->emplace_back(std::move(dataset)); } } void ADIOS2IOHandlerImpl::listAttributes( - Writable * writable, Parameter< Operation::LIST_ATTS > & parameters ) + Writable *writable, Parameter ¶meters) { VERIFY_ALWAYS( writable->written, "[ADIOS2] Internal error: Writable not marked " - "written during attribute writing" ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto pos = setAndGetFilePosition( writable ); - auto attributePrefix = filePositionToString( pos ); - if ( attributePrefix == "/" ) + "written during attribute writing"); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto pos = setAndGetFilePosition(writable); + auto attributePrefix = filePositionToString(pos); + if (attributePrefix == "/") { attributePrefix = ""; } - auto & ba = getFileData( file, IfFileNotOpen::ThrowError ); + auto &ba = getFileData(file, IfFileNotOpen::ThrowError); ba.requireActiveStep(); // make sure that the attributes are present - std::vector< std::string > attrs; - switch( attributeLayout() ) + std::vector attrs; + switch (attributeLayout()) { using AL = AttributeLayout; - case AL::ByAdiosAttributes: - attrs = ba.availableAttributesPrefixed( attributePrefix ); - break; - case 
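Aside (not part of the patch): since ADIOS2 has no native group hierarchy, listPaths() and listDatasets() above rebuild it from flat variable names, treating the "/__data__" suffix of the variable-based attribute layout as the marker for datasets. The self-contained sketch below illustrates that string logic with invented names; it is a simplification of the code above, not a drop-in replacement.

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main()
{
    // names already stripped of the current group's common prefix
    std::vector<std::string> vars = {
        "meshes/E/x/__data__",
        "particles/e/position/x/__data__",
        "temperature/__data__",
        "author"};

    std::set<std::string> subgroups;
    std::set<std::string> datasets;
    std::string const suffix = "/__data__";
    for (std::string var : vars)
    {
        bool isDataset = false;
        if (var.size() > suffix.size() &&
            var.compare(var.size() - suffix.size(), suffix.size(), suffix) ==
                0)
        {
            var = var.substr(0, var.size() - suffix.size());
            isDataset = true;
        }
        auto firstSlash = var.find_first_of('/');
        if (firstSlash != std::string::npos)
        {
            // something is nested below: only the first path component is an
            // immediate child, and it must be a group
            subgroups.emplace(var.substr(0, firstSlash));
        }
        else if (isDataset)
        {
            datasets.emplace(std::move(var));
        }
        // else: an attribute variable at this level, ignored for listPaths
    }
    for (auto const &g : subgroups)
        std::cout << "group:   " << g << '\n';
    for (auto const &d : datasets)
        std::cout << "dataset: " << d << '\n';
}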
AL::ByAdiosVariables: - attrs = ba.availableVariablesPrefixed( attributePrefix ); - break; + case AL::ByAdiosAttributes: + attrs = ba.availableAttributesPrefixed(attributePrefix); + break; + case AL::ByAdiosVariables: + attrs = ba.availableVariablesPrefixed(attributePrefix); + break; } - for( auto & rawAttr : attrs ) + for (auto &rawAttr : attrs) { - if( attributeLayout() == AttributeLayout::ByAdiosVariables && - ( auxiliary::ends_with( rawAttr, "/__data__" ) || - rawAttr == "__data__" ) ) + if (attributeLayout() == AttributeLayout::ByAdiosVariables && + (auxiliary::ends_with(rawAttr, "/__data__") || + rawAttr == "__data__")) { continue; } - auto attr = auxiliary::removeSlashes( rawAttr ); - if( attr.find_last_of( '/' ) == std::string::npos ) + auto attr = auxiliary::removeSlashes(rawAttr); + if (attr.find_last_of('/') == std::string::npos) { - parameters.attributes->push_back( std::move( attr ) ); + parameters.attributes->push_back(std::move(attr)); } } } -void -ADIOS2IOHandlerImpl::advance( - Writable * writable, - Parameter< Operation::ADVANCE > & parameters ) +void ADIOS2IOHandlerImpl::advance( + Writable *writable, Parameter ¶meters) { - auto file = m_files[ writable ]; - auto & ba = getFileData( file, IfFileNotOpen::ThrowError ); - *parameters.status = ba.advance( parameters.mode ); + auto file = m_files[writable]; + auto &ba = getFileData(file, IfFileNotOpen::ThrowError); + *parameters.status = ba.advance(parameters.mode); } -void -ADIOS2IOHandlerImpl::closePath( - Writable * writable, - Parameter< Operation::CLOSE_PATH > const & ) +void ADIOS2IOHandlerImpl::closePath( + Writable *writable, Parameter const &) { VERIFY_ALWAYS( writable->written, - "[ADIOS2] Cannot close a path that has not been written yet." ); - if( m_handler->m_backendAccess == Access::READ_ONLY ) + "[ADIOS2] Cannot close a path that has not been written yet."); + if (m_handler->m_backendAccess == Access::READ_ONLY) { // nothing to do return; } - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto & fileData = getFileData( file, IfFileNotOpen::ThrowError ); - if( !fileData.optimizeAttributesStreaming ) + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + auto &fileData = getFileData(file, IfFileNotOpen::ThrowError); + if (!fileData.optimizeAttributesStreaming) { return; } - auto position = setAndGetFilePosition( writable ); - auto const positionString = filePositionToString( position ); + auto position = setAndGetFilePosition(writable); + auto const positionString = filePositionToString(position); VERIFY( - !auxiliary::ends_with( positionString, '/' ), + !auxiliary::ends_with(positionString, '/'), "[ADIOS2] Position string has unexpected format. This is a bug " - "in the openPMD API." 
); + "in the openPMD API."); - for( auto const & attr : - fileData.availableAttributesPrefixed( positionString ) ) + for (auto const &attr : + fileData.availableAttributesPrefixed(positionString)) { - fileData.m_IO.RemoveAttribute( positionString + '/' + attr ); + fileData.m_IO.RemoveAttribute(positionString + '/' + attr); } } void ADIOS2IOHandlerImpl::availableChunks( - Writable * writable, Parameter< Operation::AVAILABLE_CHUNKS > & parameters ) + Writable *writable, Parameter ¶meters) { - setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable, /* preferParentFile = */ false ); - detail::BufferedActions & ba = - getFileData( file, IfFileNotOpen::ThrowError ); - std::string varName = nameOfVariable( writable ); + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable, /* preferParentFile = */ false); + detail::BufferedActions &ba = getFileData(file, IfFileNotOpen::ThrowError); + std::string varName = nameOfVariable(writable); auto engine = ba.getEngine(); // make sure that data are present - auto datatype = detail::fromADIOS2Type( ba.m_IO.VariableType( varName ) ); - switchAdios2VariableType< detail::RetrieveBlocksInfo >( - datatype, parameters, ba.m_IO, engine, varName ); + auto datatype = detail::fromADIOS2Type(ba.m_IO.VariableType(varName)); + switchAdios2VariableType( + datatype, parameters, ba.m_IO, engine, varName); } -adios2::Mode -ADIOS2IOHandlerImpl::adios2AccessMode( std::string const & fullPath ) +adios2::Mode ADIOS2IOHandlerImpl::adios2AccessMode(std::string const &fullPath) { - switch ( m_handler->m_backendAccess ) + switch (m_handler->m_backendAccess) { case Access::CREATE: return adios2::Mode::Write; case Access::READ_ONLY: return adios2::Mode::Read; case Access::READ_WRITE: - if( auxiliary::directory_exists( fullPath ) || - auxiliary::file_exists( fullPath ) ) + if (auxiliary::directory_exists(fullPath) || + auxiliary::file_exists(fullPath)) { std::cerr << "ADIOS2 does currently not yet implement ReadWrite " "(Append) mode. 
" @@ -1131,137 +1082,130 @@ ADIOS2IOHandlerImpl::adios2AccessMode( std::string const & fullPath ) } json::TracingJSON ADIOS2IOHandlerImpl::nullvalue = { - nlohmann::json(), json::SupportedLanguages::JSON }; + nlohmann::json(), json::SupportedLanguages::JSON}; -std::string -ADIOS2IOHandlerImpl::filePositionToString( - std::shared_ptr< ADIOS2FilePosition > filepos ) +std::string ADIOS2IOHandlerImpl::filePositionToString( + std::shared_ptr filepos) { return filepos->location; } -std::shared_ptr< ADIOS2FilePosition > ADIOS2IOHandlerImpl::extendFilePosition( - std::shared_ptr< ADIOS2FilePosition > const & oldPos, std::string s ) +std::shared_ptr ADIOS2IOHandlerImpl::extendFilePosition( + std::shared_ptr const &oldPos, std::string s) { - auto path = filePositionToString( oldPos ); - if ( !auxiliary::ends_with( path, '/' ) && - !auxiliary::starts_with( s, '/' ) ) + auto path = filePositionToString(oldPos); + if (!auxiliary::ends_with(path, '/') && !auxiliary::starts_with(s, '/')) { path = path + "/"; } - else if ( auxiliary::ends_with( path, '/' ) && - auxiliary::starts_with( s, '/' ) ) + else if (auxiliary::ends_with(path, '/') && auxiliary::starts_with(s, '/')) { - path = auxiliary::replace_last( path, "/", "" ); + path = auxiliary::replace_last(path, "/", ""); } - return std::make_shared< ADIOS2FilePosition >( path + std::move( s ), - oldPos->gd ); + return std::make_shared( + path + std::move(s), oldPos->gd); } -std::optional< adios2::Operator > -ADIOS2IOHandlerImpl::getCompressionOperator( std::string const & compression ) +std::optional +ADIOS2IOHandlerImpl::getCompressionOperator(std::string const &compression) { adios2::Operator res; - auto it = m_operators.find( compression ); - if ( it == m_operators.end( ) ) + auto it = m_operators.find(compression); + if (it == m_operators.end()) { - try { - res = m_ADIOS.DefineOperator( compression, compression ); + try + { + res = m_ADIOS.DefineOperator(compression, compression); } - catch ( std::invalid_argument const & e ) + catch (std::invalid_argument const &e) { std::cerr << "Warning: ADIOS2 backend does not support compression " "method " << compression << ". Continuing without compression." - << "\nOriginal error: " << e.what() - << std::endl; - return std::optional< adios2::Operator >(); + << "\nOriginal error: " << e.what() << std::endl; + return std::optional(); } - catch(std::string const & s) + catch (std::string const &s) { std::cerr << "Warning: ADIOS2 backend does not support compression " "method " << compression << ". Continuing without compression." 
- << "\nOriginal error: " << s - << std::endl; - return std::optional< adios2::Operator >(); + << "\nOriginal error: " << s << std::endl; + return std::optional(); } - m_operators.emplace( compression, res ); + m_operators.emplace(compression, res); } else { res = it->second; } - return std::make_optional( adios2::Operator( res ) ); + return std::make_optional(adios2::Operator(res)); } -std::string -ADIOS2IOHandlerImpl::nameOfVariable( Writable * writable ) +std::string ADIOS2IOHandlerImpl::nameOfVariable(Writable *writable) { - auto filepos = setAndGetFilePosition( writable ); - auto res = filePositionToString( filepos ); - if( attributeLayout() == AttributeLayout::ByAdiosAttributes ) + auto filepos = setAndGetFilePosition(writable); + auto res = filePositionToString(filepos); + if (attributeLayout() == AttributeLayout::ByAdiosAttributes) { return res; } - switch( filepos->gd ) + switch (filepos->gd) { - case ADIOS2FilePosition::GD::GROUP: - return res; - case ADIOS2FilePosition::GD::DATASET: - if( auxiliary::ends_with( res, '/' ) ) - { - return res + "__data__"; - } - else - { - // By convention, this path should always be taken - // But let's be safe - return res + "/__data__"; - } - default: - throw std::runtime_error( "[ADIOS2IOHandlerImpl] Unreachable!" ); + case ADIOS2FilePosition::GD::GROUP: + return res; + case ADIOS2FilePosition::GD::DATASET: + if (auxiliary::ends_with(res, '/')) + { + return res + "__data__"; + } + else + { + // By convention, this path should always be taken + // But let's be safe + return res + "/__data__"; + } + default: + throw std::runtime_error("[ADIOS2IOHandlerImpl] Unreachable!"); } } -std::string ADIOS2IOHandlerImpl::nameOfAttribute( Writable * writable, - std::string attribute ) +std::string +ADIOS2IOHandlerImpl::nameOfAttribute(Writable *writable, std::string attribute) { - auto pos = setAndGetFilePosition( writable ); + auto pos = setAndGetFilePosition(writable); return filePositionToString( - extendFilePosition( pos, auxiliary::removeSlashes( attribute ) ) ); + extendFilePosition(pos, auxiliary::removeSlashes(attribute))); } -ADIOS2FilePosition::GD -ADIOS2IOHandlerImpl::groupOrDataset( Writable * writable ) +ADIOS2FilePosition::GD ADIOS2IOHandlerImpl::groupOrDataset(Writable *writable) { - return setAndGetFilePosition( writable )->gd; + return setAndGetFilePosition(writable)->gd; } detail::BufferedActions & -ADIOS2IOHandlerImpl::getFileData( InvalidatableFile file, IfFileNotOpen flag ) +ADIOS2IOHandlerImpl::getFileData(InvalidatableFile file, IfFileNotOpen flag) { VERIFY_ALWAYS( file.valid(), "[ADIOS2] Cannot retrieve file data for a file that has " - "been overwritten or deleted." ) - auto it = m_fileData.find( file ); - if( it == m_fileData.end() ) + "been overwritten or deleted.") + auto it = m_fileData.find(file); + if (it == m_fileData.end()) { - switch( flag ) + switch (flag) { case IfFileNotOpen::OpenImplicitly: { auto res = m_fileData.emplace( - std::move( file ), - std::make_unique< detail::BufferedActions >( *this, file ) ); + std::move(file), + std::make_unique(*this, file)); return *res.first->second; } case IfFileNotOpen::ThrowError: throw std::runtime_error( "[ADIOS2] Requested file has not been opened yet: " + - ( file.fileState ? file.fileState->name - : "Unknown file name" ) ); + (file.fileState ? 
file.fileState->name : "Unknown file name")); } } else @@ -1270,105 +1214,106 @@ ADIOS2IOHandlerImpl::getFileData( InvalidatableFile file, IfFileNotOpen flag ) } } -void ADIOS2IOHandlerImpl::dropFileData( InvalidatableFile file ) +void ADIOS2IOHandlerImpl::dropFileData(InvalidatableFile file) { - auto it = m_fileData.find( file ); - if ( it != m_fileData.end( ) ) + auto it = m_fileData.find(file); + if (it != m_fileData.end()) { - it->second->drop( ); - m_fileData.erase( it ); + it->second->drop(); + m_fileData.erase(it); } } -template < typename T > -adios2::Variable< T > -ADIOS2IOHandlerImpl::verifyDataset( Offset const & offset, - Extent const & extent, adios2::IO & IO, - std::string const & varName ) +template +adios2::Variable ADIOS2IOHandlerImpl::verifyDataset( + Offset const &offset, + Extent const &extent, + adios2::IO &IO, + std::string const &varName) { { - auto requiredType = adios2::GetType< T >( ); - auto actualType = IO.VariableType( varName ); + auto requiredType = adios2::GetType(); + auto actualType = IO.VariableType(varName); std::stringstream errorMessage; errorMessage << "[ADIOS2] Trying to access a dataset with wrong type (trying to " "access dataset with type " - << determineDatatype< T >() << ", but has type " - << detail::fromADIOS2Type( actualType, false ) << ")"; - VERIFY_ALWAYS( requiredType == actualType, errorMessage.str() ); + << determineDatatype() << ", but has type " + << detail::fromADIOS2Type(actualType, false) << ")"; + VERIFY_ALWAYS(requiredType == actualType, errorMessage.str()); } - adios2::Variable< T > var = IO.InquireVariable< T >( varName ); - VERIFY_ALWAYS( var.operator bool( ), - "[ADIOS2] Internal error: Failed opening ADIOS2 variable." ) + adios2::Variable var = IO.InquireVariable(varName); + VERIFY_ALWAYS( + var.operator bool(), + "[ADIOS2] Internal error: Failed opening ADIOS2 variable.") // TODO leave this check to ADIOS? - adios2::Dims shape = var.Shape( ); - auto actualDim = shape.size( ); + adios2::Dims shape = var.Shape(); + auto actualDim = shape.size(); { - auto requiredDim = extent.size( ); - VERIFY_ALWAYS( requiredDim == actualDim, - "[ADIOS2] Trying to access a dataset with wrong dimensionality " - "(trying to access dataset with dimensionality " + - std::to_string( requiredDim ) + - ", but has dimensionality " + - std::to_string( actualDim ) + ")" ) + auto requiredDim = extent.size(); + VERIFY_ALWAYS( + requiredDim == actualDim, + "[ADIOS2] Trying to access a dataset with wrong dimensionality " + "(trying to access dataset with dimensionality " + + std::to_string(requiredDim) + ", but has dimensionality " + + std::to_string(actualDim) + ")") } - for ( unsigned int i = 0; i < actualDim; i++ ) + for (unsigned int i = 0; i < actualDim; i++) { - VERIFY_ALWAYS( offset[i] + extent[i] <= shape[i], - "[ADIOS2] Dataset access out of bounds." 
) + VERIFY_ALWAYS( + offset[i] + extent[i] <= shape[i], + "[ADIOS2] Dataset access out of bounds.") } - var.SetSelection({ - adios2::Dims(offset.begin(), offset.end()), - adios2::Dims(extent.begin(), extent.end()) - }); + var.SetSelection( + {adios2::Dims(offset.begin(), offset.end()), + adios2::Dims(extent.begin(), extent.end())}); return var; } namespace detail { - template< typename T > + template void DatasetReader::call( - ADIOS2IOHandlerImpl * impl, - detail::BufferedGet & bp, - adios2::IO & IO, - adios2::Engine & engine, - std::string const & fileName ) + ADIOS2IOHandlerImpl *impl, + detail::BufferedGet &bp, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &fileName) { - adios2::Variable< T > var = impl->verifyDataset< T >( - bp.param.offset, bp.param.extent, IO, bp.name ); - if ( !var ) + adios2::Variable var = impl->verifyDataset( + bp.param.offset, bp.param.extent, IO, bp.name); + if (!var) { throw std::runtime_error( - "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + bp.name + - "' from file " + fileName + "." ); + "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + + bp.name + "' from file " + fileName + "."); } - auto ptr = std::static_pointer_cast< T >( bp.param.data ).get( ); - engine.Get( var, ptr ); + auto ptr = std::static_pointer_cast(bp.param.data).get(); + engine.Get(var, ptr); } - template< typename T > - Datatype - OldAttributeReader::call( - adios2::IO & IO, + template + Datatype OldAttributeReader::call( + adios2::IO &IO, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { /* * If we store an attribute of boolean type, we store an additional * attribute prefixed with '__is_boolean__' to indicate this information * that would otherwise be lost. Check whether this has been done. */ - using rep = AttributeTypes< bool >::rep; + using rep = AttributeTypes::rep; - if constexpr( std::is_same< T, rep >::value ) + if constexpr (std::is_same::value) { - auto attr = IO.InquireAttribute< rep >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed reading attribute '" + name + - "'." 
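Aside (not part of the patch): verifyDataset() plus DatasetReader::call() above boil down to the usual ADIOS2 read sequence: inquire the variable, check type, shape and bounds, SetSelection(), then Get(). A condensed sketch with made-up file and dataset names:

#include <adios2.h>
#include <iostream>
#include <stdexcept>
#include <vector>

int main()
{
    adios2::ADIOS adios;
    adios2::IO io = adios.DeclareIO("reader");
    adios2::Engine engine = io.Open("simData.bp", adios2::Mode::Read);

    // type check, as in verifyDataset()
    if (io.VariableType("E/x") != adios2::GetType<double>())
    {
        throw std::runtime_error("dataset has unexpected type");
    }
    adios2::Variable<double> var = io.InquireVariable<double>("E/x");
    adios2::Dims shape = var.Shape();

    // bounds-checked hyperslab: offset {2}, extent {5} in a 1D dataset
    adios2::Dims offset{2}, extent{5};
    if (shape.size() != offset.size() || offset[0] + extent[0] > shape[0])
    {
        throw std::runtime_error("selection out of bounds");
    }
    var.SetSelection({offset, extent});

    std::vector<double> buffer(extent[0]);
    engine.Get(var, buffer.data(), adios2::Mode::Sync);
    engine.Close();
    std::cout << "first value: " << buffer.front() << std::endl;
    return 0;
}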
); + "[ADIOS2] Internal error: Failed reading attribute '" + + name + "'."); } std::string metaAttr = @@ -1382,87 +1327,87 @@ namespace detail auto type = attributeInfo( IO, ADIOS2Defaults::str_isBooleanOldLayout + name, - /* verbose = */ false ); + /* verbose = */ false); - - if( type == determineDatatype< rep >() ) + if (type == determineDatatype()) { - auto meta = IO.InquireAttribute< rep >( metaAttr ); - if( meta.Data().size() == 1 && meta.Data()[ 0 ] == 1 ) + auto meta = IO.InquireAttribute(metaAttr); + if (meta.Data().size() == 1 && meta.Data()[0] == 1) { - *resource = bool_repr::fromRep( attr.Data()[ 0 ] ); - return determineDatatype< bool >(); + *resource = bool_repr::fromRep(attr.Data()[0]); + return determineDatatype(); } } - *resource = attr.Data()[ 0 ]; + *resource = attr.Data()[0]; } - else if constexpr( IsUnsupportedComplex_v< T > ) + else if constexpr (IsUnsupportedComplex_v) { throw std::runtime_error( "[ADIOS2] Internal error: no support for long double complex " - "attribute types" ); + "attribute types"); } - else if constexpr( auxiliary::IsVector_v< T > ) + else if constexpr (auxiliary::IsVector_v) { - auto attr = IO.InquireAttribute< typename T::value_type >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed reading attribute '" + name + "'." ); + "[ADIOS2] Internal error: Failed reading attribute '" + + name + "'."); } *resource = attr.Data(); } - else if constexpr( auxiliary::IsArray_v< T > ) + else if constexpr (auxiliary::IsArray_v) { - auto attr = IO.InquireAttribute< typename T::value_type >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed reading attribute '" + name + "'." ); + "[ADIOS2] Internal error: Failed reading attribute '" + + name + "'."); } - auto data = attr.Data( ); + auto data = attr.Data(); T res; - for ( size_t i = 0; i < data.size(); i++ ) + for (size_t i = 0; i < data.size(); i++) { res[i] = data[i]; } *resource = res; } - else if constexpr( std::is_same_v< T, bool > ) + else if constexpr (std::is_same_v) { throw std::runtime_error( - "Observed boolean attribute. ADIOS2 does not have these?" ); + "Observed boolean attribute. ADIOS2 does not have these?"); } else { - auto attr = IO.InquireAttribute< T >( name ); - if ( !attr ) + auto attr = IO.InquireAttribute(name); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed reading attribute '" + name + - "'." ); + "[ADIOS2] Internal error: Failed reading attribute '" + + name + "'."); } - *resource = attr.Data()[ 0 ]; + *resource = attr.Data()[0]; } - return determineDatatype< T >(); + return determineDatatype(); } - template< int n, typename... Params > - Datatype - OldAttributeReader::call( Params &&... ) + template + Datatype OldAttributeReader::call(Params &&...) { throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to read an attribute." ); + "trying to read an attribute."); } - template< typename T > + template Datatype AttributeReader::call( - adios2::IO & IO, - detail::PreloadAdiosAttributes const & preloadedAttributes, + adios2::IO &IO, + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { /* * If we store an attribute of boolean type, we store an additional @@ -1470,7 +1415,7 @@ namespace detail * that would otherwise be lost. 
Check whether this has been done. */ using rep = AttributeTypes::rep; - if constexpr( std::is_same< T, rep >::value ) + if constexpr (std::is_same::value) { std::string metaAttr = ADIOS2Defaults::str_isBooleanNewLayout + name; @@ -1483,70 +1428,69 @@ namespace detail auto type = attributeInfo( IO, ADIOS2Defaults::str_isBooleanNewLayout + name, - /* verbose = */ false ); - if( type == determineDatatype< rep >() ) + /* verbose = */ false); + if (type == determineDatatype()) { - auto attr = IO.InquireAttribute< rep >( metaAttr ); + auto attr = IO.InquireAttribute(metaAttr); if (attr.Data().size() == 1 && attr.Data()[0] == 1) { - AttributeTypes< bool >::readAttribute( - preloadedAttributes, name, resource ); - return determineDatatype< bool >(); + AttributeTypes::readAttribute( + preloadedAttributes, name, resource); + return determineDatatype(); } } } - AttributeTypes< T >::readAttribute( - preloadedAttributes, name, resource ); - return determineDatatype< T >(); + AttributeTypes::readAttribute(preloadedAttributes, name, resource); + return determineDatatype(); } - template < int n, typename... Params > - Datatype AttributeReader::call( Params &&... ) + template + Datatype AttributeReader::call(Params &&...) { - throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to read an attribute." ); + throw std::runtime_error( + "[ADIOS2] Internal error: Unknown datatype while " + "trying to read an attribute."); } - template< typename T > + template void OldAttributeWriter::call( - ADIOS2IOHandlerImpl * impl, - Writable * writable, - const Parameter< Operation::WRITE_ATT > & parameters ) + ADIOS2IOHandlerImpl *impl, + Writable *writable, + const Parameter ¶meters) { VERIFY_ALWAYS( impl->m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot write attribute in read-only mode." 
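Aside (not part of the patch): the boolean handling above exists because ADIOS2 has no bool attribute type; openPMD therefore stores the value as an unsigned char and writes a companion marker attribute (the str_isBooleanOldLayout / str_isBooleanNewLayout prefixes) so that readers can restore the original type. Below is a sketch of the idea with plain ADIOS2 attributes; the "__is_boolean__/" prefix is a placeholder, the real strings live in ADIOS2Defaults.

#include <adios2.h>
#include <iostream>
#include <string>

int main()
{
    adios2::ADIOS adios;
    adios2::IO io = adios.DeclareIO("boolAttr");

    // writing: bool becomes unsigned char plus a marker attribute
    bool value = true;
    std::string name = "usePolarization";
    io.DefineAttribute<unsigned char>(name, value ? 1 : 0);
    io.DefineAttribute<unsigned char>("__is_boolean__/" + name, 1);

    // reading: restore the bool only if the marker is present
    auto attr = io.InquireAttribute<unsigned char>(name);
    auto marker = io.InquireAttribute<unsigned char>("__is_boolean__/" + name);
    if (attr && marker && marker.Data().size() == 1 && marker.Data()[0] == 1)
    {
        bool readBack = attr.Data()[0] != 0;
        std::cout << std::boolalpha << "restored bool: " << readBack << '\n';
    }
    return 0;
}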
); - auto pos = impl->setAndGetFilePosition( writable ); - auto file = impl->refreshFileFromParent( writable, /* preferParentFile = */ false ); - auto fullName = impl->nameOfAttribute( writable, parameters.name ); - auto prefix = impl->filePositionToString( pos ); - - auto & filedata = impl->getFileData( - file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError ); + "[ADIOS2] Cannot write attribute in read-only mode."); + auto pos = impl->setAndGetFilePosition(writable); + auto file = impl->refreshFileFromParent( + writable, /* preferParentFile = */ false); + auto fullName = impl->nameOfAttribute(writable, parameters.name); + auto prefix = impl->filePositionToString(pos); + + auto &filedata = impl->getFileData( + file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError); filedata.invalidateAttributesMap(); adios2::IO IO = filedata.m_IO; - impl->m_dirty.emplace( std::move( file ) ); + impl->m_dirty.emplace(std::move(file)); - std::string t = IO.AttributeType( fullName ); - if ( !t.empty( ) ) // an attribute is present <=> it has a type + std::string t = IO.AttributeType(fullName); + if (!t.empty()) // an attribute is present <=> it has a type { // don't overwrite attributes if they are equivalent // overwriting is only legal within the same step - auto attributeModifiable = [ &filedata, &fullName ]() { - auto it = filedata.uncommittedAttributes.find( fullName ); + auto attributeModifiable = [&filedata, &fullName]() { + auto it = filedata.uncommittedAttributes.find(fullName); return it != filedata.uncommittedAttributes.end(); }; - if( AttributeTypes< T >::attributeUnchanged( - IO, - fullName, - std::get< T >( parameters.resource ) ) ) + if (AttributeTypes::attributeUnchanged( + IO, fullName, std::get(parameters.resource))) { return; } - else if( attributeModifiable() ) + else if (attributeModifiable()) { - IO.RemoveAttribute( fullName ); + IO.RemoveAttribute(fullName); } else { @@ -1558,447 +1502,441 @@ namespace detail } else { - filedata.uncommittedAttributes.emplace( fullName ); + filedata.uncommittedAttributes.emplace(fullName); } - auto & value = std::get< T >( parameters.resource ); + auto &value = std::get(parameters.resource); - if constexpr( IsUnsupportedComplex_v< T > ) + if constexpr (IsUnsupportedComplex_v) { throw std::runtime_error( "[ADIOS2] Internal error: no support for long double complex " - "attribute types" ); + "attribute types"); } - else if constexpr( auxiliary::IsVector_v< T > ) + else if constexpr (auxiliary::IsVector_v) { - auto attr = IO.DefineAttribute( fullName, value.data(), value.size() ); - if( !attr ) + auto attr = + IO.DefineAttribute(fullName, value.data(), value.size()); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed defining attribute '" + fullName + - "'." ); + "[ADIOS2] Internal error: Failed defining attribute '" + + fullName + "'."); } } - else if constexpr( auxiliary::IsArray_v< T > ) + else if constexpr (auxiliary::IsArray_v) { - auto attr = IO.DefineAttribute( fullName, value.data(), value.size() ); - if( !attr ) + auto attr = + IO.DefineAttribute(fullName, value.data(), value.size()); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed defining attribute '" + fullName + - "'." 
); + "[ADIOS2] Internal error: Failed defining attribute '" + + fullName + "'."); } } - else if constexpr( std::is_same_v< T, bool > ) + else if constexpr (std::is_same_v) { - IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_isBooleanOldLayout + fullName, 1 ); - auto representation = bool_repr::toRep( value ); - auto attr = IO.DefineAttribute( fullName, representation ); - if( !attr ) + IO.DefineAttribute( + ADIOS2Defaults::str_isBooleanOldLayout + fullName, 1); + auto representation = bool_repr::toRep(value); + auto attr = IO.DefineAttribute(fullName, representation); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed defining attribute '" + fullName + - "'." ); + "[ADIOS2] Internal error: Failed defining attribute '" + + fullName + "'."); } } else { - auto attr = IO.DefineAttribute( fullName, value ); - if( !attr ) + auto attr = IO.DefineAttribute(fullName, value); + if (!attr) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed defining attribute '" + fullName + - "'." ); + "[ADIOS2] Internal error: Failed defining attribute '" + + fullName + "'."); } } } - template< int n, typename... Params > - void - OldAttributeWriter::call( Params &&... ) + template + void OldAttributeWriter::call(Params &&...) { throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to write an attribute." ); + "trying to write an attribute."); } - template< typename T > + template void AttributeWriter::call( - detail::BufferedAttributeWrite & params, BufferedActions & fileData ) + detail::BufferedAttributeWrite ¶ms, BufferedActions &fileData) { - AttributeTypes< T >::createAttribute( + AttributeTypes::createAttribute( fileData.m_IO, fileData.requireActiveStep(), params, - std::get< T >( params.resource ) ); + std::get(params.resource)); } - template< int n, typename... Params > - void AttributeWriter::call( Params &&... ) + template + void AttributeWriter::call(Params &&...) { - throw std::runtime_error( "[ADIOS2] Internal error: Unknown datatype while " - "trying to write an attribute." ); + throw std::runtime_error( + "[ADIOS2] Internal error: Unknown datatype while " + "trying to write an attribute."); } - template< typename T > + template void DatasetOpener::call( - ADIOS2IOHandlerImpl * impl, + ADIOS2IOHandlerImpl *impl, InvalidatableFile file, - const std::string & varName, - Parameter< Operation::OPEN_DATASET > & parameters ) + const std::string &varName, + Parameter ¶meters) { - auto & fileData = impl->getFileData( - file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError ); + auto &fileData = impl->getFileData( + file, ADIOS2IOHandlerImpl::IfFileNotOpen::ThrowError); fileData.requireActiveStep(); - auto & IO = fileData.m_IO; - adios2::Variable< T > var = IO.InquireVariable< T >( varName ); - if( !var ) + auto &IO = fileData.m_IO; + adios2::Variable var = IO.InquireVariable(varName); + if (!var) { throw std::runtime_error( - "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + varName + - "' from file " + *file + "." ); + "[ADIOS2] Failed retrieving ADIOS2 Variable with name '" + + varName + "' from file " + *file + "."); } // Operators in reading needed e.g. 
for setting decompression threads - for( auto const & operation : impl->defaultOperators ) + for (auto const &operation : impl->defaultOperators) { - if( operation.op ) + if (operation.op) { - var.AddOperation( operation.op, operation.params ); + var.AddOperation(operation.op, operation.params); } } // cast from adios2::Dims to openPMD::Extent auto const shape = var.Shape(); parameters.extent->clear(); - parameters.extent->reserve( shape.size() ); - std::copy( shape.begin(), shape.end(), std::back_inserter(*parameters.extent) ); + parameters.extent->reserve(shape.size()); + std::copy( + shape.begin(), shape.end(), std::back_inserter(*parameters.extent)); } - template< typename T > + template void WriteDataset::call( - ADIOS2IOHandlerImpl * impl, - detail::BufferedPut & bp, - adios2::IO & IO, - adios2::Engine & engine ) + ADIOS2IOHandlerImpl *impl, + detail::BufferedPut &bp, + adios2::IO &IO, + adios2::Engine &engine) { VERIFY_ALWAYS( impl->m_handler->m_backendAccess != Access::READ_ONLY, - "[ADIOS2] Cannot write data in read-only mode." ); + "[ADIOS2] Cannot write data in read-only mode."); - auto ptr = std::static_pointer_cast< const T >( bp.param.data ).get(); + auto ptr = std::static_pointer_cast(bp.param.data).get(); - adios2::Variable< T > var = impl->verifyDataset< T >( - bp.param.offset, bp.param.extent, IO, bp.name ); + adios2::Variable var = impl->verifyDataset( + bp.param.offset, bp.param.extent, IO, bp.name); - engine.Put( var, ptr ); + engine.Put(var, ptr); } - template < int n, typename... Params > - void WriteDataset::call( Params &&... ) + template + void WriteDataset::call(Params &&...) { - throw std::runtime_error( "[ADIOS2] WRITE_DATASET: Invalid datatype." ); + throw std::runtime_error("[ADIOS2] WRITE_DATASET: Invalid datatype."); } - template< typename T > + template void VariableDefiner::call( - adios2::IO & IO, - std::string const & name, - std::vector< ADIOS2IOHandlerImpl::ParameterizedOperator > const & - compressions, - adios2::Dims const & shape, - adios2::Dims const & start, - adios2::Dims const & count, - bool const constantDims ) + adios2::IO &IO, + std::string const &name, + std::vector const + &compressions, + adios2::Dims const &shape, + adios2::Dims const &start, + adios2::Dims const &count, + bool const constantDims) { /* * Step/Variable-based iteration layout: * The variable may already be defined from a previous step, * so check if it's already here. */ - adios2::Variable< T > var = IO.InquireVariable< T >( name ); - if( !var ) + adios2::Variable var = IO.InquireVariable(name); + if (!var) { - var = IO.DefineVariable< T >( - name, shape, start, count, constantDims ); + var = IO.DefineVariable(name, shape, start, count, constantDims); } else { - var.SetShape( shape ); - if( count.size() > 0 ) + var.SetShape(shape); + if (count.size() > 0) { - var.SetSelection( { start, count } ); + var.SetSelection({start, count}); } // don't add compression operators multiple times return; } - if( !var ) + if (!var) { throw std::runtime_error( - "[ADIOS2] Internal error: Could not create Variable '" + name + "'." 
); + "[ADIOS2] Internal error: Could not create Variable '" + name + + "'."); } - for( auto const & compression : compressions ) + for (auto const &compression : compressions) { - if( compression.op ) + if (compression.op) { - var.AddOperation( compression.op, compression.params ); + var.AddOperation(compression.op, compression.params); } } } - template < typename T > + template void RetrieveBlocksInfo::call( - Parameter< Operation::AVAILABLE_CHUNKS > & params, - adios2::IO & IO, - adios2::Engine & engine, - std::string const & varName ) + Parameter ¶ms, + adios2::IO &IO, + adios2::Engine &engine, + std::string const &varName) { - auto var = IO.InquireVariable< T >( varName ); - auto blocksInfo = engine.BlocksInfo< T >( var, engine.CurrentStep() ); - auto & table = *params.chunks; - table.reserve( blocksInfo.size() ); - for( auto const & info : blocksInfo ) + auto var = IO.InquireVariable(varName); + auto blocksInfo = engine.BlocksInfo(var, engine.CurrentStep()); + auto &table = *params.chunks; + table.reserve(blocksInfo.size()); + for (auto const &info : blocksInfo) { Offset offset; Extent extent; auto size = info.Start.size(); - offset.reserve( size ); - extent.reserve( size ); - for( unsigned i = 0; i < size; ++i ) + offset.reserve(size); + extent.reserve(size); + for (unsigned i = 0; i < size; ++i) { - offset.push_back( info.Start[ i ] ); - extent.push_back( info.Count[ i ] ); + offset.push_back(info.Start[i]); + extent.push_back(info.Count[i]); } table.emplace_back( - std::move( offset ), std::move( extent ), info.WriterID ); + std::move(offset), std::move(extent), info.WriterID); } } - template < int n, typename... Args > - void RetrieveBlocksInfo::call( Args&&... ) + template + void RetrieveBlocksInfo::call(Args &&...) { // variable has not been found, so we don't fill in any blocks } - template< typename T > - void - AttributeTypes< T >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const T value ) + template + void AttributeTypes::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const T value) { - auto attr = IO.InquireVariable< T >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { // std::cout << "DATATYPE OF " << name << ": " // << IO.VariableType( name ) << std::endl; - attr = IO.DefineVariable< T >( params.name ); + attr = IO.DefineVariable(params.name); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." 
); + params.name + "'."); } - engine.Put( attr, value, adios2::Mode::Deferred ); + engine.Put(attr, value, adios2::Mode::Deferred); } - template< typename T > - void - AttributeTypes< T >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + void AttributeTypes::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< T > attr = - preloadedAttributes.getAttribute< T >( name ); - if( !( attr.shape.size() == 0 || - ( attr.shape.size() == 1 && attr.shape[ 0 ] == 1 ) ) ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (!(attr.shape.size() == 0 || + (attr.shape.size() == 1 && attr.shape[0] == 1))) { throw std::runtime_error( "[ADIOS2] Expecting scalar ADIOS variable, got " + - std::to_string( attr.shape.size() ) + "D: " + name ); + std::to_string(attr.shape.size()) + "D: " + name); } *resource = *attr.data; } - template < typename T > - void - AttributeTypes< std::vector< T > >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< T > & value ) + template + void AttributeTypes>::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &value) { auto size = value.size(); - auto attr = IO.InquireVariable< T >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { - attr = IO.DefineVariable< T >( - params.name, { size }, { 0 }, { size } ); + attr = IO.DefineVariable(params.name, {size}, {0}, {size}); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." 
); + params.name + "'."); } - engine.Put( attr, value.data(), adios2::Mode::Deferred ); + engine.Put(attr, value.data(), adios2::Mode::Deferred); } - template< typename T > - void - AttributeTypes< std::vector< T > >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + void AttributeTypes>::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< T > attr = - preloadedAttributes.getAttribute< T >( name ); - if( attr.shape.size() != 1 ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (attr.shape.size() != 1) { - throw std::runtime_error( "[ADIOS2] Expecting 1D ADIOS variable" ); + throw std::runtime_error("[ADIOS2] Expecting 1D ADIOS variable"); } - std::vector< T > res( attr.shape[ 0 ] ); - std::copy_n( attr.data, attr.shape[ 0 ], res.data() ); - *resource = std::move( res ); + std::vector res(attr.shape[0]); + std::copy_n(attr.data, attr.shape[0], res.data()); + *resource = std::move(res); } - void - AttributeTypes< std::vector< std::string > >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::vector< std::string > & vec ) + void AttributeTypes>::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::vector &vec) { size_t width = 0; - for( auto const & str : vec ) + for (auto const &str : vec) { - width = std::max( width, str.size() ); + width = std::max(width, str.size()); } ++width; // null delimiter size_t const height = vec.size(); - auto attr = IO.InquireVariable< char >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { - attr = IO.DefineVariable< char >( - params.name, { height, width }, { 0, 0 }, { height, width } ); + attr = IO.DefineVariable( + params.name, {height, width}, {0, 0}, {height, width}); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." ); + params.name + "'."); } // write this thing to the params, so we don't get a use after free // due to deferred writing - params.bufferForVecString = std::vector< char >( width * height, 0 ); - for( size_t i = 0; i < height; ++i ) + params.bufferForVecString = std::vector(width * height, 0); + for (size_t i = 0; i < height; ++i) { size_t start = i * width; - std::string const & str = vec[ i ]; + std::string const &str = vec[i]; std::copy( str.begin(), str.end(), - params.bufferForVecString.begin() + start ); + params.bufferForVecString.begin() + start); } engine.Put( - attr, params.bufferForVecString.data(), adios2::Mode::Deferred ); + attr, params.bufferForVecString.data(), adios2::Mode::Deferred); } - void - AttributeTypes< std::vector< std::string > >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + void AttributeTypes>::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { /* * char_type parameter only for specifying the "template" type. 
*/ - auto loadFromDatatype = - [ &preloadedAttributes, &name, &resource ]( auto char_type ) { - using char_t = decltype( char_type ); - detail::AttributeWithShape< char_t > attr = - preloadedAttributes.getAttribute< char_t >( name ); - if( attr.shape.size() != 2 ) - { - throw std::runtime_error( - "[ADIOS2] Expecting 2D ADIOS variable" ); - } - char_t const * loadedData = attr.data; - size_t height = attr.shape[ 0 ]; - size_t width = attr.shape[ 1 ]; + auto loadFromDatatype = [&preloadedAttributes, &name, &resource]( + auto char_type) { + using char_t = decltype(char_type); + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (attr.shape.size() != 2) + { + throw std::runtime_error( + "[ADIOS2] Expecting 2D ADIOS variable"); + } + char_t const *loadedData = attr.data; + size_t height = attr.shape[0]; + size_t width = attr.shape[1]; - std::vector< std::string > res( height ); - if( std::is_signed< char >::value == - std::is_signed< char_t >::value ) + std::vector res(height); + if (std::is_signed::value == std::is_signed::value) + { + /* + * This branch is chosen if the signedness of the + * ADIOS variable corresponds with the signedness of the + * char type on the current platform. + * In this case, the C++ standard guarantees that the + * representations for char and (un)signed char are + * identical, reinterpret_cast-ing the loadedData to + * char in order to construct our strings will be fine. + */ + for (size_t i = 0; i < height; ++i) { - /* - * This branch is chosen if the signedness of the - * ADIOS variable corresponds with the signedness of the - * char type on the current platform. - * In this case, the C++ standard guarantees that the - * representations for char and (un)signed char are - * identical, reinterpret_cast-ing the loadedData to - * char in order to construct our strings will be fine. - */ - for( size_t i = 0; i < height; ++i ) + size_t start = i * width; + char const *start_ptr = + reinterpret_cast(loadedData + start); + size_t j = 0; + while (j < width && start_ptr[j] != 0) { - size_t start = i * width; - char const * start_ptr = - reinterpret_cast< char const * >( - loadedData + start ); - size_t j = 0; - while( j < width && start_ptr[ j ] != 0 ) - { - ++j; - } - std::string & str = res[ i ]; - str.append( start_ptr, start_ptr + j ); + ++j; } + std::string &str = res[i]; + str.append(start_ptr, start_ptr + j); } - else + } + else + { + /* + * This branch is chosen if the signedness of the + * ADIOS variable is different from the signedness of the + * char type on the current platform. + * In this case, we play it safe, and explicitly convert + * the loadedData to char pointwise. + */ + std::vector converted(width); + for (size_t i = 0; i < height; ++i) { - /* - * This branch is chosen if the signedness of the - * ADIOS variable is different from the signedness of the - * char type on the current platform. - * In this case, we play it safe, and explicitly convert - * the loadedData to char pointwise. 
- */ - std::vector< char > converted( width ); - for( size_t i = 0; i < height; ++i ) + size_t start = i * width; + auto const *start_ptr = loadedData + start; + size_t j = 0; + while (j < width && start_ptr[j] != 0) { - size_t start = i * width; - auto const * start_ptr = loadedData + start; - size_t j = 0; - while( j < width && start_ptr[ j ] != 0 ) - { - converted[ j ] = start_ptr[ j ]; - ++j; - } - std::string & str = res[ i ]; - str.append( converted.data(), converted.data() + j ); + converted[j] = start_ptr[j]; + ++j; } + std::string &str = res[i]; + str.append(converted.data(), converted.data() + j); } + } - *resource = res; - }; + *resource = res; + }; /* * If writing char variables in ADIOS2, they might become either int8_t * or uint8_t on disk depending on the platform. * So allow reading from both types. */ - switch( preloadedAttributes.attributeType( name ) ) + switch (preloadedAttributes.attributeType(name)) { /* * Workaround for two bugs at once: @@ -2008,179 +1946,163 @@ namespace detail */ case Datatype::CHAR: { using schar_t = signed char; - loadFromDatatype( schar_t{} ); + loadFromDatatype(schar_t{}); break; } case Datatype::UCHAR: { using uchar_t = unsigned char; - loadFromDatatype( uchar_t{} ); + loadFromDatatype(uchar_t{}); break; } default: { throw std::runtime_error( "[ADIOS2] Expecting 2D ADIOS variable of " - "type signed or unsigned char." ); + "type signed or unsigned char."); } } } - template< typename T, size_t n > - void - AttributeTypes< std::array< T, n > >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const std::array< T, n > & value ) + template + void AttributeTypes>::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const std::array &value) { - auto attr = IO.InquireVariable< T >( params.name ); + auto attr = IO.InquireVariable(params.name); // @todo check size - if( !attr ) + if (!attr) { - attr = IO.DefineVariable< T >( params.name, { n }, { 0 }, { n } ); + attr = IO.DefineVariable(params.name, {n}, {0}, {n}); } - if( !attr ) + if (!attr) { throw std::runtime_error( "[ADIOS2] Internal error: Failed defining variable '" + - params.name + "'." 
); + params.name + "'."); } - engine.Put( attr, value.data(), adios2::Mode::Deferred ); + engine.Put(attr, value.data(), adios2::Mode::Deferred); } - template< typename T, size_t n > - void - AttributeTypes< std::array< T, n > >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + template + void AttributeTypes>::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< T > attr = - preloadedAttributes.getAttribute< T >( name ); - if( attr.shape.size() != 1 || attr.shape[ 0 ] != n ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (attr.shape.size() != 1 || attr.shape[0] != n) { throw std::runtime_error( "[ADIOS2] Expecting 1D ADIOS variable of extent " + - std::to_string( n ) ); + std::to_string(n)); } - std::array< T, n > res; - std::copy_n( attr.data, n, res.data() ); - *resource = std::move( res ); + std::array res; + std::copy_n(attr.data, n, res.data()); + *resource = std::move(res); } - void - AttributeTypes< bool >::createAttribute( - adios2::IO & IO, - adios2::Engine & engine, - detail::BufferedAttributeWrite & params, - const bool value ) + void AttributeTypes::createAttribute( + adios2::IO &IO, + adios2::Engine &engine, + detail::BufferedAttributeWrite ¶ms, + const bool value) { - IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_isBooleanNewLayout + params.name, 1 ); - AttributeTypes< bool_representation >::createAttribute( - IO, engine, params, toRep( value ) ); + IO.DefineAttribute( + ADIOS2Defaults::str_isBooleanNewLayout + params.name, 1); + AttributeTypes::createAttribute( + IO, engine, params, toRep(value)); } - void - AttributeTypes< bool >::readAttribute( - detail::PreloadAdiosAttributes const & preloadedAttributes, + void AttributeTypes::readAttribute( + detail::PreloadAdiosAttributes const &preloadedAttributes, std::string name, - std::shared_ptr< Attribute::resource > resource ) + std::shared_ptr resource) { - detail::AttributeWithShape< rep > attr = - preloadedAttributes.getAttribute< rep >( name ); - if( !( attr.shape.size() == 0 || - ( attr.shape.size() == 1 && attr.shape[ 0 ] == 1 ) ) ) + detail::AttributeWithShape attr = + preloadedAttributes.getAttribute(name); + if (!(attr.shape.size() == 0 || + (attr.shape.size() == 1 && attr.shape[0] == 1))) { throw std::runtime_error( "[ADIOS2] Expecting scalar ADIOS variable, got " + - std::to_string( attr.shape.size() ) + "D: " + name ); + std::to_string(attr.shape.size()) + "D: " + name); } - *resource = fromRep( *attr.data ); + *resource = fromRep(*attr.data); } - void BufferedGet::run( BufferedActions & ba ) + void BufferedGet::run(BufferedActions &ba) { - switchAdios2VariableType< detail::DatasetReader >( - param.dtype, - ba.m_impl, - *this, - ba.m_IO, - ba.getEngine(), - ba.m_file ); + switchAdios2VariableType( + param.dtype, ba.m_impl, *this, ba.m_IO, ba.getEngine(), ba.m_file); } - void - BufferedPut::run( BufferedActions & ba ) + void BufferedPut::run(BufferedActions &ba) { - switchAdios2VariableType< detail::WriteDataset >( - param.dtype, ba.m_impl, *this, ba.m_IO, ba.getEngine() ); + switchAdios2VariableType( + param.dtype, ba.m_impl, *this, ba.m_IO, ba.getEngine()); } - void - OldBufferedAttributeRead::run( BufferedActions & ba ) + void OldBufferedAttributeRead::run(BufferedActions &ba) { - auto type = attributeInfo( ba.m_IO, name, /* verbose = */ true ); + auto type = attributeInfo(ba.m_IO, 
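Aside (illustrative, not part of this patch): AttributeTypes<bool> above stores booleans through a representation type (toRep/fromRep) and marks them with the extra str_isBooleanNewLayout attribute so readers can distinguish them from plain integers. A sketch of that round-trip; the unsigned char representation is an assumption here, since the actual bool_representation/toRep/fromRep live elsewhere in the backend:

    #include <cassert>

    // Assumed stand-ins for the backend's bool helpers.
    using bool_representation = unsigned char;

    constexpr bool_representation toRep(bool b)
    {
        return b ? 1 : 0;
    }
    constexpr bool fromRep(bool_representation r)
    {
        return r != 0;
    }

    int main()
    {
        assert(fromRep(toRep(true)) && !fromRep(toRep(false)));
    }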
name, /* verbose = */ true); - if( type == Datatype::UNDEFINED ) + if (type == Datatype::UNDEFINED) { throw std::runtime_error( "[ADIOS2] Requested attribute (" + name + - ") not found in backend." ); + ") not found in backend."); } - Datatype ret = switchType< detail::OldAttributeReader >( - type, ba.m_IO, name, param.resource ); + Datatype ret = switchType( + type, ba.m_IO, name, param.resource); *param.dtype = ret; } - void - BufferedAttributeRead::run( BufferedActions & ba ) + void BufferedAttributeRead::run(BufferedActions &ba) { auto type = attributeInfo( ba.m_IO, name, /* verbose = */ true, - VariableOrAttribute::Variable ); + VariableOrAttribute::Variable); - if( type == Datatype::UNDEFINED ) + if (type == Datatype::UNDEFINED) { throw std::runtime_error( "[ADIOS2] Requested attribute (" + name + - ") not found in backend." ); + ") not found in backend."); } - Datatype ret = switchType< detail::AttributeReader >( - type, - ba.m_IO, - ba.preloadAttributes, - name, - param.resource ); + Datatype ret = switchType( + type, ba.m_IO, ba.preloadAttributes, name, param.resource); *param.dtype = ret; } - void - BufferedAttributeWrite::run( BufferedActions & fileData ) + void BufferedAttributeWrite::run(BufferedActions &fileData) { - switchType< detail::AttributeWriter >( dtype, *this, fileData ); + switchType(dtype, *this, fileData); } BufferedActions::BufferedActions( - ADIOS2IOHandlerImpl & impl, InvalidatableFile file ) - : m_file( impl.fullPath( std::move( file ) ) ) - , m_IOName( std::to_string( impl.nameCounter++ ) ) - , m_ADIOS( impl.m_ADIOS ) - , m_IO( impl.m_ADIOS.DeclareIO( m_IOName ) ) - , m_mode( impl.adios2AccessMode( m_file ) ) - , m_impl( &impl ) - , m_engineType( impl.m_engineType ) + ADIOS2IOHandlerImpl &impl, InvalidatableFile file) + : m_file(impl.fullPath(std::move(file))) + , m_IOName(std::to_string(impl.nameCounter++)) + , m_ADIOS(impl.m_ADIOS) + , m_IO(impl.m_ADIOS.DeclareIO(m_IOName)) + , m_mode(impl.adios2AccessMode(m_file)) + , m_impl(&impl) + , m_engineType(impl.m_engineType) { - if( !m_IO ) + if (!m_IO) { throw std::runtime_error( - "[ADIOS2] Internal error: Failed declaring ADIOS2 IO object for file " + - m_file ); + "[ADIOS2] Internal error: Failed declaring ADIOS2 IO object " + "for file " + + m_file); } else { @@ -2195,7 +2117,7 @@ namespace detail void BufferedActions::finalize() { - if( finalized ) + if (finalized) { return; } @@ -2204,49 +2126,46 @@ namespace detail // (attributes are written upon closing a step or a file // which users might never do) bool needToWriteAttributes = !m_attributeWrites.empty(); - if( ( needToWriteAttributes || !m_engine ) && - m_mode != adios2::Mode::Read ) + if ((needToWriteAttributes || !m_engine) && + m_mode != adios2::Mode::Read) { - auto & engine = getEngine(); - if( needToWriteAttributes ) + auto &engine = getEngine(); + if (needToWriteAttributes) { - for( auto & pair : m_attributeWrites ) + for (auto &pair : m_attributeWrites) { - pair.second.run( *this ); + pair.second.run(*this); } engine.PerformPuts(); } } - if( m_engine ) + if (m_engine) { - auto & engine = m_engine.value(); + auto &engine = m_engine.value(); // might have been closed previously - if( engine ) + if (engine) { - if( streamStatus == StreamStatus::DuringStep ) + if (streamStatus == StreamStatus::DuringStep) { engine.EndStep(); } engine.Close(); - m_ADIOS.RemoveIO( m_IOName ); + m_ADIOS.RemoveIO(m_IOName); } } finalized = true; } - void - BufferedActions::configure_IO( ADIOS2IOHandlerImpl & impl ) + void BufferedActions::configure_IO(ADIOS2IOHandlerImpl 
&impl) { - ( void )impl; - static std::set< std::string > streamingEngines = { - "sst", "insitumpi", "inline", "staging", "nullcore", "ssc" - }; - static std::set< std::string > fileEngines = { - "bp5", "bp4", "bp3", "hdf5", "file" - }; + (void)impl; + static std::set streamingEngines = { + "sst", "insitumpi", "inline", "staging", "nullcore", "ssc"}; + static std::set fileEngines = { + "bp5", "bp4", "bp3", "hdf5", "file"}; // step/variable-based iteration encoding requires the new schema - if( m_impl->m_iterationEncoding == IterationEncoding::variableBased ) + if (m_impl->m_iterationEncoding == IterationEncoding::variableBased) { m_impl->m_schema = ADIOS2Schema::schema_2021_02_09; } @@ -2255,17 +2174,17 @@ namespace detail bool isStreaming = false; { // allow overriding through environment variable - m_engineType = auxiliary::getEnvString( - "OPENPMD_ADIOS2_ENGINE", m_engineType ); + m_engineType = + auxiliary::getEnvString("OPENPMD_ADIOS2_ENGINE", m_engineType); std::transform( m_engineType.begin(), m_engineType.end(), m_engineType.begin(), - []( unsigned char c ) { return std::tolower( c ); } ); + [](unsigned char c) { return std::tolower(c); }); impl.m_engineType = this->m_engineType; - m_IO.SetEngine( m_engineType ); - auto it = streamingEngines.find( m_engineType ); - if( it != streamingEngines.end() ) + m_IO.SetEngine(m_engineType); + auto it = streamingEngines.find(m_engineType); + if (it != streamingEngines.end()) { isStreaming = true; optimizeAttributesStreaming = @@ -2274,10 +2193,10 @@ namespace detail } else { - it = fileEngines.find( m_engineType ); - if( it != fileEngines.end() ) + it = fileEngines.find(m_engineType); + if (it != fileEngines.end()) { - switch( m_mode ) + switch (m_mode) { case adios2::Mode::Read: /* @@ -2295,7 +2214,7 @@ namespace detail * Default for old layout is no steps. * Default for new layout is to use steps. */ - switch( schema() ) + switch (schema()) { case SupportedSchema::s_0000_00_00: streamStatus = StreamStatus::NoStream; @@ -2306,7 +2225,7 @@ namespace detail } break; default: - throw std::runtime_error( "Unreachable!" ); + throw std::runtime_error("Unreachable!"); } optimizeAttributesStreaming = false; } @@ -2315,152 +2234,156 @@ namespace detail throw std::runtime_error( "[ADIOS2IOHandler] Unknown engine type. 
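Aside (illustrative, not part of this patch): configure_IO above resolves the engine type by letting OPENPMD_ADIOS2_ENGINE override the configured default, lower-casing the result, and classifying it as a streaming or file engine. The project uses its own auxiliary::getEnvString helper; the following is only a standalone approximation of that logic:

    #include <algorithm>
    #include <cctype>
    #include <cstdlib>
    #include <set>
    #include <string>

    std::string resolveEngineType(std::string dflt)
    {
        if (char const *env = std::getenv("OPENPMD_ADIOS2_ENGINE"))
            dflt = env; // environment overrides the configured default
        std::transform(
            dflt.begin(), dflt.end(), dflt.begin(), [](unsigned char c) {
                return static_cast<char>(std::tolower(c));
            });
        return dflt;
    }

    bool isStreamingEngine(std::string const &engine)
    {
        static std::set<std::string> const streaming{
            "sst", "insitumpi", "inline", "staging", "nullcore", "ssc"};
        return streaming.count(engine) > 0;
    }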
Please choose " "one out of " - "[sst, staging, bp4, bp3, hdf5, file, null]" ); + "[sst, staging, bp4, bp3, hdf5, file, null]"); // not listing unsupported engines } } } // set engine parameters - std::set< std::string > alreadyConfigured; - auto engineConfig = impl.config( ADIOS2Defaults::str_engine ); - if( !engineConfig.json().is_null() ) + std::set alreadyConfigured; + auto engineConfig = impl.config(ADIOS2Defaults::str_engine); + if (!engineConfig.json().is_null()) { - auto params = - impl.config( ADIOS2Defaults::str_params, engineConfig ); + auto params = impl.config(ADIOS2Defaults::str_params, engineConfig); params.declareFullyRead(); - if( params.json().is_object() ) + if (params.json().is_object()) { - for( auto it = params.json().begin(); it != params.json().end(); - it++ ) + for (auto it = params.json().begin(); it != params.json().end(); + it++) { - auto maybeString = json::asStringDynamic( it.value() ); - if( maybeString.has_value() ) + auto maybeString = json::asStringDynamic(it.value()); + if (maybeString.has_value()) { m_IO.SetParameter( - it.key(), std::move( maybeString.value() ) ); + it.key(), std::move(maybeString.value())); } else { throw error::BackendConfigSchema( - {"adios2", "engine", "parameters", it.key() }, - "Must be convertible to string type." ); + {"adios2", "engine", "parameters", it.key()}, + "Must be convertible to string type."); } alreadyConfigured.emplace( - auxiliary::lowerCase( std::string( it.key() ) ) ); + auxiliary::lowerCase(std::string(it.key()))); } } auto _useAdiosSteps = - impl.config( ADIOS2Defaults::str_usesteps, engineConfig ); - if( !_useAdiosSteps.json().is_null() && - m_mode != adios2::Mode::Read ) + impl.config(ADIOS2Defaults::str_usesteps, engineConfig); + if (!_useAdiosSteps.json().is_null() && + m_mode != adios2::Mode::Read) { bool tmp = _useAdiosSteps.json(); - if( isStreaming && !bool( tmp ) ) + if (isStreaming && !bool(tmp)) { throw std::runtime_error( - "Cannot switch off steps for streaming engines." ); + "Cannot switch off steps for streaming engines."); } - streamStatus = bool( tmp ) ? StreamStatus::OutsideOfStep - : StreamStatus::NoStream; + streamStatus = bool(tmp) ? 
StreamStatus::OutsideOfStep + : StreamStatus::NoStream; } } auto shadow = impl.m_config.invertShadow(); - if( shadow.size() > 0 ) + if (shadow.size() > 0) { - switch( impl.m_config.originallySpecifiedAs ) + switch (impl.m_config.originallySpecifiedAs) { case json::SupportedLanguages::JSON: std::cerr << "Warning: parts of the backend configuration for " - "ADIOS2 remain unused:\n" - << shadow << std::endl; + "ADIOS2 remain unused:\n" + << shadow << std::endl; break; - case json::SupportedLanguages::TOML: - { - auto asToml = json::jsonToToml( shadow ); + case json::SupportedLanguages::TOML: { + auto asToml = json::jsonToToml(shadow); std::cerr << "Warning: parts of the backend configuration for " - "ADIOS2 remain unused:\n" - << asToml << std::endl; + "ADIOS2 remain unused:\n" + << asToml << std::endl; break; } } } - auto notYetConfigured = - [ &alreadyConfigured ]( std::string const & param ) { - auto it = alreadyConfigured.find( - auxiliary::lowerCase( std::string( param ) ) ); - return it == alreadyConfigured.end(); - }; + auto notYetConfigured = [&alreadyConfigured](std::string const ¶m) { + auto it = alreadyConfigured.find( + auxiliary::lowerCase(std::string(param))); + return it == alreadyConfigured.end(); + }; // read parameters from environment - if( notYetConfigured( "CollectiveMetadata" ) ) + if (notYetConfigured("CollectiveMetadata")) { - if( 1 == - auxiliary::getEnvNum( "OPENPMD_ADIOS2_HAVE_METADATA_FILE", 1 ) ) + if (1 == + auxiliary::getEnvNum("OPENPMD_ADIOS2_HAVE_METADATA_FILE", 1)) { - m_IO.SetParameter( "CollectiveMetadata", "On" ); + m_IO.SetParameter("CollectiveMetadata", "On"); } else { - m_IO.SetParameter( "CollectiveMetadata", "Off" ); + m_IO.SetParameter("CollectiveMetadata", "Off"); } } - if( notYetConfigured( "Profile" ) ) + if (notYetConfigured("Profile")) { - if( 1 == - auxiliary::getEnvNum( - "OPENPMD_ADIOS2_HAVE_PROFILING", 1 ) && - notYetConfigured( "Profile" ) ) + if (1 == auxiliary::getEnvNum("OPENPMD_ADIOS2_HAVE_PROFILING", 1) && + notYetConfigured("Profile")) { - m_IO.SetParameter( "Profile", "On" ); + m_IO.SetParameter("Profile", "On"); } else { - m_IO.SetParameter( "Profile", "Off" ); + m_IO.SetParameter("Profile", "Off"); } } #if openPMD_HAVE_MPI { auto num_substreams = - auxiliary::getEnvNum( "OPENPMD_ADIOS2_NUM_SUBSTREAMS", 0 ); - if( notYetConfigured( "SubStreams" ) && 0 != num_substreams ) + auxiliary::getEnvNum("OPENPMD_ADIOS2_NUM_SUBSTREAMS", 0); + if (notYetConfigured("SubStreams") && 0 != num_substreams) { - m_IO.SetParameter( - "SubStreams", std::to_string( num_substreams ) ); + m_IO.SetParameter("SubStreams", std::to_string(num_substreams)); } // BP5 parameters - auto numAgg = auxiliary::getEnvNum( "OPENPMD_ADIOS2_BP5_NumAgg", 0 ); - auto numSubFiles = auxiliary::getEnvNum( "OPENPMD_ADIOS2_BP5_NumSubFiles", 0 ); - auto AggTypeStr = auxiliary::getEnvString( "OPENPMD_ADIOS2_BP5_TypeAgg", "" ); - auto MaxShmMB = auxiliary::getEnvNum( "OPENPMD_ADIOS2_BP5_MaxShmMB", 0 ); - auto BufferChunkMB = auxiliary::getEnvNum( "OPENPMD_ADIOS2_BP5_BufferChunkMB", 0 ); - - if ( notYetConfigured( "NumAggregators" ) && ( numAgg > 0 ) ) - m_IO.SetParameter( "NumAggregators", std::to_string( numAgg ) ); - if ( notYetConfigured( "NumSubFiles" ) && ( numSubFiles > 0 ) ) - m_IO.SetParameter( "NumSubFiles", std::to_string( numSubFiles) ); - if ( notYetConfigured( "AggregationType" ) && ( AggTypeStr.size() > 0 ) ) - m_IO.SetParameter( "AggregationType", AggTypeStr ); - if ( notYetConfigured( "BufferChunkSize" ) && ( BufferChunkMB > 0 ) ) - m_IO.SetParameter( 
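Aside (illustrative, not part of this patch): the defaults applied above (CollectiveMetadata, Profile, SubStreams, ...) come from auxiliary::getEnvNum, i.e. "read an integer from the environment, otherwise use a default", and are only applied when notYetConfigured() shows that the JSON configuration did not already set the parameter. A standalone approximation of such a lookup helper, not the project's actual implementation:

    #include <cstdlib>
    #include <string>

    // Approximate integer environment lookup with a fallback default.
    long getEnvNum(char const *key, long dflt)
    {
        char const *env = std::getenv(key);
        if (!env)
            return dflt;
        try
        {
            return std::stol(env);
        }
        catch (...)
        {
            return dflt; // unparsable values fall back to the default
        }
    }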
"BufferChunkSize", std::to_string( (uint64_t)BufferChunkMB * (uint64_t) 1048576 ) ); - if ( notYetConfigured( "MaxShmSize" ) && ( MaxShmMB > 0 ) ) - m_IO.SetParameter( "MaxShmSize", std::to_string( (uint64_t)MaxShmMB * (uint64_t)1048576 ) ); - } -# endif - if( notYetConfigured( "StatsLevel" ) ) + auto numAgg = auxiliary::getEnvNum("OPENPMD_ADIOS2_BP5_NumAgg", 0); + auto numSubFiles = + auxiliary::getEnvNum("OPENPMD_ADIOS2_BP5_NumSubFiles", 0); + auto AggTypeStr = + auxiliary::getEnvString("OPENPMD_ADIOS2_BP5_TypeAgg", ""); + auto MaxShmMB = + auxiliary::getEnvNum("OPENPMD_ADIOS2_BP5_MaxShmMB", 0); + auto BufferChunkMB = + auxiliary::getEnvNum("OPENPMD_ADIOS2_BP5_BufferChunkMB", 0); + + if (notYetConfigured("NumAggregators") && (numAgg > 0)) + m_IO.SetParameter("NumAggregators", std::to_string(numAgg)); + if (notYetConfigured("NumSubFiles") && (numSubFiles > 0)) + m_IO.SetParameter("NumSubFiles", std::to_string(numSubFiles)); + if (notYetConfigured("AggregationType") && (AggTypeStr.size() > 0)) + m_IO.SetParameter("AggregationType", AggTypeStr); + if (notYetConfigured("BufferChunkSize") && (BufferChunkMB > 0)) + m_IO.SetParameter( + "BufferChunkSize", + std::to_string( + (uint64_t)BufferChunkMB * (uint64_t)1048576)); + if (notYetConfigured("MaxShmSize") && (MaxShmMB > 0)) + m_IO.SetParameter( + "MaxShmSize", + std::to_string((uint64_t)MaxShmMB * (uint64_t)1048576)); + } +#endif + if (notYetConfigured("StatsLevel")) { /* * Switch those off by default since they are expensive to compute - * and to enable it, set the JSON option "StatsLevel" or the environment - * variable "OPENPMD_ADIOS2_STATS_LEVEL" be positive. + * and to enable it, set the JSON option "StatsLevel" or the + * environment variable "OPENPMD_ADIOS2_STATS_LEVEL" be positive. * The ADIOS2 default was "1" (on). */ - auto stats_level = auxiliary::getEnvNum( "OPENPMD_ADIOS2_STATS_LEVEL", 0 ); - m_IO.SetParameter( "StatsLevel", std::to_string( stats_level ) ); + auto stats_level = + auxiliary::getEnvNum("OPENPMD_ADIOS2_STATS_LEVEL", 0); + m_IO.SetParameter("StatsLevel", std::to_string(stats_level)); } - if( m_engineType == "sst" && notYetConfigured( "QueueLimit" ) ) + if (m_engineType == "sst" && notYetConfigured("QueueLimit")) { /* * By default, the SST engine of ADIOS2 does not set a limit on its @@ -2479,7 +2402,7 @@ namespace detail * keeping pipeline parallelism a default without running the risk * of using unbound memory. */ - m_IO.SetParameter( "QueueLimit", "2" ); + m_IO.SetParameter("QueueLimit", "2"); } // We need to open the engine now already to inquire configuration @@ -2487,58 +2410,58 @@ namespace detail getEngine(); } - adios2::Engine & BufferedActions::getEngine() + adios2::Engine &BufferedActions::getEngine() { - if( !m_engine ) + if (!m_engine) { - switch( m_mode ) + switch (m_mode) { case adios2::Mode::Write: { // usesSteps attribute only written upon ::advance() // this makes sure that the attribute is only put in case // the streaming API was used. 
- m_IO.DefineAttribute< ADIOS2Schema::schema_t >( - ADIOS2Defaults::str_adios2Schema, m_impl->m_schema ); + m_IO.DefineAttribute( + ADIOS2Defaults::str_adios2Schema, m_impl->m_schema); m_engine = std::make_optional( - adios2::Engine( m_IO.Open( m_file, m_mode ) ) ); + adios2::Engine(m_IO.Open(m_file, m_mode))); break; } case adios2::Mode::Read: { m_engine = std::make_optional( - adios2::Engine( m_IO.Open( m_file, m_mode ) ) ); + adios2::Engine(m_IO.Open(m_file, m_mode))); // decide attribute layout // in streaming mode, this needs to be done after opening // a step // in file-based mode, we do it before - auto layoutVersion = [ IO{ m_IO } ]() mutable { - auto attr = IO.InquireAttribute< ADIOS2Schema::schema_t >( - ADIOS2Defaults::str_adios2Schema ); - if( !attr ) + auto layoutVersion = [IO{m_IO}]() mutable { + auto attr = IO.InquireAttribute( + ADIOS2Defaults::str_adios2Schema); + if (!attr) { return ADIOS2Schema::schema_0000_00_00; } else { - return attr.Data()[ 0 ]; + return attr.Data()[0]; } }; // decide streaming mode - switch( streamStatus ) + switch (streamStatus) { case StreamStatus::Undecided: { m_impl->m_schema = layoutVersion(); - auto attr = m_IO.InquireAttribute< bool_representation >( - ADIOS2Defaults::str_usesstepsAttribute ); - if( attr && attr.Data()[ 0 ] == 1 ) + auto attr = m_IO.InquireAttribute( + ADIOS2Defaults::str_usesstepsAttribute); + if (attr && attr.Data()[0] == 1) { - if( delayOpeningTheFirstStep ) + if (delayOpeningTheFirstStep) { streamStatus = StreamStatus::Parsing; } else { - if( m_engine.value().BeginStep() != - adios2::StepStatus::OK ) + if (m_engine.value().BeginStep() != + adios2::StepStatus::OK) { throw std::runtime_error( "[ADIOS2] Unexpected step status when " @@ -2554,7 +2477,7 @@ namespace detail break; } case StreamStatus::OutsideOfStep: - if( m_engine.value().BeginStep() != adios2::StepStatus::OK ) + if (m_engine.value().BeginStep() != adios2::StepStatus::OK) { throw std::runtime_error( "[ADIOS2] Unexpected step status when " @@ -2564,87 +2487,85 @@ namespace detail streamStatus = StreamStatus::DuringStep; break; default: - throw std::runtime_error( "[ADIOS2] Control flow error!" ); + throw std::runtime_error("[ADIOS2] Control flow error!"); } - if( attributeLayout() == AttributeLayout::ByAdiosVariables ) + if (attributeLayout() == AttributeLayout::ByAdiosVariables) { - preloadAttributes.preloadAttributes( m_IO, m_engine.value() ); + preloadAttributes.preloadAttributes(m_IO, m_engine.value()); } break; } default: - throw std::runtime_error( - "[ADIOS2] Invalid ADIOS access mode" ); + throw std::runtime_error("[ADIOS2] Invalid ADIOS access mode"); } - if( !m_engine.value() ) + if (!m_engine.value()) { - throw std::runtime_error( "[ADIOS2] Failed opening Engine." 
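Aside (illustrative, not part of this patch): getEngine() above opens the adios2::Engine only on first use, caches it in a std::optional, and decides schema and streaming mode at that point. The lazy-open-and-cache pattern in isolation, with generic types instead of ADIOS2:

    #include <functional>
    #include <optional>

    // Generic sketch of "open on first access, then cache".
    template <typename Handle>
    class Lazy
    {
    public:
        explicit Lazy(std::function<Handle()> open) : m_open(std::move(open)) {}

        Handle &get()
        {
            if (!m_handle)
                m_handle = std::make_optional(m_open()); // open exactly once
            return m_handle.value();
        }

    private:
        std::function<Handle()> m_open;
        std::optional<Handle> m_handle;
    };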
); + throw std::runtime_error("[ADIOS2] Failed opening Engine."); } } return m_engine.value(); } - adios2::Engine & BufferedActions::requireActiveStep( ) + adios2::Engine &BufferedActions::requireActiveStep() { - adios2::Engine & eng = getEngine(); - if( streamStatus == StreamStatus::OutsideOfStep ) + adios2::Engine &eng = getEngine(); + if (streamStatus == StreamStatus::OutsideOfStep) { m_lastStepStatus = eng.BeginStep(); - if( m_mode == adios2::Mode::Read && - attributeLayout() == AttributeLayout::ByAdiosVariables ) + if (m_mode == adios2::Mode::Read && + attributeLayout() == AttributeLayout::ByAdiosVariables) { - preloadAttributes.preloadAttributes( m_IO, m_engine.value() ); + preloadAttributes.preloadAttributes(m_IO, m_engine.value()); } streamStatus = StreamStatus::DuringStep; } return eng; } - template < typename BA > void BufferedActions::enqueue( BA && ba ) + template + void BufferedActions::enqueue(BA &&ba) { - enqueue< BA >( std::forward< BA >( ba ), m_buffer ); + enqueue(std::forward(ba), m_buffer); } - template < typename BA > void BufferedActions::enqueue( - BA && ba, - decltype( m_buffer ) & buffer ) + template + void BufferedActions::enqueue(BA &&ba, decltype(m_buffer) &buffer) { - using _BA = typename std::remove_reference< BA >::type; - buffer.emplace_back( std::unique_ptr< BufferedAction >( - new _BA( std::forward< BA >( ba ) ) ) ); + using _BA = typename std::remove_reference::type; + buffer.emplace_back( + std::unique_ptr(new _BA(std::forward(ba)))); } - template< typename F > - void - BufferedActions::flush( + template + void BufferedActions::flush( FlushLevel level, - F && performPutGets, + F &&performPutGets, bool writeAttributes, - bool flushUnconditionally ) + bool flushUnconditionally) { - if( streamStatus == StreamStatus::StreamOver ) + if (streamStatus == StreamStatus::StreamOver) { - if( flushUnconditionally ) + if (flushUnconditionally) { throw std::runtime_error( - "[ADIOS2] Cannot access engine since stream is over." ); + "[ADIOS2] Cannot access engine since stream is over."); } return; } - auto & eng = getEngine(); + auto &eng = getEngine(); /* * Only open a new step if it is necessary. 
*/ - if( streamStatus == StreamStatus::OutsideOfStep ) + if (streamStatus == StreamStatus::OutsideOfStep) { - if( m_buffer.empty() && - ( !writeAttributes || m_attributeWrites.empty() ) && - m_attributeReads.empty() ) + if (m_buffer.empty() && + (!writeAttributes || m_attributeWrites.empty()) && + m_attributeReads.empty()) { - if( flushUnconditionally ) + if (flushUnconditionally) { - performPutGets( *this, eng ); + performPutGets(*this, eng); } return; } @@ -2653,225 +2574,214 @@ namespace detail requireActiveStep(); } } - for( auto & ba : m_buffer ) + for (auto &ba : m_buffer) { - ba->run( *this ); + ba->run(*this); } - if( writeAttributes ) + if (writeAttributes) { - for( auto & pair : m_attributeWrites ) + for (auto &pair : m_attributeWrites) { - pair.second.run( *this ); + pair.second.run(*this); } } - if( this->m_mode == adios2::Mode::Read ) + if (this->m_mode == adios2::Mode::Read) { level = FlushLevel::UserFlush; } - switch( level ) + switch (level) { - case FlushLevel::UserFlush: - performPutGets( *this, eng ); - m_updateSpans.clear(); - m_buffer.clear(); - m_alreadyEnqueued.clear(); - if( writeAttributes ) - { - m_attributeWrites.clear(); - } + case FlushLevel::UserFlush: + performPutGets(*this, eng); + m_updateSpans.clear(); + m_buffer.clear(); + m_alreadyEnqueued.clear(); + if (writeAttributes) + { + m_attributeWrites.clear(); + } - for( BufferedAttributeRead & task : m_attributeReads ) - { - task.run( *this ); - } - m_attributeReads.clear(); - break; + for (BufferedAttributeRead &task : m_attributeReads) + { + task.run(*this); + } + m_attributeReads.clear(); + break; - case FlushLevel::InternalFlush: - case FlushLevel::SkeletonOnly: - /* - * Tasks have been given to ADIOS2, but we don't flush them - * yet. So, move everything to m_alreadyEnqueued to avoid - * use-after-free. - */ - for( auto & task : m_buffer ) - { - m_alreadyEnqueued.emplace_back( std::move( task ) ); - } - if( writeAttributes ) + case FlushLevel::InternalFlush: + case FlushLevel::SkeletonOnly: + /* + * Tasks have been given to ADIOS2, but we don't flush them + * yet. So, move everything to m_alreadyEnqueued to avoid + * use-after-free. + */ + for (auto &task : m_buffer) + { + m_alreadyEnqueued.emplace_back(std::move(task)); + } + if (writeAttributes) + { + for (auto &task : m_attributeWrites) { - for( auto & task : m_attributeWrites ) - { - m_alreadyEnqueued.emplace_back( - std::unique_ptr< BufferedAction >{ - new BufferedAttributeWrite{ - std::move( task.second ) } } ); - } - m_attributeWrites.clear(); + m_alreadyEnqueued.emplace_back( + std::unique_ptr{ + new BufferedAttributeWrite{ + std::move(task.second)}}); } - m_buffer.clear(); - break; + m_attributeWrites.clear(); + } + m_buffer.clear(); + break; } } - void - BufferedActions::flush( FlushLevel level, bool writeAttributes ) + void BufferedActions::flush(FlushLevel level, bool writeAttributes) { flush( level, - []( BufferedActions & ba, adios2::Engine & eng ) { - switch( ba.m_mode ) + [](BufferedActions &ba, adios2::Engine &eng) { + switch (ba.m_mode) { - case adios2::Mode::Write: - eng.PerformPuts(); - break; - case adios2::Mode::Read: - eng.PerformGets(); - break; - case adios2::Mode::Append: - // TODO order? - eng.PerformGets(); - eng.PerformPuts(); - break; - default: - break; + case adios2::Mode::Write: + eng.PerformPuts(); + break; + case adios2::Mode::Read: + eng.PerformGets(); + break; + case adios2::Mode::Append: + // TODO order? 
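Aside (illustrative, not part of this patch): the InternalFlush/SkeletonOnly branch above hands tasks to ADIOS2 without performing the Puts/Gets yet, so the queued task objects, which own the staging buffers ADIOS2 still points into, are moved to m_alreadyEnqueued instead of being destroyed; only a UserFlush may release them. A reduced sketch of that ownership rule:

    #include <memory>
    #include <utility>
    #include <vector>

    struct Action
    {
        std::vector<char> buffer; // storage a deferred Put still points into
    };

    struct TaskQueue
    {
        std::vector<std::unique_ptr<Action>> pending;
        std::vector<std::unique_ptr<Action>> alreadyEnqueued;

        // Pointers were handed to the engine, but PerformPuts() has not run:
        // keep the actions alive instead of destroying them.
        void internalFlush()
        {
            for (auto &task : pending)
                alreadyEnqueued.emplace_back(std::move(task));
            pending.clear();
        }

        // PerformPuts()/PerformGets() ran: buffers may now be released.
        void userFlush()
        {
            internalFlush();
            alreadyEnqueued.clear();
        }
    };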
+ eng.PerformGets(); + eng.PerformPuts(); + break; + default: + break; } }, writeAttributes, - /* flushUnconditionally = */ false ); + /* flushUnconditionally = */ false); } - AdvanceStatus - BufferedActions::advance( AdvanceMode mode ) + AdvanceStatus BufferedActions::advance(AdvanceMode mode) { - if( streamStatus == StreamStatus::Undecided ) + if (streamStatus == StreamStatus::Undecided) { // stream status gets decided on upon opening an engine getEngine(); } // sic! no else - if( streamStatus == StreamStatus::NoStream ) + if (streamStatus == StreamStatus::NoStream) { - m_IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_usesstepsAttribute, 0 ); - flush( FlushLevel::UserFlush, /* writeAttributes = */ false ); + m_IO.DefineAttribute( + ADIOS2Defaults::str_usesstepsAttribute, 0); + flush(FlushLevel::UserFlush, /* writeAttributes = */ false); return AdvanceStatus::OK; } - m_IO.DefineAttribute< bool_representation >( - ADIOS2Defaults::str_usesstepsAttribute, 1 ); - switch( mode ) + m_IO.DefineAttribute( + ADIOS2Defaults::str_usesstepsAttribute, 1); + switch (mode) { - case AdvanceMode::ENDSTEP: + case AdvanceMode::ENDSTEP: { + /* + * Advance mode write: + * Close the current step, defer opening the new step + * until one is actually needed: + * (1) The engine is accessed in BufferedActions::flush + * (2) A new step is opened before the currently active step + * has seen an access. See the following lines: open the + * step just to skip it again. + */ + if (streamStatus == StreamStatus::OutsideOfStep) { - /* - * Advance mode write: - * Close the current step, defer opening the new step - * until one is actually needed: - * (1) The engine is accessed in BufferedActions::flush - * (2) A new step is opened before the currently active step - * has seen an access. See the following lines: open the - * step just to skip it again. 
- */ - if( streamStatus == StreamStatus::OutsideOfStep ) + if (getEngine().BeginStep() != adios2::StepStatus::OK) { - if( getEngine().BeginStep() != adios2::StepStatus::OK ) - { - throw std::runtime_error( - "[ADIOS2] Trying to close a step that cannot be " - "opened."); - } + throw std::runtime_error( + "[ADIOS2] Trying to close a step that cannot be " + "opened."); } - flush( - FlushLevel::UserFlush, - []( BufferedActions &, adios2::Engine & eng ) - { eng.EndStep(); }, - /* writeAttributes = */ true, - /* flushUnconditionally = */ true ); - uncommittedAttributes.clear(); - m_updateSpans.clear(); - streamStatus = StreamStatus::OutsideOfStep; - return AdvanceStatus::OK; } - case AdvanceMode::BEGINSTEP: - { - adios2::StepStatus adiosStatus = m_lastStepStatus; + flush( + FlushLevel::UserFlush, + [](BufferedActions &, adios2::Engine &eng) { eng.EndStep(); }, + /* writeAttributes = */ true, + /* flushUnconditionally = */ true); + uncommittedAttributes.clear(); + m_updateSpans.clear(); + streamStatus = StreamStatus::OutsideOfStep; + return AdvanceStatus::OK; + } + case AdvanceMode::BEGINSTEP: { + adios2::StepStatus adiosStatus = m_lastStepStatus; - // Step might have been opened implicitly already - // by requireActiveStep() - // In that case, streamStatus is DuringStep and Adios - // return status is stored in m_lastStepStatus - if( streamStatus != StreamStatus::DuringStep ) - { - flush( - FlushLevel::UserFlush, - [ &adiosStatus ]( - BufferedActions &, adios2::Engine & engine ) { - adiosStatus = engine.BeginStep(); - }, - /* writeAttributes = */ false, - /* flushUnconditionally = */ true ); - if( adiosStatus == adios2::StepStatus::OK && - m_mode == adios2::Mode::Read && - attributeLayout() == AttributeLayout::ByAdiosVariables ) - { - preloadAttributes.preloadAttributes( - m_IO, m_engine.value() ); - } - } - AdvanceStatus res = AdvanceStatus::OK; - switch( adiosStatus ) + // Step might have been opened implicitly already + // by requireActiveStep() + // In that case, streamStatus is DuringStep and Adios + // return status is stored in m_lastStepStatus + if (streamStatus != StreamStatus::DuringStep) + { + flush( + FlushLevel::UserFlush, + [&adiosStatus](BufferedActions &, adios2::Engine &engine) { + adiosStatus = engine.BeginStep(); + }, + /* writeAttributes = */ false, + /* flushUnconditionally = */ true); + if (adiosStatus == adios2::StepStatus::OK && + m_mode == adios2::Mode::Read && + attributeLayout() == AttributeLayout::ByAdiosVariables) { - case adios2::StepStatus::EndOfStream: - streamStatus = StreamStatus::StreamOver; - res = AdvanceStatus::OVER; - break; - case adios2::StepStatus::OK: - streamStatus = StreamStatus::DuringStep; - res = AdvanceStatus::OK; - break; - case adios2::StepStatus::NotReady: - case adios2::StepStatus::OtherError: - throw std::runtime_error( - "[ADIOS2] Unexpected step status." 
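Aside (illustrative, not part of this patch): the BEGINSTEP branch translates the adios2::StepStatus returned by BeginStep() into the frontend's AdvanceStatus: EndOfStream ends the stream, OK continues, NotReady/OtherError throw. The mapping in isolation, with local stand-in enums rather than the real adios2/openPMD types:

    #include <stdexcept>

    // Local stand-ins for adios2::StepStatus and openPMD::AdvanceStatus.
    enum class StepStatus { OK, NotReady, EndOfStream, OtherError };
    enum class AdvanceStatus { OK, OVER };

    AdvanceStatus translate(StepStatus s)
    {
        switch (s)
        {
        case StepStatus::EndOfStream:
            return AdvanceStatus::OVER; // stream is over, no further steps
        case StepStatus::OK:
            return AdvanceStatus::OK;
        case StepStatus::NotReady:
        case StepStatus::OtherError:
            throw std::runtime_error("[ADIOS2] Unexpected step status.");
        }
        throw std::runtime_error("unreachable");
    }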
); + preloadAttributes.preloadAttributes(m_IO, m_engine.value()); } - invalidateAttributesMap(); - invalidateVariablesMap(); - return res; } + AdvanceStatus res = AdvanceStatus::OK; + switch (adiosStatus) + { + case adios2::StepStatus::EndOfStream: + streamStatus = StreamStatus::StreamOver; + res = AdvanceStatus::OVER; + break; + case adios2::StepStatus::OK: + streamStatus = StreamStatus::DuringStep; + res = AdvanceStatus::OK; + break; + case adios2::StepStatus::NotReady: + case adios2::StepStatus::OtherError: + throw std::runtime_error("[ADIOS2] Unexpected step status."); + } + invalidateAttributesMap(); + invalidateVariablesMap(); + return res; + } } throw std::runtime_error( "Internal error: Advance mode should be explicitly" - " chosen by the front-end." ); + " chosen by the front-end."); } - void BufferedActions::drop( ) + void BufferedActions::drop() { m_buffer.clear(); } - static std::vector< std::string > - availableAttributesOrVariablesPrefixed( - std::string const & prefix, - BufferedActions::AttributeMap_t const & ( - BufferedActions::*getBasicMap )(), - BufferedActions & ba ) + static std::vector availableAttributesOrVariablesPrefixed( + std::string const &prefix, + BufferedActions::AttributeMap_t const &( + BufferedActions::*getBasicMap)(), + BufferedActions &ba) { - std::string var = auxiliary::ends_with( prefix, '/' ) ? prefix - : prefix + '/'; - BufferedActions::AttributeMap_t const & attributes = - ( ba.*getBasicMap )(); - std::vector< std::string > ret; - for( auto it = attributes.lower_bound( prefix ); it != attributes.end(); - ++it ) + std::string var = + auxiliary::ends_with(prefix, '/') ? prefix : prefix + '/'; + BufferedActions::AttributeMap_t const &attributes = (ba.*getBasicMap)(); + std::vector ret; + for (auto it = attributes.lower_bound(prefix); it != attributes.end(); + ++it) { - if( auxiliary::starts_with( it->first, var ) ) + if (auxiliary::starts_with(it->first, var)) { - ret.emplace_back( - auxiliary::replace_first( it->first, var, "" ) ); + ret.emplace_back(auxiliary::replace_first(it->first, var, "")); } else { @@ -2881,80 +2791,72 @@ namespace detail return ret; } - std::vector< std::string > - BufferedActions::availableAttributesPrefixed( std::string const & prefix ) + std::vector + BufferedActions::availableAttributesPrefixed(std::string const &prefix) { return availableAttributesOrVariablesPrefixed( - prefix, - &BufferedActions::availableAttributes, - *this ); + prefix, &BufferedActions::availableAttributes, *this); } - std::vector< std::string > - BufferedActions::availableVariablesPrefixed( std::string const & prefix ) + std::vector + BufferedActions::availableVariablesPrefixed(std::string const &prefix) { return availableAttributesOrVariablesPrefixed( - prefix, - &BufferedActions::availableVariables, - *this ); + prefix, &BufferedActions::availableVariables, *this); } - void - BufferedActions::invalidateAttributesMap() + void BufferedActions::invalidateAttributesMap() { - m_availableAttributes = std::optional< AttributeMap_t >(); + m_availableAttributes = std::optional(); } BufferedActions::AttributeMap_t const & BufferedActions::availableAttributes() { - if( m_availableAttributes ) + if (m_availableAttributes) { return m_availableAttributes.value(); } else { m_availableAttributes = - std::make_optional( m_IO.AvailableAttributes() ); + std::make_optional(m_IO.AvailableAttributes()); return m_availableAttributes.value(); } } - void - BufferedActions::invalidateVariablesMap() + void BufferedActions::invalidateVariablesMap() { - m_availableVariables = 
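Aside (illustrative, not part of this patch): availableAttributesOrVariablesPrefixed above exploits the ordering of the attribute map: lower_bound(prefix) jumps to the first candidate and iteration can stop at the first key that no longer starts with "prefix/". A generic sketch of that scan:

    #include <map>
    #include <string>
    #include <vector>

    std::vector<std::string>
    keysWithPrefix(std::map<std::string, int> const &m, std::string prefix)
    {
        if (prefix.empty() || prefix.back() != '/')
            prefix += '/';
        std::vector<std::string> ret;
        for (auto it = m.lower_bound(prefix); it != m.end(); ++it)
        {
            // keys are sorted, so the first mismatch ends the prefix range
            if (it->first.compare(0, prefix.size(), prefix) != 0)
                break;
            ret.emplace_back(it->first.substr(prefix.size()));
        }
        return ret;
    }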
std::optional< AttributeMap_t >(); + m_availableVariables = std::optional(); } - BufferedActions::AttributeMap_t const & - BufferedActions::availableVariables() + BufferedActions::AttributeMap_t const &BufferedActions::availableVariables() { - if( m_availableVariables ) + if (m_availableVariables) { return m_availableVariables.value(); } else { m_availableVariables = - std::make_optional( m_IO.AvailableVariables() ); + std::make_optional(m_IO.AvailableVariables()); return m_availableVariables.value(); } } } // namespace detail -# if openPMD_HAVE_MPI +#if openPMD_HAVE_MPI ADIOS2IOHandler::ADIOS2IOHandler( std::string path, openPMD::Access at, MPI_Comm comm, json::TracingJSON options, - std::string engineType ) - : AbstractIOHandler( std::move( path ), at, comm ) - , m_impl{ this, comm, std::move( options ), std::move( engineType ) } -{ -} + std::string engineType) + : AbstractIOHandler(std::move(path), at, comm) + , m_impl{this, comm, std::move(options), std::move(engineType)} +{} #endif @@ -2962,45 +2864,34 @@ ADIOS2IOHandler::ADIOS2IOHandler( std::string path, Access at, json::TracingJSON options, - std::string engineType ) - : AbstractIOHandler( std::move( path ), at ) - , m_impl{ this, std::move( options ), std::move( engineType ) } -{ -} + std::string engineType) + : AbstractIOHandler(std::move(path), at) + , m_impl{this, std::move(options), std::move(engineType)} +{} -std::future< void > -ADIOS2IOHandler::flush() +std::future ADIOS2IOHandler::flush() { return m_impl.flush(); } #else // openPMD_HAVE_ADIOS2 -# if openPMD_HAVE_MPI +#if openPMD_HAVE_MPI ADIOS2IOHandler::ADIOS2IOHandler( - std::string path, - Access at, - MPI_Comm comm, - json::TracingJSON, - std::string ) - : AbstractIOHandler( std::move( path ), at, comm ) -{ -} + std::string path, Access at, MPI_Comm comm, json::TracingJSON, std::string) + : AbstractIOHandler(std::move(path), at, comm) +{} -# endif // openPMD_HAVE_MPI +#endif // openPMD_HAVE_MPI ADIOS2IOHandler::ADIOS2IOHandler( - std::string path, - Access at, - json::TracingJSON, - std::string ) - : AbstractIOHandler( std::move( path ), at ) -{ -} + std::string path, Access at, json::TracingJSON, std::string) + : AbstractIOHandler(std::move(path), at) +{} -std::future< void > ADIOS2IOHandler::flush( ) +std::future ADIOS2IOHandler::flush() { - return std::future< void >( ); + return std::future(); } #endif diff --git a/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp b/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp index ef61d05944..89e0a17d41 100644 --- a/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp +++ b/src/IO/ADIOS/ADIOS2PreloadAttributes.cpp @@ -36,256 +36,247 @@ namespace openPMD::detail { - namespace +namespace +{ + struct GetAlignment { - struct GetAlignment + template + static constexpr size_t call() { - template< typename T > - static constexpr size_t call() - { - return alignof(T); - } + return alignof(T); + } - template< unsigned long, typename... Args > - static constexpr size_t call( Args &&... ) - { - return alignof(std::max_align_t); - } - }; + template + static constexpr size_t call(Args &&...) + { + return alignof(std::max_align_t); + } + }; + + struct GetSize + { + template + static constexpr size_t call() + { + return sizeof(T); + } + + template + static constexpr size_t call(Args &&...) 
+ { + return 0; + } + }; - struct GetSize + struct ScheduleLoad + { + template + static void call( + adios2::IO &IO, + adios2::Engine &engine, + std::string const &name, + char *buffer, + PreloadAdiosAttributes::AttributeLocation &location) { - template< typename T > - static constexpr size_t call() + adios2::Variable var = IO.InquireVariable(name); + if (!var) { - return sizeof(T); + throw std::runtime_error( + "[ADIOS2] Variable not found: " + name); } - - template< unsigned long, typename... Args > - static constexpr size_t call( Args &&... ) + adios2::Dims const &shape = location.shape; + adios2::Dims offset(shape.size(), 0); + if (shape.size() > 0) { - return 0; + var.SetSelection({offset, shape}); } - }; - - struct ScheduleLoad - { - template< typename T > - static void call( - adios2::IO & IO, - adios2::Engine & engine, - std::string const & name, - char * buffer, - PreloadAdiosAttributes::AttributeLocation & location ) + T *dest = reinterpret_cast(buffer); + size_t numItems = 1; + for (auto extent : shape) { - adios2::Variable< T > var = IO.InquireVariable< T >( name ); - if( !var ) - { - throw std::runtime_error( - "[ADIOS2] Variable not found: " + name ); - } - adios2::Dims const & shape = location.shape; - adios2::Dims offset( shape.size(), 0 ); - if( shape.size() > 0 ) - { - var.SetSelection( { offset, shape } ); - } - T * dest = reinterpret_cast< T * >( buffer ); - size_t numItems = 1; - for( auto extent : shape ) - { - numItems *= extent; - } - /* - * MSVC does not like placement new of arrays, so we do it - * in a loop instead. - * https://developercommunity.visualstudio.com/t/c-placement-new-is-incorrectly-compiled/206439 - */ - for( size_t i = 0; i < numItems; ++i ) - { - new( dest + i ) T(); - } - location.destroy = buffer; - engine.Get( var, dest, adios2::Mode::Deferred ); + numItems *= extent; } + /* + * MSVC does not like placement new of arrays, so we do it + * in a loop instead. + * https://developercommunity.visualstudio.com/t/c-placement-new-is-incorrectly-compiled/206439 + */ + for (size_t i = 0; i < numItems; ++i) + { + new (dest + i) T(); + } + location.destroy = buffer; + engine.Get(var, dest, adios2::Mode::Deferred); + } - static constexpr char const * errorMsg = "ADIOS2"; - }; + static constexpr char const *errorMsg = "ADIOS2"; + }; - struct VariableShape + struct VariableShape + { + template + static adios2::Dims call(adios2::IO &IO, std::string const &name) { - template< typename T > - static adios2::Dims - call( adios2::IO & IO, std::string const & name ) + auto var = IO.InquireVariable(name); + if (!var) { - auto var = IO.InquireVariable< T >( name ); - if( !var ) - { - throw std::runtime_error( - "[ADIOS2] Variable not found: " + name ); - } - return var.Shape(); + throw std::runtime_error( + "[ADIOS2] Variable not found: " + name); } + return var.Shape(); + } - template< unsigned long n, typename... Args > - static adios2::Dims call( Args &&... ) - { - return {}; - } - }; + template + static adios2::Dims call(Args &&...) + { + return {}; + } + }; - struct AttributeLocationDestroy + struct AttributeLocationDestroy + { + template + static void call(char *ptr, size_t numItems) { - template< typename T > - static void call( char *ptr, size_t numItems ) + T *destroy = reinterpret_cast(ptr); + for (size_t i = 0; i < numItems; ++i) { - T *destroy = reinterpret_cast< T * >( ptr ); - for( size_t i = 0; i < numItems; ++i ) - { - destroy[ i ].~T(); - } + destroy[i].~T(); } + } - template< unsigned long n, typename... Args > - static void call( Args &&... 
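Aside (illustrative, not part of this patch): ScheduleLoad above default-constructs numItems objects of T directly into a preallocated char buffer, one placement-new per element (MSVC miscompiles placement new of arrays), hands the pointer to a deferred engine.Get, and records the buffer so AttributeLocationDestroy can run the destructors later. The construct/destroy pair in isolation:

    #include <cstddef>
    #include <new>
    #include <string>
    #include <vector>

    // Construct numItems objects of T into raw storage, one placement-new per
    // element, and destroy them explicitly afterwards.
    template <typename T>
    T *constructInto(char *buffer, std::size_t numItems)
    {
        T *dest = reinterpret_cast<T *>(buffer);
        for (std::size_t i = 0; i < numItems; ++i)
            new (dest + i) T();
        return dest;
    }

    template <typename T>
    void destroyIn(char *buffer, std::size_t numItems)
    {
        T *obj = reinterpret_cast<T *>(buffer);
        for (std::size_t i = 0; i < numItems; ++i)
            obj[i].~T();
    }

    int main()
    {
        // storage must be large enough and suitably aligned for T
        std::vector<char> raw(4 * sizeof(std::string), 0);
        auto *p = constructInto<std::string>(raw.data(), 4);
        p[0] = "hello";
        destroyIn<std::string>(raw.data(), 4);
    }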
) - { - } - }; - } // namespace + template + static void call(Args &&...) + {} + }; +} // namespace - using AttributeLocation = PreloadAdiosAttributes::AttributeLocation; +using AttributeLocation = PreloadAdiosAttributes::AttributeLocation; - AttributeLocation::AttributeLocation( - adios2::Dims shape_in, size_t offset_in, Datatype dt_in ) - : shape( std::move( shape_in ) ), offset( offset_in ), dt( dt_in ) - { - } +AttributeLocation::AttributeLocation( + adios2::Dims shape_in, size_t offset_in, Datatype dt_in) + : shape(std::move(shape_in)), offset(offset_in), dt(dt_in) +{} - AttributeLocation::AttributeLocation( AttributeLocation && other ) - : shape{ std::move( other.shape ) } - , offset{ std::move( other.offset ) } - , dt{ std::move( other.dt ) } - , destroy{ std::move( other.destroy ) } - { - other.destroy = nullptr; - } +AttributeLocation::AttributeLocation(AttributeLocation &&other) + : shape{std::move(other.shape)} + , offset{std::move(other.offset)} + , dt{std::move(other.dt)} + , destroy{std::move(other.destroy)} +{ + other.destroy = nullptr; +} - AttributeLocation & - AttributeLocation::operator=( AttributeLocation && other ) +AttributeLocation &AttributeLocation::operator=(AttributeLocation &&other) +{ + this->shape = std::move(other.shape); + this->offset = std::move(other.offset); + this->dt = std::move(other.dt); + this->destroy = std::move(other.destroy); + other.destroy = nullptr; + return *this; +} + +PreloadAdiosAttributes::AttributeLocation::~AttributeLocation() +{ + /* + * If the object has been moved from, this may be empty. + * Or else, if no custom destructor has been emplaced. + */ + if (destroy) { - this->shape = std::move( other.shape ); - this->offset = std::move( other.offset ); - this->dt = std::move( other.dt ); - this->destroy = std::move( other.destroy ); - other.destroy = nullptr; - return *this; + size_t length = 1; + for (auto ext : shape) + { + length *= ext; + } + switchAdios2AttributeType( + dt, destroy, length); } +} - PreloadAdiosAttributes::AttributeLocation::~AttributeLocation() +void PreloadAdiosAttributes::preloadAttributes( + adios2::IO &IO, adios2::Engine &engine) +{ + m_offsets.clear(); + std::map> attributesByType; + auto addAttribute = [&attributesByType](Datatype dt, std::string name) { + constexpr size_t reserve = 10; + auto it = attributesByType.find(dt); + if (it == attributesByType.end()) + { + it = attributesByType.emplace_hint( + it, dt, std::vector()); + it->second.reserve(reserve); + } + it->second.push_back(std::move(name)); + }; + // PHASE 1: collect names of available attributes by ADIOS datatype + for (auto &variable : IO.AvailableVariables()) { - /* - * If the object has been moved from, this may be empty. - * Or else, if no custom destructor has been emplaced. 
- */ - if( destroy ) + if (auxiliary::ends_with(variable.first, "/__data__")) { - size_t length = 1; - for( auto ext : shape ) - { - length *= ext; - } - switchAdios2AttributeType< AttributeLocationDestroy >( - dt, destroy, length ); + continue; } + // this will give us basic types only, no fancy vectors or similar + Datatype dt = fromADIOS2Type(IO.VariableType(variable.first)); + addAttribute(dt, std::move(variable.first)); } - void - PreloadAdiosAttributes::preloadAttributes( - adios2::IO & IO, - adios2::Engine & engine ) + // PHASE 2: get offsets for attributes in buffer + std::map offsets; + size_t currentOffset = 0; + for (auto &pair : attributesByType) { - m_offsets.clear(); - std::map< Datatype, std::vector< std::string > > attributesByType; - auto addAttribute = - [ &attributesByType ]( Datatype dt, std::string name ) { - constexpr size_t reserve = 10; - auto it = attributesByType.find( dt ); - if( it == attributesByType.end() ) - { - it = attributesByType.emplace_hint( - it, dt, std::vector< std::string >() ); - it->second.reserve( reserve ); - } - it->second.push_back( std::move( name ) ); - }; - // PHASE 1: collect names of available attributes by ADIOS datatype - for( auto & variable : IO.AvailableVariables() ) + size_t alignment = switchAdios2AttributeType(pair.first); + size_t size = switchAdios2AttributeType(pair.first); + // go to next offset with valid alignment + size_t modulus = currentOffset % alignment; + if (modulus > 0) { - if( auxiliary::ends_with( variable.first, "/__data__" ) ) - { - continue; - } - // this will give us basic types only, no fancy vectors or similar - Datatype dt = fromADIOS2Type( IO.VariableType( variable.first ) ); - addAttribute( dt, std::move( variable.first ) ); + currentOffset += alignment - modulus; } - - // PHASE 2: get offsets for attributes in buffer - std::map< Datatype, size_t > offsets; - size_t currentOffset = 0; - for( auto & pair : attributesByType ) + for (std::string &name : pair.second) { - size_t alignment = switchAdios2AttributeType< GetAlignment >( - pair.first ); - size_t size = switchAdios2AttributeType< GetSize >( pair.first ); - // go to next offset with valid alignment - size_t modulus = currentOffset % alignment; - if( modulus > 0 ) - { - currentOffset += alignment - modulus; - } - for( std::string & name : pair.second ) + adios2::Dims shape = + switchAdios2AttributeType(pair.first, IO, name); + size_t elements = 1; + for (auto extent : shape) { - adios2::Dims shape = switchAdios2AttributeType< VariableShape >( - pair.first, IO, name ); - size_t elements = 1; - for( auto extent : shape ) - { - elements *= extent; - } - m_offsets.emplace( - std::piecewise_construct, - std::forward_as_tuple( std::move( name ) ), - std::forward_as_tuple( - std::move( shape ), currentOffset, pair.first ) ); - currentOffset += elements * size; + elements *= extent; } + m_offsets.emplace( + std::piecewise_construct, + std::forward_as_tuple(std::move(name)), + std::forward_as_tuple( + std::move(shape), currentOffset, pair.first)); + currentOffset += elements * size; } - // now, currentOffset is the number of bytes that we need to allocate - // PHASE 3: allocate new buffer and schedule loads - m_rawBuffer.resize( currentOffset ); - for( auto & pair : m_offsets ) - { - switchAdios2AttributeType< ScheduleLoad >( - pair.second.dt, - IO, - engine, - pair.first, - &m_rawBuffer[ pair.second.offset ], - pair.second ); - } } + // now, currentOffset is the number of bytes that we need to allocate + // PHASE 3: allocate new buffer and schedule loads + 
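Aside (illustrative, not part of this patch): PHASE 2 above packs all attributes of one datatype contiguously into a single raw buffer. The running offset is rounded up to the type's alignment (alignof via GetAlignment, sizeof via GetSize), and each attribute then advances the offset by elements * size. The round-up step as a small helper:

    #include <cassert>
    #include <cstddef>

    // Round offset up to the next multiple of alignment (alignment > 0).
    std::size_t alignUp(std::size_t offset, std::size_t alignment)
    {
        std::size_t modulus = offset % alignment;
        return modulus == 0 ? offset : offset + (alignment - modulus);
    }

    int main()
    {
        assert(alignUp(13, 8) == 16);
        assert(alignUp(16, 8) == 16);
    }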
m_rawBuffer.resize(currentOffset); + for (auto &pair : m_offsets) + { + switchAdios2AttributeType( + pair.second.dt, + IO, + engine, + pair.first, + &m_rawBuffer[pair.second.offset], + pair.second); + } +} - Datatype - PreloadAdiosAttributes::attributeType( std::string const & name ) const +Datatype PreloadAdiosAttributes::attributeType(std::string const &name) const +{ + auto it = m_offsets.find(name); + if (it == m_offsets.end()) { - auto it = m_offsets.find( name ); - if( it == m_offsets.end() ) - { - return Datatype::UNDEFINED; - } - return it->second.dt; + return Datatype::UNDEFINED; } + return it->second.dt; +} } // namespace openPMD::detail #endif // openPMD_HAVE_ADIOS2 diff --git a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp index 531e924d33..3d4acb5f84 100644 --- a/src/IO/ADIOS/CommonADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/CommonADIOS1IOHandler.cpp @@ -23,10 +23,10 @@ #if openPMD_HAVE_ADIOS1 -#include "openPMD/auxiliary/JSON_internal.hpp" #include "openPMD/Error.hpp" #include "openPMD/IO/ADIOS/ADIOS1IOHandlerImpl.hpp" #include "openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp" +#include "openPMD/auxiliary/JSON_internal.hpp" #include #include @@ -37,527 +37,545 @@ #include #include #include -#include #include namespace openPMD { #if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } #else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) #endif -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::close(int64_t fd) +template +void CommonADIOS1IOHandlerImpl::close(int64_t fd) { int status; status = adios_close(fd); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to close ADIOS file (open_write)"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to close ADIOS file (open_write)"); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::close(ADIOS_FILE* f) +template +void CommonADIOS1IOHandlerImpl::close(ADIOS_FILE *f) { int status; status = adios_read_close(f); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to close ADIOS file (open_read)"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to close ADIOS file (open_read)"); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::flush_attribute(int64_t group, std::string const& name, Attribute const& att) +template +void CommonADIOS1IOHandlerImpl::flush_attribute( + int64_t group, std::string const &name, Attribute const &att) { auto dtype = att.dtype; // https://github.com/ComputationalRadiationPhysics/picongpu/pull/1756 - if( dtype == Datatype::BOOL ) + if (dtype == Datatype::BOOL) dtype = Datatype::UCHAR; int nelems = 0; - switch( dtype ) + switch (dtype) { using DT = Datatype; - case DT::VEC_CHAR: - nelems = att.get< std::vector< char > >().size(); - break; - case DT::VEC_SHORT: - nelems = att.get< std::vector< short > >().size(); - break; - case DT::VEC_INT: - nelems = att.get< std::vector< int > >().size(); - break; - case DT::VEC_LONG: - nelems = att.get< std::vector< long > >().size(); - break; - case DT::VEC_LONGLONG: - nelems = att.get< std::vector< long long > >().size(); - break; - case DT::VEC_UCHAR: - nelems = att.get< std::vector< unsigned char > 
>().size(); - break; - case DT::VEC_USHORT: - nelems = att.get< std::vector< unsigned short > >().size(); - break; - case DT::VEC_UINT: - nelems = att.get< std::vector< unsigned int > >().size(); - break; - case DT::VEC_ULONG: - nelems = att.get< std::vector< unsigned long > >().size(); - break; - case DT::VEC_ULONGLONG: - nelems = att.get< std::vector< unsigned long long > >().size(); - break; - case DT::VEC_FLOAT: - nelems = att.get< std::vector< float > >().size(); - break; - case DT::VEC_DOUBLE: - nelems = att.get< std::vector< double > >().size(); - break; - case DT::VEC_LONG_DOUBLE: - nelems = att.get< std::vector< long double > >().size(); - break; - case DT::VEC_STRING: - nelems = att.get< std::vector< std::string > >().size(); - break; - case DT::ARR_DBL_7: - nelems = 7; - break; - case DT::UNDEFINED: - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); - default: - nelems = 1; + case DT::VEC_CHAR: + nelems = att.get>().size(); + break; + case DT::VEC_SHORT: + nelems = att.get>().size(); + break; + case DT::VEC_INT: + nelems = att.get>().size(); + break; + case DT::VEC_LONG: + nelems = att.get>().size(); + break; + case DT::VEC_LONGLONG: + nelems = att.get>().size(); + break; + case DT::VEC_UCHAR: + nelems = att.get>().size(); + break; + case DT::VEC_USHORT: + nelems = att.get>().size(); + break; + case DT::VEC_UINT: + nelems = att.get>().size(); + break; + case DT::VEC_ULONG: + nelems = att.get>().size(); + break; + case DT::VEC_ULONGLONG: + nelems = att.get>().size(); + break; + case DT::VEC_FLOAT: + nelems = att.get>().size(); + break; + case DT::VEC_DOUBLE: + nelems = att.get>().size(); + break; + case DT::VEC_LONG_DOUBLE: + nelems = att.get>().size(); + break; + case DT::VEC_STRING: + nelems = att.get>().size(); + break; + case DT::ARR_DBL_7: + nelems = 7; + break; + case DT::UNDEFINED: + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); + default: + nelems = 1; } auto values = auxiliary::allocatePtr(dtype, nelems); - switch( att.dtype ) + switch (att.dtype) { using DT = Datatype; - case DT::CHAR: - { - auto ptr = reinterpret_cast< char* >(values.get()); - *ptr = att.get< char >(); - break; - } - case DT::UCHAR: - { - auto ptr = reinterpret_cast< unsigned char* >(values.get()); - *ptr = att.get< unsigned char >(); - break; - } - case DT::SHORT: - { - auto ptr = reinterpret_cast< short* >(values.get()); - *ptr = att.get< short >(); - break; - } - case DT::INT: - { - auto ptr = reinterpret_cast< int* >(values.get()); - *ptr = att.get< int >(); - break; - } - case DT::LONG: - { - auto ptr = reinterpret_cast< long* >(values.get()); - *ptr = att.get< long >(); - break; - } - case DT::LONGLONG: - { - auto ptr = reinterpret_cast< long long* >(values.get()); - *ptr = att.get< long long >(); - break; - } - case DT::USHORT: - { - auto ptr = reinterpret_cast< unsigned short* >(values.get()); - *ptr = att.get< unsigned short >(); - break; - } - case DT::UINT: - { - auto ptr = reinterpret_cast< unsigned int* >(values.get()); - *ptr = att.get< unsigned int >(); - break; - } - case DT::ULONG: - { - auto ptr = reinterpret_cast< unsigned long* >(values.get()); - *ptr = att.get< unsigned long >(); - break; - } - case DT::ULONGLONG: - { - auto ptr = reinterpret_cast< unsigned long long* >(values.get()); - *ptr = att.get< unsigned long long >(); - break; - } - case DT::FLOAT: - { - auto ptr = reinterpret_cast< float* >(values.get()); - *ptr = att.get< float >(); - break; - } - case DT::DOUBLE: - { - auto ptr = 
reinterpret_cast< double* >(values.get()); - *ptr = att.get< double >(); - break; - } - case DT::LONG_DOUBLE: - { - auto ptr = reinterpret_cast< long double* >(values.get()); - *ptr = att.get< long double >(); - break; - } - case DT::CFLOAT: - { - auto ptr = reinterpret_cast< std::complex< float >* >(values.get()); - *ptr = att.get< std::complex< float > >(); - break; - } - case DT::CDOUBLE: - { - auto ptr = reinterpret_cast< std::complex< double >* >(values.get()); - *ptr = att.get< std::complex< double > >(); - break; - } - case DT::CLONG_DOUBLE: - { - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (CLONG_DOUBLE)"); - break; - } - case DT::STRING: - { - auto const & v = att.get< std::string >(); - values = auxiliary::allocatePtr(Datatype::CHAR, v.length() + 1u); - strcpy((char*)values.get(), v.c_str()); - break; - } - case DT::VEC_CHAR: - { - auto ptr = reinterpret_cast< char* >(values.get()); - auto const& vec = att.get< std::vector< char > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_SHORT: - { - auto ptr = reinterpret_cast< short* >(values.get()); - auto const& vec = att.get< std::vector< short > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_INT: - { - auto ptr = reinterpret_cast< int* >(values.get()); - auto const& vec = att.get< std::vector< int > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONG: - { - auto ptr = reinterpret_cast< long* >(values.get()); - auto const& vec = att.get< std::vector< long > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONGLONG: - { - auto ptr = reinterpret_cast< long long* >(values.get()); - auto const& vec = att.get< std::vector< long long > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_UCHAR: - { - auto ptr = reinterpret_cast< unsigned char* >(values.get()); - auto const& vec = att.get< std::vector< unsigned char > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_USHORT: - { - auto ptr = reinterpret_cast< unsigned short* >(values.get()); - auto const& vec = att.get< std::vector< unsigned short > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_UINT: - { - auto ptr = reinterpret_cast< unsigned int* >(values.get()); - auto const& vec = att.get< std::vector< unsigned int > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_ULONG: - { - auto ptr = reinterpret_cast< unsigned long* >(values.get()); - auto const& vec = att.get< std::vector< unsigned long > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_ULONGLONG: - { - auto ptr = reinterpret_cast< unsigned long long* >(values.get()); - auto const& vec = att.get< std::vector< unsigned long long > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_FLOAT: - { - auto ptr = reinterpret_cast< float* >(values.get()); - auto const& vec = att.get< std::vector< float > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_DOUBLE: - { - auto ptr = reinterpret_cast< double* >(values.get()); - auto const& vec = att.get< std::vector< double > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - case DT::VEC_LONG_DOUBLE: - { - auto ptr = reinterpret_cast< long double* 
>(values.get()); - auto const& vec = att.get< std::vector< long double > >(); - for( size_t i = 0; i < vec.size(); ++i ) - ptr[i] = vec[i]; - break; - } - /* not supported by ADIOS 1.13.1: - * https://github.com/ornladios/ADIOS/issues/212 - */ - case DT::VEC_CFLOAT: - case DT::VEC_CDOUBLE: - case DT::VEC_CLONG_DOUBLE: - { - throw std::runtime_error("[ADIOS1] Arrays of complex attributes are not supported"); - break; - } - case DT::VEC_STRING: - { - auto ptr = reinterpret_cast< char** >(values.get()); - auto const& vec = att.get< std::vector< std::string > >(); - for( size_t i = 0; i < vec.size(); ++i ) - { - size_t size = vec[i].size() + 1; - ptr[i] = new char[size]; - strncpy(ptr[i], vec[i].c_str(), size); - } - break; - } - case DT::ARR_DBL_7: - { - auto ptr = reinterpret_cast< double* >(values.get()); - auto const& arr = att.get< std::array< double, 7> >(); - for( size_t i = 0; i < 7; ++i ) - ptr[i] = arr[i]; - break; - } - case DT::BOOL: + case DT::CHAR: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::UCHAR: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::SHORT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::INT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::LONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::LONGLONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::USHORT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::UINT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::ULONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::ULONGLONG: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::FLOAT: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::LONG_DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + *ptr = att.get(); + break; + } + case DT::CFLOAT: { + auto ptr = reinterpret_cast *>(values.get()); + *ptr = att.get>(); + break; + } + case DT::CDOUBLE: { + auto ptr = reinterpret_cast *>(values.get()); + *ptr = att.get>(); + break; + } + case DT::CLONG_DOUBLE: { + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (CLONG_DOUBLE)"); + break; + } + case DT::STRING: { + auto const &v = att.get(); + values = auxiliary::allocatePtr(Datatype::CHAR, v.length() + 1u); + strcpy((char *)values.get(), v.c_str()); + break; + } + case DT::VEC_CHAR: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_SHORT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_INT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_LONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_LONGLONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for 
(size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_UCHAR: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_USHORT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_UINT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_ULONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_ULONGLONG: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_FLOAT: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + case DT::VEC_LONG_DOUBLE: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) + ptr[i] = vec[i]; + break; + } + /* not supported by ADIOS 1.13.1: + * https://github.com/ornladios/ADIOS/issues/212 + */ + case DT::VEC_CFLOAT: + case DT::VEC_CDOUBLE: + case DT::VEC_CLONG_DOUBLE: { + throw std::runtime_error( + "[ADIOS1] Arrays of complex attributes are not supported"); + break; + } + case DT::VEC_STRING: { + auto ptr = reinterpret_cast(values.get()); + auto const &vec = att.get>(); + for (size_t i = 0; i < vec.size(); ++i) { - auto ptr = reinterpret_cast< unsigned char* >(values.get()); - *ptr = static_cast< unsigned char >(att.get< bool >()); - break; + size_t size = vec[i].size() + 1; + ptr[i] = new char[size]; + strncpy(ptr[i], vec[i].c_str(), size); } - case DT::UNDEFINED: - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); - default: - throw std::runtime_error("[ADIOS1] Datatype not implemented in ADIOS IO"); + break; + } + case DT::ARR_DBL_7: { + auto ptr = reinterpret_cast(values.get()); + auto const &arr = att.get>(); + for (size_t i = 0; i < 7; ++i) + ptr[i] = arr[i]; + break; + } + case DT::BOOL: { + auto ptr = reinterpret_cast(values.get()); + *ptr = static_cast(att.get()); + break; + } + case DT::UNDEFINED: + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (ADIOS1 Attribute flush)"); + default: + throw std::runtime_error( + "[ADIOS1] Datatype not implemented in ADIOS IO"); } int status; - status = adios_define_attribute_byvalue(group, - name.c_str(), - "", - getBP1DataType(att.dtype), - nelems, - values.get()); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to define ADIOS attribute by value"); - - if( att.dtype == Datatype::VEC_STRING ) + status = adios_define_attribute_byvalue( + group, + name.c_str(), + "", + getBP1DataType(att.dtype), + nelems, + values.get()); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to define ADIOS attribute by value"); + + if (att.dtype == Datatype::VEC_STRING) { - auto ptr = reinterpret_cast< char** >(values.get()); - for( int i = 0; i < nelems; ++i ) + auto ptr = reinterpret_cast(values.get()); + for (int i = 
0; i < nelems; ++i) delete[] ptr[i]; } } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::createFile(Writable* writable, - Parameter< Operation::CREATE_FILE > const& parameters) +template +void CommonADIOS1IOHandlerImpl::createFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Creating a file in read-only mode is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Creating a file in read-only mode is not possible."); - if( !writable->written ) + if (!writable->written) { - if( !auxiliary::directory_exists(m_handler->directory) ) + if (!auxiliary::directory_exists(m_handler->directory)) { bool success = auxiliary::create_directories(m_handler->directory); - VERIFY(success, "[ADIOS1] Internal error: Failed to create directories during ADIOS file creation"); + VERIFY( + success, + "[ADIOS1] Internal error: Failed to create directories during " + "ADIOS file creation"); } std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".bp") ) + if (!auxiliary::ends_with(name, ".bp")) name += ".bp"; writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >("/"); + writable->abstractFilePosition = + std::make_shared("/"); - m_filePaths[writable] = std::make_shared< std::string >(name); + m_filePaths[writable] = std::make_shared(name); /* our control flow allows for more than one open file handle - * if multiple files are opened with the same group, data might be lost */ + * if multiple files are opened with the same group, data might be lost + */ - /* defer actually opening the file handle until the first Operation::WRITE_DATASET occurs */ + /* defer actually opening the file handle until the first + * Operation::WRITE_DATASET occurs */ m_existsOnDisk[m_filePaths[writable]] = false; GetFileHandle(writable); } } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::createPath(Writable* writable, - Parameter< Operation::CREATE_PATH > const& parameters) +template +void CommonADIOS1IOHandlerImpl::createPath( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Creating a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Creating a path in a file opened as read only is not " + "possible."); - if( !writable->written ) + if (!writable->written) { /* Sanitize path */ std::string path = parameters.path; - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; /* ADIOS has no concept for explicitly creating paths. - * They are implicitly created with the paths of variables/attributes. */ + * They are implicitly created with the paths of variables/attributes. 
+ */ writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >(path); + writable->abstractFilePosition = + std::make_shared(path); - Writable* position; - if( writable->parent ) + Writable *position; + if (writable->parent) position = writable->parent; else - position = writable; /* root does not have a parent but might still have to be written */ + position = writable; /* root does not have a parent but might still + have to be written */ auto res = m_filePaths.find(position); m_filePaths[writable] = res->second; } } -static std::optional< std::string > datasetTransform( - json::TracingJSON config ) +static std::optional datasetTransform(json::TracingJSON config) { - using ret_t = std::optional< std::string >; - if( !config.json().contains( "dataset" ) ) + using ret_t = std::optional; + if (!config.json().contains("dataset")) { return ret_t{}; } - config = config[ "dataset" ]; - if( !config.json().contains( "transform" ) ) + config = config["dataset"]; + if (!config.json().contains("transform")) { return ret_t{}; } - config = config[ "transform" ]; - auto maybeRes = json::asStringDynamic( config.json() ); - if( maybeRes.has_value() ) + config = config["transform"]; + auto maybeRes = json::asStringDynamic(config.json()); + if (maybeRes.has_value()) { - return std::move( maybeRes.value() ); + return std::move(maybeRes.value()); } else { throw error::BackendConfigSchema( - { "adios1", "dataset", "transform" }, - "Key must convertible to type string." ); + {"adios1", "dataset", "transform"}, + "Key must convertible to type string."); } } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::createDataset(Writable* writable, - Parameter< Operation::CREATE_DATASET > const& parameters) +template +void CommonADIOS1IOHandlerImpl::createDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Creating a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Creating a dataset in a file opened as read only is not " + "possible."); - if( !writable->written ) + if (!writable->written) { - /* ADIOS variable definitions require the file to be (re-)opened to take effect/not cause errors */ + /* ADIOS variable definitions require the file to be (re-)opened to take + * effect/not cause errors */ auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); int64_t group = m_groups[res->second]; /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); - if( auxiliary::ends_with(name, '/') ) + if (auxiliary::ends_with(name, '/')) name = auxiliary::replace_last(name, "/", ""); std::string path = concrete_bp1_file_position(writable) + name; size_t ndims = parameters.extent.size(); - std::vector< std::string > chunkSize(ndims, ""); - std::vector< std::string > chunkOffset(ndims, ""); + std::vector chunkSize(ndims, ""); + std::vector chunkOffset(ndims, ""); int64_t id; - for( size_t i = 0; i < ndims; ++i ) + for (size_t i = 0; i < ndims; ++i) { chunkSize[i] = "/tmp" + path + "_chunkSize" + std::to_string(i); - id = adios_define_var(group, chunkSize[i].c_str(), "", adios_unsigned_long, "", "", ""); - VERIFY(id != 0, "[ADIOS1] Internal error: 
Failed to define ADIOS variable during Dataset creation"); + id = adios_define_var( + group, + chunkSize[i].c_str(), + "", + adios_unsigned_long, + "", + "", + ""); + VERIFY( + id != 0, + "[ADIOS1] Internal error: Failed to define ADIOS variable " + "during Dataset creation"); chunkOffset[i] = "/tmp" + path + "_chunkOffset" + std::to_string(i); - id = adios_define_var(group, chunkOffset[i].c_str(), "", adios_unsigned_long, "", "", ""); - VERIFY(id != 0, "[ADIOS1] Internal error: Failed to define ADIOS variable during Dataset creation"); + id = adios_define_var( + group, + chunkOffset[i].c_str(), + "", + adios_unsigned_long, + "", + "", + ""); + VERIFY( + id != 0, + "[ADIOS1] Internal error: Failed to define ADIOS variable " + "during Dataset creation"); } std::string chunkSizeParam = auxiliary::join(chunkSize, ","); std::string globalSize = getBP1Extent(parameters.extent); std::string chunkOffsetParam = auxiliary::join(chunkOffset, ","); - id = adios_define_var(group, - path.c_str(), - "", - getBP1DataType(parameters.dtype), - chunkSizeParam.c_str(), - globalSize.c_str(), - chunkOffsetParam.c_str()); - VERIFY(id != 0, "[ADIOS1] Internal error: Failed to define ADIOS variable during Dataset creation"); + id = adios_define_var( + group, + path.c_str(), + "", + getBP1DataType(parameters.dtype), + chunkSizeParam.c_str(), + globalSize.c_str(), + chunkOffsetParam.c_str()); + VERIFY( + id != 0, + "[ADIOS1] Internal error: Failed to define ADIOS variable during " + "Dataset creation"); std::string transform = ""; { json::TracingJSON options = json::parseOptions( - parameters.options, /* considerFiles = */ false ); - if( options.json().contains( "adios1" ) ) + parameters.options, /* considerFiles = */ false); + if (options.json().contains("adios1")) { options = options["adios1"]; - auto maybeTransform = datasetTransform( options ); - if( maybeTransform.has_value() ) + auto maybeTransform = datasetTransform(options); + if (maybeTransform.has_value()) { transform = maybeTransform.value(); } @@ -566,244 +584,247 @@ CommonADIOS1IOHandlerImpl< ChildClass >::createDataset(Writable* writable, options, "ADIOS1", "Warning: parts of the backend configuration for " - "ADIOS1 dataset '" + name + "' remain unused:\n" ); + "ADIOS1 dataset '" + + name + "' remain unused:\n"); } } // Fallback: global option - if( transform.empty() ) + if (transform.empty()) { transform = m_defaultTransform; } - if( !transform.empty() ) + if (!transform.empty()) { int status; status = adios_set_transform(id, transform.c_str()); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to set ADIOS transform during Dataset creation"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to set ADIOS transform during " + "Dataset creation"); } writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >(name); + writable->abstractFilePosition = + std::make_shared(name); m_filePaths[writable] = res->second; } } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::extendDataset(Writable*, - Parameter< Operation::EXTEND_DATASET > const&) +template +void CommonADIOS1IOHandlerImpl::extendDataset( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Dataset extension not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Dataset extension not implemented in ADIOS backend"); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::openFile(Writable* writable, - Parameter< 
Operation::OPEN_FILE > const& parameters) +template +void CommonADIOS1IOHandlerImpl::openFile( + Writable *writable, Parameter const ¶meters) { - if( !auxiliary::directory_exists(m_handler->directory) ) - throw no_such_file_error("[ADIOS1] Supplied directory is not valid: " + m_handler->directory); + if (!auxiliary::directory_exists(m_handler->directory)) + throw no_such_file_error( + "[ADIOS1] Supplied directory is not valid: " + + m_handler->directory); std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".bp") ) + if (!auxiliary::ends_with(name, ".bp")) name += ".bp"; - std::shared_ptr< std::string > filePath; - auto it = std::find_if(m_filePaths.begin(), m_filePaths.end(), - [name](std::unordered_map< Writable*, std::shared_ptr< std::string > >::value_type const& entry){ return *entry.second == name; }); - if( it == m_filePaths.end() ) - filePath = std::make_shared< std::string >(name); + std::shared_ptr filePath; + auto it = std::find_if( + m_filePaths.begin(), + m_filePaths.end(), + [name](std::unordered_map>:: + value_type const &entry) { return *entry.second == name; }); + if (it == m_filePaths.end()) + filePath = std::make_shared(name); else filePath = it->second; - if( m_handler->m_backendAccess == Access::CREATE ) + if (m_handler->m_backendAccess == Access::CREATE) { // called at Series::flush for iterations that has been flushed before - // this is to make sure to point the Series.m_writer points to this iteration - // so when call Series.flushAttribute(), the attributes can be flushed to the iteration level file. + // this is to make sure to point the Series.m_writer points to this + // iteration so when call Series.flushAttribute(), the attributes can be + // flushed to the iteration level file. 
m_filePaths[writable] = filePath; writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >("/"); + writable->abstractFilePosition = + std::make_shared("/"); return; - } + } /* close the handle that corresponds to the file we want to open */ - if( m_openWriteFileHandles.find(filePath) != m_openWriteFileHandles.end() ) + if (m_openWriteFileHandles.find(filePath) != m_openWriteFileHandles.end()) { close(m_openWriteFileHandles[filePath]); m_openWriteFileHandles.erase(filePath); } - if( m_groups.find(filePath) == m_groups.end() ) + if (m_groups.find(filePath) == m_groups.end()) m_groups[filePath] = - static_cast< ChildClass * >( this )->initialize_group(name); + static_cast(this)->initialize_group(name); - if( m_openReadFileHandles.find(filePath) == m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(filePath) == m_openReadFileHandles.end()) { - ADIOS_FILE* f = static_cast< ChildClass * >( this )->open_read(name); + ADIOS_FILE *f = static_cast(this)->open_read(name); m_openReadFileHandles[filePath] = f; } writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >("/"); + writable->abstractFilePosition = std::make_shared("/"); m_filePaths[writable] = filePath; m_existsOnDisk[filePath] = true; } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & ) +template +void CommonADIOS1IOHandlerImpl::closeFile( + Writable *writable, Parameter const &) { - auto myFile = m_filePaths.find( writable ); - if( myFile == m_filePaths.end() ) + auto myFile = m_filePaths.find(writable); + if (myFile == m_filePaths.end()) { return; } // finish write operations - auto myGroup = m_groups.find( myFile->second ); - if( myGroup != m_groups.end() ) + auto myGroup = m_groups.find(myFile->second); + if (myGroup != m_groups.end()) { - auto attributeWrites = m_attributeWrites.find( myGroup->second ); - if( this->m_handler->m_backendAccess != Access::READ_ONLY && - attributeWrites != m_attributeWrites.end() ) + auto attributeWrites = m_attributeWrites.find(myGroup->second); + if (this->m_handler->m_backendAccess != Access::READ_ONLY && + attributeWrites != m_attributeWrites.end()) { - for( auto & att : attributeWrites->second ) + for (auto &att : attributeWrites->second) { - flush_attribute( myGroup->second, att.first, att.second ); + flush_attribute(myGroup->second, att.first, att.second); } - m_attributeWrites.erase( attributeWrites ); + m_attributeWrites.erase(attributeWrites); } - m_groups.erase( myGroup ); + m_groups.erase(myGroup); } - auto handle_write = m_openWriteFileHandles.find( myFile->second ); - if( handle_write != m_openWriteFileHandles.end() ) + auto handle_write = m_openWriteFileHandles.find(myFile->second); + if (handle_write != m_openWriteFileHandles.end()) { - close( handle_write->second ); - m_openWriteFileHandles.erase( handle_write ); + close(handle_write->second); + m_openWriteFileHandles.erase(handle_write); } // finish read operations - auto handle_read = m_openReadFileHandles.find( myFile->second ); - if( handle_read != m_openReadFileHandles.end() ) + auto handle_read = m_openReadFileHandles.find(myFile->second); + if (handle_read != m_openReadFileHandles.end()) { - auto scheduled = m_scheduledReads.find( handle_read->second ); - if( scheduled != m_scheduledReads.end() ) + auto scheduled = m_scheduledReads.find(handle_read->second); + if (scheduled != m_scheduledReads.end()) { - auto status = 
adios_perform_reads( scheduled->first, 1 ); + auto status = adios_perform_reads(scheduled->first, 1); VERIFY( status == err_no_error, "[ADIOS1] Internal error: Failed to perform ADIOS reads during " - "dataset reading" ); + "dataset reading"); - for( auto & sel : scheduled->second ) - adios_selection_delete( sel ); - m_scheduledReads.erase( scheduled ); + for (auto &sel : scheduled->second) + adios_selection_delete(sel); + m_scheduledReads.erase(scheduled); } - close( handle_read->second ); - m_openReadFileHandles.erase( handle_read ); + close(handle_read->second); + m_openReadFileHandles.erase(handle_read); } - m_existsOnDisk.erase( myFile->second ); - m_filePaths.erase( myFile ); + m_existsOnDisk.erase(myFile->second); + m_filePaths.erase(myFile); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::availableChunks( - Writable * writable, - Parameter< Operation::AVAILABLE_CHUNKS > & params ) +template +void CommonADIOS1IOHandlerImpl::availableChunks( + Writable *writable, Parameter ¶ms) { - ADIOS_FILE * f; - f = m_openReadFileHandles.at( m_filePaths.at( writable ) ); - std::string name = concrete_bp1_file_position( writable ); + ADIOS_FILE *f; + f = m_openReadFileHandles.at(m_filePaths.at(writable)); + std::string name = concrete_bp1_file_position(writable); VERIFY( - std::strcmp( f->path, m_filePaths.at( writable )->c_str() ) == 0, - "[ADIOS1] Internal Error: Invalid ADIOS read file handle" ); - ADIOS_VARINFO * varinfo = adios_inq_var( f, name.c_str() ); + std::strcmp(f->path, m_filePaths.at(writable)->c_str()) == 0, + "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); + ADIOS_VARINFO *varinfo = adios_inq_var(f, name.c_str()); VERIFY( adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to inquire ADIOS variable while " - "querying available chunks." ); - int err = adios_inq_var_blockinfo( f, varinfo ); + "querying available chunks."); + int err = adios_inq_var_blockinfo(f, varinfo); VERIFY( err == 0, "[ADIOS1] Internal error: Failed to obtain ADIOS varinfo while " - "querying available chunks." 
); - int nblocks = - varinfo->nblocks[ 0 ]; // we don't use steps, so index 0 is fine + "querying available chunks."); + int nblocks = varinfo->nblocks[0]; // we don't use steps, so index 0 is fine int ndim = varinfo->ndim; - auto & table = *params.chunks; - table.reserve( nblocks ); - for( int block = 0; block < nblocks; ++block ) + auto &table = *params.chunks; + table.reserve(nblocks); + for (int block = 0; block < nblocks; ++block) { - ADIOS_VARBLOCK & varblock = varinfo->blockinfo[ block ]; - Offset offset( ndim ); - Extent extent( ndim ); - for( int i = 0; i < ndim; ++i ) + ADIOS_VARBLOCK &varblock = varinfo->blockinfo[block]; + Offset offset(ndim); + Extent extent(ndim); + for (int i = 0; i < ndim; ++i) { - offset[ i ] = varblock.start[ i ]; - extent[ i ] = varblock.count[ i ]; + offset[i] = varblock.start[i]; + extent[i] = varblock.count[i]; } - table.emplace_back( offset, extent, int( varblock.process_id ) ); + table.emplace_back(offset, extent, int(varblock.process_id)); } - adios_free_varinfo( varinfo ); + adios_free_varinfo(varinfo); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::openPath( - Writable * writable, - Parameter< Operation::OPEN_PATH > const & parameters ) +template +void CommonADIOS1IOHandlerImpl::openPath( + Writable *writable, Parameter const ¶meters) { /* Sanitize path */ std::string path = parameters.path; - if( !path.empty() ) + if (!path.empty()) { - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; } writable->written = true; - writable->abstractFilePosition = std::make_shared< ADIOS1FilePosition >(path); + writable->abstractFilePosition = std::make_shared(path); - auto res = writable->parent ? m_filePaths.find(writable->parent) : m_filePaths.find(writable); + auto res = writable->parent ? m_filePaths.find(writable->parent) + : m_filePaths.find(writable); m_filePaths[writable] = res->second; } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::openDataset(Writable* writable, - Parameter< Operation::OPEN_DATASET >& parameters) +template +void CommonADIOS1IOHandlerImpl::openDataset( + Writable *writable, Parameter ¶meters) { ADIOS_FILE *f; auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); f = m_openReadFileHandles.at(res->second); /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); std::string datasetname = writable->abstractFilePosition ? concrete_bp1_file_position(writable) : concrete_bp1_file_position(writable) + name; - ADIOS_VARINFO* vi; - vi = adios_inq_var(f, - datasetname.c_str()); + ADIOS_VARINFO *vi; + vi = adios_inq_var(f, datasetname.c_str()); std::string error_string("[ADIOS1] Internal error: "); error_string.append("Failed to inquire about ADIOS variable '") - .append(datasetname) - .append("' during dataset opening"); + .append(datasetname) + .append("' during dataset opening"); VERIFY(adios_errno == err_no_error, error_string); VERIFY(vi != nullptr, error_string); @@ -811,154 +832,166 @@ CommonADIOS1IOHandlerImpl< ChildClass >::openDataset(Writable* writable, // note the ill-named fixed-byte adios_... 
types // https://github.com/ornladios/ADIOS/issues/187 - switch( vi->type ) + switch (vi->type) { using DT = Datatype; - case adios_byte: - dtype = DT::CHAR; - break; - case adios_short: - if( sizeof(short) == 2u ) - dtype = DT::SHORT; - else if( sizeof(int) == 2u ) - dtype = DT::INT; - else if( sizeof(long) == 2u ) - dtype = DT::LONG; - else if( sizeof(long long) == 2u ) - dtype = DT::LONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_short found."); - break; - case adios_integer: - if( sizeof(short) == 4u ) - dtype = DT::SHORT; - else if( sizeof(int) == 4u ) - dtype = DT::INT; - else if( sizeof(long) == 4u ) - dtype = DT::LONG; - else if( sizeof(long long) == 4u ) - dtype = DT::LONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_integer found."); - break; - case adios_long: - if( sizeof(short) == 8u ) - dtype = DT::SHORT; - else if( sizeof(int) == 8u ) - dtype = DT::INT; - else if( sizeof(long) == 8u ) - dtype = DT::LONG; - else if( sizeof(long long) == 8u ) - dtype = DT::LONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_long found."); - break; - case adios_unsigned_byte: - dtype = DT::UCHAR; - break; - case adios_unsigned_short: - if( sizeof(unsigned short) == 2u ) - dtype = DT::USHORT; - else if( sizeof(unsigned int) == 2u ) - dtype = DT::UINT; - else if( sizeof(unsigned long) == 2u ) - dtype = DT::ULONG; - else if( sizeof(unsigned long long) == 2u ) - dtype = DT::ULONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_short found."); - break; - case adios_unsigned_integer: - if( sizeof(unsigned short) == 4u ) - dtype = DT::USHORT; - else if( sizeof(unsigned int) == 4u ) - dtype = DT::UINT; - else if( sizeof(unsigned long) == 4u ) - dtype = DT::ULONG; - else if( sizeof(unsigned long long) == 4u ) - dtype = DT::ULONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_integer found."); - break; - case adios_unsigned_long: - if( sizeof(unsigned short) == 8u ) - dtype = DT::USHORT; - else if( sizeof(unsigned int) == 8u ) - dtype = DT::UINT; - else if( sizeof(unsigned long) == 8u ) - dtype = DT::ULONG; - else if( sizeof(unsigned long long) == 8u ) - dtype = DT::ULONGLONG; - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_long found."); - break; - case adios_real: - dtype = DT::FLOAT; - break; - case adios_double: - dtype = DT::DOUBLE; - break; - case adios_long_double: - dtype = DT::LONG_DOUBLE; - break; - case adios_complex: - dtype = DT::CFLOAT; - break; - case adios_double_complex: - dtype = DT::CDOUBLE; - break; - - case adios_string: - case adios_string_array: - default: - throw unsupported_data_error("[ADIOS1] Datatype not implemented for ADIOS dataset writing"); + case adios_byte: + dtype = DT::CHAR; + break; + case adios_short: + if (sizeof(short) == 2u) + dtype = DT::SHORT; + else if (sizeof(int) == 2u) + dtype = DT::INT; + else if (sizeof(long) == 2u) + dtype = DT::LONG; + else if (sizeof(long long) == 2u) + dtype = DT::LONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_short " + "found."); + break; + case adios_integer: + if (sizeof(short) == 4u) + dtype = DT::SHORT; + else if (sizeof(int) == 4u) + dtype = DT::INT; + else if (sizeof(long) == 4u) + dtype = DT::LONG; + else if (sizeof(long long) == 4u) + dtype = DT::LONGLONG; + else + throw 
unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_integer " + "found."); + break; + case adios_long: + if (sizeof(short) == 8u) + dtype = DT::SHORT; + else if (sizeof(int) == 8u) + dtype = DT::INT; + else if (sizeof(long) == 8u) + dtype = DT::LONG; + else if (sizeof(long long) == 8u) + dtype = DT::LONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_long found."); + break; + case adios_unsigned_byte: + dtype = DT::UCHAR; + break; + case adios_unsigned_short: + if (sizeof(unsigned short) == 2u) + dtype = DT::USHORT; + else if (sizeof(unsigned int) == 2u) + dtype = DT::UINT; + else if (sizeof(unsigned long) == 2u) + dtype = DT::ULONG; + else if (sizeof(unsigned long long) == 2u) + dtype = DT::ULONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_short found."); + break; + case adios_unsigned_integer: + if (sizeof(unsigned short) == 4u) + dtype = DT::USHORT; + else if (sizeof(unsigned int) == 4u) + dtype = DT::UINT; + else if (sizeof(unsigned long) == 4u) + dtype = DT::ULONG; + else if (sizeof(unsigned long long) == 4u) + dtype = DT::ULONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_integer found."); + break; + case adios_unsigned_long: + if (sizeof(unsigned short) == 8u) + dtype = DT::USHORT; + else if (sizeof(unsigned int) == 8u) + dtype = DT::UINT; + else if (sizeof(unsigned long) == 8u) + dtype = DT::ULONG; + else if (sizeof(unsigned long long) == 8u) + dtype = DT::ULONGLONG; + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_long found."); + break; + case adios_real: + dtype = DT::FLOAT; + break; + case adios_double: + dtype = DT::DOUBLE; + break; + case adios_long_double: + dtype = DT::LONG_DOUBLE; + break; + case adios_complex: + dtype = DT::CFLOAT; + break; + case adios_double_complex: + dtype = DT::CDOUBLE; + break; + + case adios_string: + case adios_string_array: + default: + throw unsupported_data_error( + "[ADIOS1] Datatype not implemented for ADIOS dataset writing"); } *parameters.dtype = dtype; Extent e; e.resize(vi->ndim); - for( int i = 0; i < vi->ndim; ++i ) + for (int i = 0; i < vi->ndim; ++i) e[i] = vi->dims[i]; *parameters.extent = e; writable->written = true; - if( !writable->abstractFilePosition ) + if (!writable->abstractFilePosition) { - writable->abstractFilePosition - = std::make_shared< ADIOS1FilePosition >(name); + writable->abstractFilePosition = + std::make_shared(name); } m_openReadFileHandles[res->second] = f; m_filePaths[writable] = res->second; } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::deleteFile(Writable* writable, - Parameter< Operation::DELETE_FILE > const& parameters) +template +void CommonADIOS1IOHandlerImpl::deleteFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Deleting a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Deleting a file opened as read only is not possible."); - if( writable->written ) + if (writable->written) { auto path = m_filePaths.at(writable); - if( m_openReadFileHandles.find(path) != m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(path) != m_openReadFileHandles.end()) { close(m_openReadFileHandles.at(path)); 
m_openReadFileHandles.erase(path); } - if( m_openWriteFileHandles.find(path) != m_openWriteFileHandles.end() ) + if (m_openWriteFileHandles.find(path) != m_openWriteFileHandles.end()) { close(m_openWriteFileHandles.at(path)); m_openWriteFileHandles.erase(path); } std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".bp") ) + if (!auxiliary::ends_with(name, ".bp")) name += ".bp"; - if( !auxiliary::file_exists(name) ) + if (!auxiliary::file_exists(name)) throw std::runtime_error("[ADIOS1] File does not exist: " + name); auxiliary::remove_file(name); @@ -970,59 +1003,62 @@ CommonADIOS1IOHandlerImpl< ChildClass >::deleteFile(Writable* writable, } } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::deletePath(Writable*, - Parameter< Operation::DELETE_PATH > const&) +template +void CommonADIOS1IOHandlerImpl::deletePath( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Path deletion not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Path deletion not implemented in ADIOS backend"); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::deleteDataset(Writable*, - Parameter< Operation::DELETE_DATASET > const&) +template +void CommonADIOS1IOHandlerImpl::deleteDataset( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Dataset deletion not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Dataset deletion not implemented in ADIOS backend"); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::deleteAttribute(Writable*, - Parameter< Operation::DELETE_ATT > const&) +template +void CommonADIOS1IOHandlerImpl::deleteAttribute( + Writable *, Parameter const &) { - throw std::runtime_error("[ADIOS1] Attribute deletion not implemented in ADIOS backend"); + throw std::runtime_error( + "[ADIOS1] Attribute deletion not implemented in ADIOS backend"); } -template< typename ChildClass > -int64_t CommonADIOS1IOHandlerImpl< ChildClass >::GetFileHandle(Writable* writable) +template +int64_t CommonADIOS1IOHandlerImpl::GetFileHandle(Writable *writable) { auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); int64_t fd; - if( m_openWriteFileHandles.find(res->second) == m_openWriteFileHandles.end() ) + if (m_openWriteFileHandles.find(res->second) == + m_openWriteFileHandles.end()) { - std::string name = *(res->second); + std::string name = *(res->second); m_groups[m_filePaths[writable]] = - static_cast< ChildClass * >( this )->initialize_group(name); + static_cast(this)->initialize_group(name); - fd = static_cast< ChildClass * >( this )->open_write(writable); + fd = static_cast(this)->open_write(writable); m_openWriteFileHandles[res->second] = fd; - } else + } + else fd = m_openWriteFileHandles.at(res->second); return fd; } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::writeDataset(Writable* writable, - Parameter< Operation::WRITE_DATASET > const& parameters) +template +void CommonADIOS1IOHandlerImpl::writeDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Writing into a dataset in a file opened as read-only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Writing into a dataset in a file opened as read-only is " + 
"not possible."); int64_t fd = GetFileHandle(writable); @@ -1033,569 +1069,652 @@ CommonADIOS1IOHandlerImpl< ChildClass >::writeDataset(Writable* writable, std::string chunkSize; std::string chunkOffset; int status; - for( size_t i = 0; i < ndims; ++i ) + for (size_t i = 0; i < ndims; ++i) { chunkSize = "/tmp" + name + "_chunkSize" + std::to_string(i); status = adios_write(fd, chunkSize.c_str(), ¶meters.extent[i]); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to write ADIOS variable during Dataset writing"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to write ADIOS variable during " + "Dataset writing"); chunkOffset = "/tmp" + name + "_chunkOffset" + std::to_string(i); status = adios_write(fd, chunkOffset.c_str(), ¶meters.offset[i]); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to write ADIOS variable during Dataset writing"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to write ADIOS variable during " + "Dataset writing"); } - status = adios_write(fd, - name.c_str(), - parameters.data.get()); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to write ADIOS variable during Dataset writing"); + status = adios_write(fd, name.c_str(), parameters.data.get()); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to write ADIOS variable during " + "Dataset writing"); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::writeAttribute(Writable* writable, - Parameter< Operation::WRITE_ATT > const& parameters) +template +void CommonADIOS1IOHandlerImpl::writeAttribute( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[ADIOS1] Writing an attribute in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[ADIOS1] Writing an attribute in a file opened as read only is " + "not possible."); std::string name = concrete_bp1_file_position(writable); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) name += '/'; name += parameters.name; auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); GetFileHandle(writable); int64_t group = m_groups[res->second]; - auto& attributes = m_attributeWrites[group]; + auto &attributes = m_attributeWrites[group]; attributes.erase(name); attributes.emplace(name, parameters.resource); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::readDataset(Writable* writable, - Parameter< Operation::READ_DATASET >& parameters) +template +void CommonADIOS1IOHandlerImpl::readDataset( + Writable *writable, Parameter ¶meters) { - switch( parameters.dtype ) + switch (parameters.dtype) { using DT = Datatype; - case DT::DOUBLE: - case DT::FLOAT: - case DT::CDOUBLE: - case DT::CFLOAT: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::CHAR: - case DT::UCHAR: - case DT::BOOL: - break; - case DT::UNDEFINED: - throw std::runtime_error("[ADIOS1] Unknown Attribute datatype (ADIOS1 Dataset read)"); - default: - throw std::runtime_error("[ADIOS1] Datatype not implemented in ADIOS1 IO"); + case DT::DOUBLE: + case DT::FLOAT: + case DT::CDOUBLE: + case DT::CFLOAT: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case 
DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + case DT::ULONG: + case DT::ULONGLONG: + case DT::CHAR: + case DT::UCHAR: + case DT::BOOL: + break; + case DT::UNDEFINED: + throw std::runtime_error( + "[ADIOS1] Unknown Attribute datatype (ADIOS1 Dataset read)"); + default: + throw std::runtime_error( + "[ADIOS1] Datatype not implemented in ADIOS1 IO"); } - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); - VERIFY(std::strcmp(f->path, m_filePaths.at(writable)->c_str()) == 0, - "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); - - ADIOS_SELECTION* sel; - sel = adios_selection_boundingbox(parameters.extent.size(), - parameters.offset.data(), - parameters.extent.data()); - VERIFY(sel != nullptr, "[ADIOS1] Internal error: Failed to select ADIOS bounding box during dataset reading"); - VERIFY(adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to select ADIOS bounding box during dataset reading"); + VERIFY( + std::strcmp(f->path, m_filePaths.at(writable)->c_str()) == 0, + "[ADIOS1] Internal Error: Invalid ADIOS read file handle"); + + ADIOS_SELECTION *sel; + sel = adios_selection_boundingbox( + parameters.extent.size(), + parameters.offset.data(), + parameters.extent.data()); + VERIFY( + sel != nullptr, + "[ADIOS1] Internal error: Failed to select ADIOS bounding box during " + "dataset reading"); + VERIFY( + adios_errno == err_no_error, + "[ADIOS1] Internal error: Failed to select ADIOS bounding box during " + "dataset reading"); std::string varname = concrete_bp1_file_position(writable); - void* data = parameters.data.get(); + void *data = parameters.data.get(); int status; - status = adios_schedule_read(f, - sel, - varname.c_str(), - 0, - 1, - data); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset reading"); - VERIFY(adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset reading"); + status = adios_schedule_read(f, sel, varname.c_str(), 0, 1, data); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset " + "reading"); + VERIFY( + adios_errno == err_no_error, + "[ADIOS1] Internal error: Failed to schedule ADIOS read during dataset " + "reading"); m_scheduledReads[f].push_back(sel); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::readAttribute(Writable* writable, - Parameter< Operation::READ_ATT >& parameters) +template +void CommonADIOS1IOHandlerImpl::readAttribute( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during attribute reading"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked written during " + "attribute reading"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string attrname = concrete_bp1_file_position(writable); - if( !auxiliary::ends_with(attrname, '/') ) + if (!auxiliary::ends_with(attrname, '/')) attrname += "/"; attrname += parameters.name; ADIOS_DATATYPES datatype = adios_unknown; int size = 0; - void* data = nullptr; + void *data = nullptr; int status; - status = adios_get_attr(f, - attrname.c_str(), - &datatype, - &size, - &data); - VERIFY(status == 0, "[ADIOS1] Internal error: Failed to get ADIOS1 attribute during attribute read"); - VERIFY(datatype != adios_unknown, "[ADIOS1] Internal error: Read unknown ADIOS1 datatype 
during attribute read"); + status = adios_get_attr(f, attrname.c_str(), &datatype, &size, &data); + VERIFY( + status == 0, + "[ADIOS1] Internal error: Failed to get ADIOS1 attribute during " + "attribute read"); + VERIFY( + datatype != adios_unknown, + "[ADIOS1] Internal error: Read unknown ADIOS1 datatype during " + "attribute read"); VERIFY(size != 0, "[ADIOS1] Internal error: ADIOS1 read 0-size attribute"); // size is returned in number of allocated bytes // note the ill-named fixed-byte adios_... types // https://github.com/ornladios/ADIOS/issues/187 - switch( datatype ) + switch (datatype) + { + case adios_byte: + break; + case adios_short: + size /= 2; + break; + case adios_integer: + size /= 4; + break; + case adios_long: + size /= 8; + break; + case adios_unsigned_byte: + break; + case adios_unsigned_short: + size /= 2; + break; + case adios_unsigned_integer: + size /= 4; + break; + case adios_unsigned_long: + size /= 8; + break; + case adios_real: + size /= 4; + break; + case adios_double: + size /= 8; + break; + case adios_long_double: + size /= sizeof(long double); + break; + case adios_complex: + size /= 8; + break; + case adios_double_complex: + size /= 16; + break; + case adios_string: + break; + case adios_string_array: + size /= sizeof(char *); + break; + + default: + throw unsupported_data_error( + "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + + std::to_string(datatype) + "' in size check"); + } + + Datatype dtype; + Attribute a(0); + if (size == 1) { + switch (datatype) + { + using DT = Datatype; case adios_byte: + dtype = DT::CHAR; + a = Attribute(*reinterpret_cast(data)); break; case adios_short: - size /= 2; + if (sizeof(short) == 2u) + { + dtype = DT::SHORT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(int) == 2u) + { + dtype = DT::INT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long) == 2u) + { + dtype = DT::LONG; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long long) == 2u) + { + dtype = DT::LONGLONG; + a = Attribute(*reinterpret_cast(data)); + } + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_short " + "found."); break; case adios_integer: - size /= 4; + if (sizeof(short) == 4u) + { + dtype = DT::SHORT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(int) == 4u) + { + dtype = DT::INT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long) == 4u) + { + dtype = DT::LONG; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long long) == 4u) + { + dtype = DT::LONGLONG; + a = Attribute(*reinterpret_cast(data)); + } + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_integer " + "found."); break; case adios_long: - size /= 8; + if (sizeof(short) == 8u) + { + dtype = DT::SHORT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(int) == 8u) + { + dtype = DT::INT; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long) == 8u) + { + dtype = DT::LONG; + a = Attribute(*reinterpret_cast(data)); + } + else if (sizeof(long long) == 8u) + { + dtype = DT::LONGLONG; + a = Attribute(*reinterpret_cast(data)); + } + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_long " + "found."); break; case adios_unsigned_byte: + dtype = DT::UCHAR; + a = Attribute(*reinterpret_cast(data)); break; case adios_unsigned_short: - size /= 2; - break; - case adios_unsigned_integer: - size /= 4; - break; - case adios_unsigned_long: - 
size /= 8; - break; - case adios_real: - size /= 4; - break; - case adios_double: - size /= 8; - break; - case adios_long_double: - size /= sizeof(long double); - break; - case adios_complex: - size /= 8; - break; - case adios_double_complex: - size /= 16; - break; - case adios_string: - break; - case adios_string_array: - size /= sizeof(char*); - break; - - default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + "' in size check"); - } - - Datatype dtype; - Attribute a(0); - if( size == 1 ) - { - switch( datatype ) - { - using DT = Datatype; - case adios_byte: - dtype = DT::CHAR; - a = Attribute(*reinterpret_cast< char* >(data)); - break; - case adios_short: - if( sizeof(short) == 2u ) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast< short* >(data)); - } - else if( sizeof(int) == 2u ) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast< int* >(data)); - } - else if( sizeof(long) == 2u ) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast< long* >(data)); - } - else if( sizeof(long long) == 2u ) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast< long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_short found."); - break; - case adios_integer: - if( sizeof(short) == 4u ) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast< short* >(data)); - } - else if( sizeof(int) == 4u ) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast< int* >(data)); - } - else if( sizeof(long) == 4u ) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast< long* >(data)); - } - else if( sizeof(long long) == 4u ) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast< long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_integer found."); - break; - case adios_long: - if( sizeof(short) == 8u ) - { - dtype = DT::SHORT; - a = Attribute(*reinterpret_cast< short* >(data)); - } - else if( sizeof(int) == 8u ) - { - dtype = DT::INT; - a = Attribute(*reinterpret_cast< int* >(data)); - } - else if( sizeof(long) == 8u ) - { - dtype = DT::LONG; - a = Attribute(*reinterpret_cast< long* >(data)); - } - else if( sizeof(long long) == 8u ) - { - dtype = DT::LONGLONG; - a = Attribute(*reinterpret_cast< long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_long found."); - break; - case adios_unsigned_byte: - dtype = DT::UCHAR; - a = Attribute(*reinterpret_cast< unsigned char* >(data)); - break; - case adios_unsigned_short: - if( sizeof(unsigned short) == 2u ) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast< unsigned short* >(data)); - } - else if( sizeof(unsigned int) == 2u ) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast< unsigned int* >(data)); - } - else if( sizeof(unsigned long) == 2u ) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast< unsigned long* >(data)); - } - else if( sizeof(unsigned long long) == 2u ) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast< unsigned long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_short found."); - break; - case adios_unsigned_integer: - if( sizeof(unsigned short) == 4u ) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast< unsigned short* >(data)); - } - else if( sizeof(unsigned int) == 4u ) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast< 
unsigned int* >(data)); - } - else if( sizeof(unsigned long) == 4u ) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast< unsigned long* >(data)); - } - else if( sizeof(unsigned long long) == 4u ) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast< unsigned long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_integer found."); - break; - case adios_unsigned_long: - if( sizeof(unsigned short) == 8u ) - { - dtype = DT::USHORT; - a = Attribute(*reinterpret_cast< unsigned short* >(data)); - } - else if( sizeof(unsigned int) == 8u ) - { - dtype = DT::UINT; - a = Attribute(*reinterpret_cast< unsigned int* >(data)); - } - else if( sizeof(unsigned long) == 8u ) - { - dtype = DT::ULONG; - a = Attribute(*reinterpret_cast< unsigned long* >(data)); - } - else if( sizeof(unsigned long long) == 8u ) - { - dtype = DT::ULONGLONG; - a = Attribute(*reinterpret_cast< unsigned long long* >(data)); - } - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_long found."); - break; - case adios_real: - dtype = DT::FLOAT; - a = Attribute(*reinterpret_cast< float* >(data)); - break; - case adios_double: - dtype = DT::DOUBLE; - a = Attribute(*reinterpret_cast< double* >(data)); - break; - case adios_long_double: - dtype = DT::LONG_DOUBLE; - a = Attribute(*reinterpret_cast< long double* >(data)); - break; - case adios_complex: - dtype = DT::CFLOAT; - a = Attribute(*reinterpret_cast< std::complex* >(data)); - break; - case adios_double_complex: - dtype = DT::CDOUBLE; - a = Attribute(*reinterpret_cast< std::complex* >(data)); - break; - case adios_string: - { - dtype = DT::STRING; - auto c = reinterpret_cast< char* >(data); - a = Attribute(auxiliary::strip(std::string(c, std::strlen(c)), {'\0'})); - break; - } - case adios_string_array: + if (sizeof(unsigned short) == 2u) { - dtype = DT::VEC_STRING; - auto c = reinterpret_cast< char** >(data); - std::vector< std::string > vs; - vs.resize(size); - for( int i = 0; i < size; ++i ) - { - vs[i] = auxiliary::strip(std::string(c[i], std::strlen(c[i])), {'\0'}); - /** @todo pointer should be freed, but this causes memory corruption */ - //free(c[i]); - } - a = Attribute(vs); - break; + dtype = DT::USHORT; + a = Attribute(*reinterpret_cast(data)); } - default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + "' in scalar branch"); - } - } - else - { - switch( datatype ) - { - using DT = Datatype; - case adios_byte: + else if (sizeof(unsigned int) == 2u) { - dtype = DT::VEC_CHAR; - auto c = reinterpret_cast< char* >(data); - std::vector< char > vc; - vc.resize(size); - for( int i = 0; i < size; ++i ) - vc[i] = c[i]; - a = Attribute(vc); - break; + dtype = DT::UINT; + a = Attribute(*reinterpret_cast(data)); } - case adios_short: + else if (sizeof(unsigned long) == 2u) { - if( sizeof(short) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< short >(data, size), DT::VEC_SHORT); - else if( sizeof(int) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< int >(data, size), DT::VEC_INT); - else if( sizeof(long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long >(data, size), DT::VEC_LONG); - else if( sizeof(long long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long long >(data, size), DT::VEC_LONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native 
equivalent for Datatype adios_short found."); - break; + dtype = DT::ULONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_integer: + else if (sizeof(unsigned long long) == 2u) { - if( sizeof(short) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< short >(data, size), DT::VEC_SHORT); - else if( sizeof(int) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< int >(data, size), DT::VEC_INT); - else if( sizeof(long) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long >(data, size), DT::VEC_LONG); - else if( sizeof(long long) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long long >(data, size), DT::VEC_LONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_integer found."); - break; + dtype = DT::ULONGLONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_long: + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_short found."); + break; + case adios_unsigned_integer: + if (sizeof(unsigned short) == 4u) { - if( sizeof(short) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< short >(data, size), DT::VEC_SHORT); - else if( sizeof(int) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< int >(data, size), DT::VEC_INT); - else if( sizeof(long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long >(data, size), DT::VEC_LONG); - else if( sizeof(long long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< long long >(data, size), DT::VEC_LONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_long found."); - break; + dtype = DT::USHORT; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_byte: + else if (sizeof(unsigned int) == 4u) { - dtype = DT::VEC_UCHAR; - auto uc = reinterpret_cast< unsigned char* >(data); - std::vector< unsigned char > vuc; - vuc.resize(size); - for( int i = 0; i < size; ++i ) - vuc[i] = uc[i]; - a = Attribute(vuc); - break; + dtype = DT::UINT; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_short: + else if (sizeof(unsigned long) == 4u) { - if( sizeof(unsigned short) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned short >(data, size), DT::VEC_USHORT); - else if( sizeof(unsigned int) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned int >(data, size), DT::VEC_UINT); - else if( sizeof(unsigned long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long >(data, size), DT::VEC_ULONG); - else if( sizeof(unsigned long long) == 2u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long long >(data, size), DT::VEC_ULONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_short found."); - break; + dtype = DT::ULONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_integer: + else if (sizeof(unsigned long long) == 4u) { - if( sizeof(unsigned short) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned short >(data, size), DT::VEC_USHORT); - else if( sizeof(unsigned int) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned int >(data, size), DT::VEC_UINT); - else if( sizeof(unsigned long) == 4u ) - std::tie(a, dtype) = 
std::make_tuple(readVectorAttributeInternal< unsigned long >(data, size), DT::VEC_ULONG); - else if( sizeof(unsigned long long) == 4u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long long >(data, size), DT::VEC_ULONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_integer found."); - break; + dtype = DT::ULONGLONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_unsigned_long: + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_integer found."); + break; + case adios_unsigned_long: + if (sizeof(unsigned short) == 8u) { - if( sizeof(unsigned short) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned short >(data, size), DT::VEC_USHORT); - else if( sizeof(unsigned int) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned int >(data, size), DT::VEC_UINT); - else if( sizeof(unsigned long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long >(data, size), DT::VEC_ULONG); - else if( sizeof(unsigned long long) == 8u ) - std::tie(a, dtype) = std::make_tuple(readVectorAttributeInternal< unsigned long long >(data, size), DT::VEC_ULONGLONG); - else - throw unsupported_data_error("[ADIOS1] No native equivalent for Datatype adios_unsigned_long found."); - break; + dtype = DT::USHORT; + a = Attribute(*reinterpret_cast(data)); } - case adios_real: + else if (sizeof(unsigned int) == 8u) { - dtype = DT::VEC_FLOAT; - auto f4 = reinterpret_cast< float* >(data); - std::vector< float > vf; - vf.resize(size); - for( int i = 0; i < size; ++i ) - vf[i] = f4[i]; - a = Attribute(vf); - break; + dtype = DT::UINT; + a = Attribute(*reinterpret_cast(data)); } - case adios_double: + else if (sizeof(unsigned long) == 8u) { - dtype = DT::VEC_DOUBLE; - auto d8 = reinterpret_cast< double* >(data); - std::vector< double > vd; - vd.resize(size); - for( int i = 0; i < size; ++i ) - vd[i] = d8[i]; - a = Attribute(vd); - break; + dtype = DT::ULONG; + a = Attribute(*reinterpret_cast(data)); } - case adios_long_double: + else if (sizeof(unsigned long long) == 8u) { - dtype = DT::VEC_LONG_DOUBLE; - auto ld = reinterpret_cast< long double* >(data); - std::vector< long double > vld; - vld.resize(size); - for( int i = 0; i < size; ++i ) - vld[i] = ld[i]; - a = Attribute(vld); - break; + dtype = DT::ULONGLONG; + a = Attribute(*reinterpret_cast(data)); } - /* not supported by ADIOS 1.13.1: VEC_CFLOAT, VEC_CDOUBLE, VEC_CLONG_DOUBLE - * https://github.com/ornladios/ADIOS/issues/212 - */ - case adios_string: + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_long found."); + break; + case adios_real: + dtype = DT::FLOAT; + a = Attribute(*reinterpret_cast(data)); + break; + case adios_double: + dtype = DT::DOUBLE; + a = Attribute(*reinterpret_cast(data)); + break; + case adios_long_double: + dtype = DT::LONG_DOUBLE; + a = Attribute(*reinterpret_cast(data)); + break; + case adios_complex: + dtype = DT::CFLOAT; + a = Attribute(*reinterpret_cast *>(data)); + break; + case adios_double_complex: + dtype = DT::CDOUBLE; + a = Attribute(*reinterpret_cast *>(data)); + break; + case adios_string: { + dtype = DT::STRING; + auto c = reinterpret_cast(data); + a = Attribute( + auxiliary::strip(std::string(c, std::strlen(c)), {'\0'})); + break; + } + case adios_string_array: { + dtype = DT::VEC_STRING; + auto c = reinterpret_cast(data); + std::vector 
vs; + vs.resize(size); + for (int i = 0; i < size; ++i) { - dtype = DT::STRING; - a = Attribute(auxiliary::strip(std::string(reinterpret_cast< char* >(data), size), {'\0'})); - break; + vs[i] = auxiliary::strip( + std::string(c[i], std::strlen(c[i])), {'\0'}); + /** @todo pointer should be freed, but this causes memory + * corruption */ + // free(c[i]); } - case adios_string_array: + a = Attribute(vs); + break; + } + default: + throw unsupported_data_error( + "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype " + "'" + + std::to_string(datatype) + "' in scalar branch"); + } + } + else + { + switch (datatype) + { + using DT = Datatype; + case adios_byte: { + dtype = DT::VEC_CHAR; + auto c = reinterpret_cast(data); + std::vector vc; + vc.resize(size); + for (int i = 0; i < size; ++i) + vc[i] = c[i]; + a = Attribute(vc); + break; + } + case adios_short: { + if (sizeof(short) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_SHORT); + else if (sizeof(int) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), DT::VEC_INT); + else if (sizeof(long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONG); + else if (sizeof(long long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_short " + "found."); + break; + } + case adios_integer: { + if (sizeof(short) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_SHORT); + else if (sizeof(int) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), DT::VEC_INT); + else if (sizeof(long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONG); + else if (sizeof(long long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_integer " + "found."); + break; + } + case adios_long: { + if (sizeof(short) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_SHORT); + else if (sizeof(int) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), DT::VEC_INT); + else if (sizeof(long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONG); + else if (sizeof(long long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_LONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype adios_long " + "found."); + break; + } + case adios_unsigned_byte: { + dtype = DT::VEC_UCHAR; + auto uc = reinterpret_cast(data); + std::vector vuc; + vuc.resize(size); + for (int i = 0; i < size; ++i) + vuc[i] = uc[i]; + a = Attribute(vuc); + break; + } + case adios_unsigned_short: { + if (sizeof(unsigned short) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_USHORT); + else if (sizeof(unsigned int) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_UINT); + else if (sizeof(unsigned long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONG); + else if 
(sizeof(unsigned long long) == 2u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_short found."); + break; + } + case adios_unsigned_integer: { + if (sizeof(unsigned short) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_USHORT); + else if (sizeof(unsigned int) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_UINT); + else if (sizeof(unsigned long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONG); + else if (sizeof(unsigned long long) == 4u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_integer found."); + break; + } + case adios_unsigned_long: { + if (sizeof(unsigned short) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_USHORT); + else if (sizeof(unsigned int) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_UINT); + else if (sizeof(unsigned long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONG); + else if (sizeof(unsigned long long) == 8u) + std::tie(a, dtype) = std::make_tuple( + readVectorAttributeInternal(data, size), + DT::VEC_ULONGLONG); + else + throw unsupported_data_error( + "[ADIOS1] No native equivalent for Datatype " + "adios_unsigned_long found."); + break; + } + case adios_real: { + dtype = DT::VEC_FLOAT; + auto f4 = reinterpret_cast(data); + std::vector vf; + vf.resize(size); + for (int i = 0; i < size; ++i) + vf[i] = f4[i]; + a = Attribute(vf); + break; + } + case adios_double: { + dtype = DT::VEC_DOUBLE; + auto d8 = reinterpret_cast(data); + std::vector vd; + vd.resize(size); + for (int i = 0; i < size; ++i) + vd[i] = d8[i]; + a = Attribute(vd); + break; + } + case adios_long_double: { + dtype = DT::VEC_LONG_DOUBLE; + auto ld = reinterpret_cast(data); + std::vector vld; + vld.resize(size); + for (int i = 0; i < size; ++i) + vld[i] = ld[i]; + a = Attribute(vld); + break; + } + /* not supported by ADIOS 1.13.1: VEC_CFLOAT, VEC_CDOUBLE, + * VEC_CLONG_DOUBLE https://github.com/ornladios/ADIOS/issues/212 + */ + case adios_string: { + dtype = DT::STRING; + a = Attribute(auxiliary::strip( + std::string(reinterpret_cast(data), size), {'\0'})); + break; + } + case adios_string_array: { + dtype = DT::VEC_STRING; + auto c = reinterpret_cast(data); + std::vector vs; + vs.resize(size); + for (int i = 0; i < size; ++i) { - dtype = DT::VEC_STRING; - auto c = reinterpret_cast< char** >(data); - std::vector< std::string > vs; - vs.resize(size); - for( int i = 0; i < size; ++i ) - { - vs[i] = auxiliary::strip(std::string(c[i], std::strlen(c[i])), {'\0'}); - /** @todo pointer should be freed, but this causes memory corruption */ - //free(c[i]); - } - a = Attribute(vs); - break; + vs[i] = auxiliary::strip( + std::string(c[i], std::strlen(c[i])), {'\0'}); + /** @todo pointer should be freed, but this causes memory + * corruption */ + // free(c[i]); } + a = Attribute(vs); + break; + } - default: - throw unsupported_data_error( - "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype '" + - std::to_string(datatype) + "' in vector branch"); + 
default: + throw unsupported_data_error( + "[ADIOS1] readAttribute: Unsupported ADIOS1 attribute datatype " + "'" + + std::to_string(datatype) + "' in vector branch"); } } @@ -1605,31 +1724,33 @@ CommonADIOS1IOHandlerImpl< ChildClass >::readAttribute(Writable* writable, *parameters.resource = a.getResource(); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::listPaths(Writable* writable, - Parameter< Operation::LIST_PATHS >& parameters) +template <typename ChildClass> +void CommonADIOS1IOHandlerImpl<ChildClass>::listPaths( + Writable *writable, Parameter<Operation::LIST_PATHS> &parameters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during path listing"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked written during path " + "listing"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string name = concrete_bp1_file_position(writable); - std::unordered_set< std::string > paths; - std::unordered_set< std::string > variables; - for( int i = 0; i < f->nvars; ++i ) + std::unordered_set<std::string> paths; + std::unordered_set<std::string> variables; + for (int i = 0; i < f->nvars; ++i) { - char* str = f->var_namelist[i]; + char *str = f->var_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); variables.emplace(s); - if( std::any_of(s.begin(), s.end(), [](char c) { return c == '/'; }) ) + if (std::any_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* there are more path levels after the current writable */ s = s.substr(0, s.find_first_of('/')); @@ -1637,21 +1758,25 @@ CommonADIOS1IOHandlerImpl< ChildClass >::listPaths(Writable* writable, } } } - for( int i = 0; i < f->nattrs; ++i ) + for (int i = 0; i < f->nattrs; ++i) { - char* str = f->attr_namelist[i]; + char *str = f->attr_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); - if( std::any_of(s.begin(), s.end(), [](char c) { return c == '/'; }) ) + if (std::any_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* remove the attribute name */ s = s.substr(0, s.find_last_of('/')); - if( !std::any_of(variables.begin(), - variables.end(), - [&s](std::string const& var){ return auxiliary::starts_with(var, s); })) + if (!std::any_of( + variables.begin(), + variables.end(), + [&s](std::string const &var) { + return auxiliary::starts_with(var, s); + })) { /* this is either a group or a constant scalar */ s = s.substr(0, s.find_first_of('/')); @@ -1661,32 +1786,34 @@ CommonADIOS1IOHandlerImpl< ChildClass >::listPaths(Writable* writable, } } - *parameters.paths = std::vector< std::string >(paths.begin(), paths.end()); + *parameters.paths = std::vector<std::string>(paths.begin(), paths.end()); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::listDatasets(Writable* writable, - Parameter< Operation::LIST_DATASETS >& parameters) +template <typename ChildClass> +void CommonADIOS1IOHandlerImpl<ChildClass>::listDatasets( + Writable *writable, Parameter<Operation::LIST_DATASETS> &parameters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during dataset listing"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked 
written during " + "dataset listing"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string name = concrete_bp1_file_position(writable); - std::unordered_set< std::string > paths; - for( int i = 0; i < f->nvars; ++i ) + std::unordered_set<std::string> paths; + for (int i = 0; i < f->nvars; ++i) { - char* str = f->var_namelist[i]; + char *str = f->var_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); - if( std::none_of(s.begin(), s.end(), [](char c) { return c == '/'; }) ) + if (std::none_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* this is a dataset of the writable */ paths.emplace(s); @@ -1694,89 +1821,97 @@ CommonADIOS1IOHandlerImpl< ChildClass >::listDatasets(Writable* writable, } } - *parameters.datasets = std::vector< std::string >(paths.begin(), paths.end()); + *parameters.datasets = std::vector<std::string>(paths.begin(), paths.end()); } -template< typename ChildClass > -void -CommonADIOS1IOHandlerImpl< ChildClass >::listAttributes(Writable* writable, - Parameter< Operation::LIST_ATTS >& parameters) +template <typename ChildClass> +void CommonADIOS1IOHandlerImpl<ChildClass>::listAttributes( + Writable *writable, Parameter<Operation::LIST_ATTS> &parameters) { - if( !writable->written ) - throw std::runtime_error("[ADIOS1] Internal error: Writable not marked written during attribute listing"); + if (!writable->written) + throw std::runtime_error( + "[ADIOS1] Internal error: Writable not marked written during " + "attribute listing"); - ADIOS_FILE* f; + ADIOS_FILE *f; f = m_openReadFileHandles.at(m_filePaths.at(writable)); std::string name = concrete_bp1_file_position(writable); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) { /* writable is a dataset and corresponds to an ADIOS variable */ - ADIOS_VARINFO* info; - info = adios_inq_var(f, - name.c_str()); - VERIFY(adios_errno == err_no_error, "[ADIOS1] Internal error: Failed to inquire ADIOS variable during attribute listing"); - VERIFY(info != nullptr, "[ADIOS1] Internal error: Failed to inquire ADIOS variable during attribute listing"); + ADIOS_VARINFO *info; + info = adios_inq_var(f, name.c_str()); + VERIFY( + adios_errno == err_no_error, + "[ADIOS1] Internal error: Failed to inquire ADIOS variable during " + "attribute listing"); + VERIFY( + info != nullptr, + "[ADIOS1] Internal error: Failed to inquire ADIOS variable during " + "attribute listing"); name += '/'; parameters.attributes->reserve(info->nattrs); - for( int i = 0; i < info->nattrs; ++i ) + for (int i = 0; i < info->nattrs; ++i) { - char* c = f->attr_namelist[info->attr_ids[i]]; - parameters.attributes->push_back(auxiliary::replace_first(std::string(c, std::strlen(c)), name, "")); + char *c = f->attr_namelist[info->attr_ids[i]]; + parameters.attributes->push_back(auxiliary::replace_first( + std::string(c, std::strlen(c)), name, "")); } adios_free_varinfo(info); - } else + } + else { /* there is no ADIOS variable associated with the writable */ - std::unordered_set< std::string > attributes; - for( int i = 0; i < f->nattrs; ++i ) + std::unordered_set<std::string> attributes; + for (int i = 0; i < f->nattrs; ++i) { - char* str = f->attr_namelist[i]; + char *str = f->attr_namelist[i]; std::string s(str, std::strlen(str)); - if( auxiliary::starts_with(s, name) ) + if (auxiliary::starts_with(s, name)) { /* remove the writable's path from the name */ s = auxiliary::replace_first(s, name, ""); - if( 
std::none_of(s.begin(), s.end(), [](char c) { return c == '/'; }) ) + if (std::none_of( + s.begin(), s.end(), [](char c) { return c == '/'; })) { /* this is an attribute of the writable */ attributes.insert(s); } } } - *parameters.attributes = std::vector< std::string >(attributes.begin(), attributes.end()); + *parameters.attributes = + std::vector<std::string>(attributes.begin(), attributes.end()); } } -template< typename ChildClass > -void CommonADIOS1IOHandlerImpl< ChildClass >::initJson( - json::TracingJSON config ) +template <typename ChildClass> +void CommonADIOS1IOHandlerImpl<ChildClass>::initJson(json::TracingJSON config) { - if( !config.json().contains( "adios1" ) ) + if (!config.json().contains("adios1")) { return; } - auto maybeTransform = datasetTransform( config[ "adios1" ] ); - if( maybeTransform.has_value() ) + auto maybeTransform = datasetTransform(config["adios1"]); + if (maybeTransform.has_value()) { - m_defaultTransform = std::move( maybeTransform.value() ); + m_defaultTransform = std::move(maybeTransform.value()); } auto shadow = config.invertShadow(); - if( shadow.size() > 0 ) + if (shadow.size() > 0) { - switch( config.originallySpecifiedAs ) + switch (config.originallySpecifiedAs) { case json::SupportedLanguages::JSON: std::cerr << "Warning: parts of the JSON configuration for ADIOS1 " "remain unused:\n" << shadow << std::endl; break; - case json::SupportedLanguages::TOML: - { - auto asToml = json::jsonToToml( shadow ); + case json::SupportedLanguages::TOML: { + auto asToml = json::jsonToToml(shadow); std::cerr << "Warning: parts of the JSON configuration for ADIOS1 " "remain unused:\n" << asToml << std::endl; @@ -1786,9 +1921,9 @@ void CommonADIOS1IOHandlerImpl< ChildClass >::initJson( } } -template class CommonADIOS1IOHandlerImpl< ADIOS1IOHandlerImpl >; +template class CommonADIOS1IOHandlerImpl<ADIOS1IOHandlerImpl>; #if openPMD_HAVE_MPI -template class CommonADIOS1IOHandlerImpl< ParallelADIOS1IOHandlerImpl >; +template class CommonADIOS1IOHandlerImpl<ParallelADIOS1IOHandlerImpl>; #endif // openPMD_HAVE_MPI } // namespace openPMD diff --git a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp index fdaec9957c..a4388782ac 100644 --- a/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp +++ b/src/IO/ADIOS/ParallelADIOS1IOHandler.cpp @@ -22,46 +22,54 @@ #include "openPMD/IO/ADIOS/ParallelADIOS1IOHandlerImpl.hpp" #if openPMD_HAVE_MPI && openPMD_HAVE_ADIOS1 -# include "openPMD/IO/IOTask.hpp" -# include -# include -# include -# include -# include -# include +#include "openPMD/IO/IOTask.hpp" +#include +#include +#include +#include +#include +#include #endif namespace openPMD { #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif - -ParallelADIOS1IOHandlerImpl::ParallelADIOS1IOHandlerImpl(AbstractIOHandler* handler, - json::TracingJSON json, - MPI_Comm comm) - : Base_t{handler}, - m_mpiInfo{MPI_INFO_NULL} +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif + +ParallelADIOS1IOHandlerImpl::ParallelADIOS1IOHandlerImpl( + AbstractIOHandler *handler, json::TracingJSON json, MPI_Comm comm) + : Base_t{handler}, m_mpiInfo{MPI_INFO_NULL} { int status = MPI_SUCCESS; status = MPI_Comm_dup(comm, &m_mpiComm); - VERIFY(status == MPI_SUCCESS, "[ADIOS1] Internal error: Failed to duplicate 
MPI communicator"); - initJson( std::move( json ) ); + VERIFY( + status == MPI_SUCCESS, + "[ADIOS1] Internal error: Failed to duplicate MPI communicator"); + initJson(std::move(json)); } ParallelADIOS1IOHandlerImpl::~ParallelADIOS1IOHandlerImpl() { - for( auto& f : m_openReadFileHandles ) + for (auto &f : m_openReadFileHandles) close(f.second); m_openReadFileHandles.clear(); - if( this->m_handler->m_backendAccess != Access::READ_ONLY ) + if (this->m_handler->m_backendAccess != Access::READ_ONLY) { - for( auto& group : m_attributeWrites ) - for( auto& att : group.second ) + for (auto &group : m_attributeWrites) + for (auto &att : group.second) flush_attribute(group.first, att.first, att.second); // unordered map caused the value of the same container @@ -69,11 +77,11 @@ ParallelADIOS1IOHandlerImpl::~ParallelADIOS1IOHandlerImpl() // which caused trouble with close(), which is collective // so I just sort by file name to force all processors close // all the fids in the same order - std::map< std::string, int64_t > allFiles; - for( auto& f : m_openWriteFileHandles ) + std::map allFiles; + for (auto &f : m_openWriteFileHandles) allFiles[*(f.first)] = f.second; - for( auto const& p : allFiles ) + for (auto const &p : allFiles) { auto const fid = p.second; close(fid); @@ -85,139 +93,213 @@ ParallelADIOS1IOHandlerImpl::~ParallelADIOS1IOHandlerImpl() int status; MPI_Barrier(m_mpiComm); status = adios_read_finalize_method(m_readMethod); - if( status != err_no_error ) - std::cerr << "Internal error: Failed to finalize ADIOS reading method (parallel)\n"; + if (status != err_no_error) + std::cerr << "Internal error: Failed to finalize ADIOS reading method " + "(parallel)\n"; MPI_Barrier(m_mpiComm); int rank = 0; MPI_Comm_rank(m_mpiComm, &rank); status = adios_finalize(rank); - if( status != err_no_error ) + if (status != err_no_error) std::cerr << "Internal error: Failed to finalize ADIOS (parallel)\n"; MPI_Comm_free(&m_mpiComm); } -std::future< void > -ParallelADIOS1IOHandlerImpl::flush() +std::future ParallelADIOS1IOHandlerImpl::flush() { using namespace auxiliary; - auto handler = dynamic_cast< ParallelADIOS1IOHandler* >(m_handler); - while( !handler->m_setup.empty() ) + auto handler = dynamic_cast(m_handler); + while (!handler->m_setup.empty()) { - IOTask& i = handler->m_setup.front(); + IOTask &i = handler->m_setup.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::CREATE_FILE: - createFile(i.writable, deref_dynamic_cast< Parameter< Operation::CREATE_FILE > >(i.parameter.get())); - break; - case O::CREATE_PATH: - createPath(i.writable, deref_dynamic_cast< Parameter< O::CREATE_PATH > >(i.parameter.get())); - break; - case O::OPEN_PATH: - openPath(i.writable, deref_dynamic_cast< Parameter< O::OPEN_PATH > >(i.parameter.get())); - break; - case O::CREATE_DATASET: - createDataset(i.writable, deref_dynamic_cast< Parameter< O::CREATE_DATASET > >(i.parameter.get())); - break; - case O::WRITE_ATT: - writeAttribute(i.writable, deref_dynamic_cast< Parameter< O::WRITE_ATT > >(i.parameter.get())); - break; - case O::OPEN_FILE: - openFile(i.writable, deref_dynamic_cast< Parameter< O::OPEN_FILE > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS setup queue"); + case O::CREATE_FILE: + createFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CREATE_PATH: + createPath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_PATH: + openPath( + 
i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CREATE_DATASET: + createDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::WRITE_ATT: + writeAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_FILE: + openFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS setup " + "queue"); } - } catch (...) + } + catch (...) { - std::cerr - << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString( i.operation ) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." - << std::endl; + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." + << std::endl; handler->m_setup.pop(); throw; } handler->m_setup.pop(); } - - while( !handler->m_work.empty() ) + while (!handler->m_work.empty()) { - IOTask& i = handler->m_work.front(); + IOTask &i = handler->m_work.front(); try { - switch( i.operation ) + switch (i.operation) { using O = Operation; - case O::EXTEND_DATASET: - extendDataset(i.writable, deref_dynamic_cast< Parameter< O::EXTEND_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_PATH: - closePath(i.writable, deref_dynamic_cast< Parameter< O::CLOSE_PATH > >(i.parameter.get())); - break; - case O::OPEN_DATASET: - openDataset(i.writable, deref_dynamic_cast< Parameter< O::OPEN_DATASET > >(i.parameter.get())); - break; - case O::CLOSE_FILE: - closeFile(i.writable, *dynamic_cast< Parameter< O::CLOSE_FILE >* >(i.parameter.get())); - break; - case O::DELETE_FILE: - deleteFile(i.writable, deref_dynamic_cast< Parameter< O::DELETE_FILE > >(i.parameter.get())); - break; - case O::DELETE_PATH: - deletePath(i.writable, deref_dynamic_cast< Parameter< O::DELETE_PATH > >(i.parameter.get())); - break; - case O::DELETE_DATASET: - deleteDataset(i.writable, deref_dynamic_cast< Parameter< O::DELETE_DATASET > >(i.parameter.get())); - break; - case O::DELETE_ATT: - deleteAttribute(i.writable, deref_dynamic_cast< Parameter< O::DELETE_ATT > >(i.parameter.get())); - break; - case O::WRITE_DATASET: - writeDataset(i.writable, deref_dynamic_cast< Parameter< O::WRITE_DATASET > >(i.parameter.get())); - break; - case O::READ_DATASET: - readDataset(i.writable, deref_dynamic_cast< Parameter< O::READ_DATASET > >(i.parameter.get())); - break; - case O::GET_BUFFER_VIEW: - getBufferView(i.writable, deref_dynamic_cast< Parameter< O::GET_BUFFER_VIEW > >(i.parameter.get())); - break; - case O::READ_ATT: - readAttribute(i.writable, deref_dynamic_cast< Parameter< O::READ_ATT > >(i.parameter.get())); - break; - case O::LIST_PATHS: - listPaths(i.writable, deref_dynamic_cast< Parameter< O::LIST_PATHS > >(i.parameter.get())); - break; - case O::LIST_DATASETS: - listDatasets(i.writable, deref_dynamic_cast< Parameter< O::LIST_DATASETS > >(i.parameter.get())); - break; - case O::LIST_ATTS: - listAttributes(i.writable, deref_dynamic_cast< Parameter< O::LIST_ATTS > >(i.parameter.get())); - break; - case O::ADVANCE: - advance(i.writable, deref_dynamic_cast< Parameter< O::ADVANCE > >(i.parameter.get())); - break; - case O::AVAILABLE_CHUNKS: - availableChunks(i.writable, deref_dynamic_cast< Parameter< O::AVAILABLE_CHUNKS > >(i.parameter.get())); - break; - default: - VERIFY(false, "[ADIOS1] Internal error: Wrong operation in ADIOS work queue"); + case 
O::EXTEND_DATASET: + extendDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CLOSE_PATH: + closePath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::OPEN_DATASET: + openDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::CLOSE_FILE: + closeFile( + i.writable, + *dynamic_cast *>( + i.parameter.get())); + break; + case O::DELETE_FILE: + deleteFile( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_PATH: + deletePath( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_DATASET: + deleteDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::DELETE_ATT: + deleteAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::WRITE_DATASET: + writeDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::READ_DATASET: + readDataset( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::GET_BUFFER_VIEW: + getBufferView( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::READ_ATT: + readAttribute( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_PATHS: + listPaths( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_DATASETS: + listDatasets( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::LIST_ATTS: + listAttributes( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::ADVANCE: + advance( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + case O::AVAILABLE_CHUNKS: + availableChunks( + i.writable, + deref_dynamic_cast>( + i.parameter.get())); + break; + default: + VERIFY( + false, + "[ADIOS1] Internal error: Wrong operation in ADIOS work " + "queue"); } - } catch (...) + } + catch (...) { - std::cerr - << "[AbstractIOHandlerImpl] IO Task " - << internal::operationAsString( i.operation ) - << " failed with exception. Removing task" - << " from IO queue and passing on the exception." - << std::endl; + std::cerr << "[AbstractIOHandlerImpl] IO Task " + << internal::operationAsString(i.operation) + << " failed with exception. Removing task" + << " from IO queue and passing on the exception." 
+ << std::endl; m_handler->m_work.pop(); throw; } @@ -225,83 +307,84 @@ ParallelADIOS1IOHandlerImpl::flush() } int status; - for( auto& file : m_scheduledReads ) + for (auto &file : m_scheduledReads) { - status = adios_perform_reads(file.first, - 1); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to perform ADIOS reads during dataset reading"); + status = adios_perform_reads(file.first, 1); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to perform ADIOS reads during " + "dataset reading"); - for( auto& sel : file.second ) + for (auto &sel : file.second) adios_selection_delete(sel); } m_scheduledReads.clear(); - return std::future< void >(); + return std::future(); } -void -ParallelADIOS1IOHandlerImpl::init() +void ParallelADIOS1IOHandlerImpl::init() { int status; status = adios_init_noxml(m_mpiComm); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS"); /** @todo ADIOS_READ_METHOD_BP_AGGREGATE */ m_readMethod = ADIOS_READ_METHOD_BP; status = adios_read_init_method(m_readMethod, m_mpiComm, ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to initialize ADIOS reading method"); } -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler(std::string path, - Access at, - json::TracingJSON json, - MPI_Comm comm) - : AbstractIOHandler(std::move(path), at, comm), - m_impl{new ParallelADIOS1IOHandlerImpl(this, std::move(json), comm)} +ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( + std::string path, Access at, json::TracingJSON json, MPI_Comm comm) + : AbstractIOHandler(std::move(path), at, comm) + , m_impl{new ParallelADIOS1IOHandlerImpl(this, std::move(json), comm)} { m_impl->init(); } ParallelADIOS1IOHandler::~ParallelADIOS1IOHandler() = default; -std::future< void > -ParallelADIOS1IOHandler::flush() +std::future ParallelADIOS1IOHandler::flush() { return m_impl->flush(); } -void -ParallelADIOS1IOHandler::enqueue(IOTask const& i) +void ParallelADIOS1IOHandler::enqueue(IOTask const &i) { - switch( i.operation ) + switch (i.operation) { - case Operation::CREATE_FILE: - case Operation::CREATE_PATH: - case Operation::OPEN_PATH: - case Operation::CREATE_DATASET: - case Operation::OPEN_FILE: - case Operation::WRITE_ATT: - m_setup.push(i); - return; - default: - m_work.push(i); - return; + case Operation::CREATE_FILE: + case Operation::CREATE_PATH: + case Operation::OPEN_PATH: + case Operation::CREATE_DATASET: + case Operation::OPEN_FILE: + case Operation::WRITE_ATT: + m_setup.push(i); + return; + default: + m_work.push(i); + return; } } -int64_t -ParallelADIOS1IOHandlerImpl::open_write(Writable* writable) +int64_t ParallelADIOS1IOHandlerImpl::open_write(Writable *writable) { auto res = m_filePaths.find(writable); - if( res == m_filePaths.end() ) + if (res == m_filePaths.end()) res = m_filePaths.find(writable->parent); std::string mode; - if( m_existsOnDisk[res->second] ) + if (m_existsOnDisk[res->second]) { mode = "u"; /* close the handle that corresponds to the file we want to append to */ - if( m_openReadFileHandles.find(res->second) != m_openReadFileHandles.end() ) + if (m_openReadFileHandles.find(res->second) != + m_openReadFileHandles.end()) { close(m_openReadFileHandles[res->second]); m_openReadFileHandles.erase(res->second); @@ -315,36 +398,41 @@ ParallelADIOS1IOHandlerImpl::open_write(Writable* 
writable) int64_t fd; int status; - status = adios_open(&fd, - res->second->c_str(), - res->second->c_str(), - mode.c_str(), - m_mpiComm); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to open_write ADIOS file"); + status = adios_open( + &fd, + res->second->c_str(), + res->second->c_str(), + mode.c_str(), + m_mpiComm); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to open_write ADIOS file"); return fd; } -ADIOS_FILE* -ParallelADIOS1IOHandlerImpl::open_read(std::string const & name) +ADIOS_FILE *ParallelADIOS1IOHandlerImpl::open_read(std::string const &name) { ADIOS_FILE *f; - f = adios_read_open_file(name.c_str(), - m_readMethod, - m_mpiComm); - VERIFY(adios_errno != err_file_not_found, "[ADIOS1] Internal error: ADIOS file not found"); - VERIFY(f != nullptr, "[ADIOS1] Internal error: Failed to open_read ADIOS file"); + f = adios_read_open_file(name.c_str(), m_readMethod, m_mpiComm); + VERIFY( + adios_errno != err_file_not_found, + "[ADIOS1] Internal error: ADIOS file not found"); + VERIFY( + f != nullptr, + "[ADIOS1] Internal error: Failed to open_read ADIOS file"); return f; } -int64_t -ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) +int64_t ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) { std::stringstream params; - params << "num_aggregators=" << getEnvNum("OPENPMD_ADIOS_NUM_AGGREGATORS", "1") + params << "num_aggregators=" + << getEnvNum("OPENPMD_ADIOS_NUM_AGGREGATORS", "1") << ";num_ost=" << getEnvNum("OPENPMD_ADIOS_NUM_OST", "0") - << ";have_metadata_file=" << getEnvNum("OPENPMD_ADIOS_HAVE_METADATA_FILE", "1") + << ";have_metadata_file=" + << getEnvNum("OPENPMD_ADIOS_HAVE_METADATA_FILE", "1") << ";verbose=2"; std::string params_str = params.str(); // important: copy out of temporary! 
@@ -352,45 +440,45 @@ ParallelADIOS1IOHandlerImpl::initialize_group(std::string const &name) int64_t group; ADIOS_STATISTICS_FLAG noStatistics = adios_stat_no; status = adios_declare_group(&group, name.c_str(), "", noStatistics); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to declare ADIOS group"); - status = adios_select_method(group, "MPI_AGGREGATE", params_str.c_str(), ""); - VERIFY(status == err_no_error, "[ADIOS1] Internal error: Failed to select ADIOS method"); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to declare ADIOS group"); + status = + adios_select_method(group, "MPI_AGGREGATE", params_str.c_str(), ""); + VERIFY( + status == err_no_error, + "[ADIOS1] Internal error: Failed to select ADIOS method"); return group; } #else // openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -# if openPMD_HAVE_MPI -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler(std::string path, - Access at, - json::TracingJSON, - MPI_Comm comm) - : AbstractIOHandler(std::move(path), at, comm) +#if openPMD_HAVE_MPI +ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( + std::string path, Access at, json::TracingJSON, MPI_Comm comm) + : AbstractIOHandler(std::move(path), at, comm) { throw std::runtime_error("openPMD-api built without ADIOS1 support"); } -# else -ParallelADIOS1IOHandler::ParallelADIOS1IOHandler(std::string path, - Access at, - json::TracingJSON) - : AbstractIOHandler(std::move(path), at) +#else +ParallelADIOS1IOHandler::ParallelADIOS1IOHandler( + std::string path, Access at, json::TracingJSON) + : AbstractIOHandler(std::move(path), at) { - throw std::runtime_error("openPMD-api built without parallel ADIOS1 support"); + throw std::runtime_error( + "openPMD-api built without parallel ADIOS1 support"); } -# endif // openPMD_HAVE_MPI +#endif // openPMD_HAVE_MPI ParallelADIOS1IOHandler::~ParallelADIOS1IOHandler() = default; -std::future< void > -ParallelADIOS1IOHandler::flush() +std::future ParallelADIOS1IOHandler::flush() { - return std::future< void >(); + return std::future(); } #if openPMD_HAVE_ADIOS1 -void -ParallelADIOS1IOHandler::enqueue(IOTask const&) -{ -} +void ParallelADIOS1IOHandler::enqueue(IOTask const &) +{} #endif // openPMD_HAVE_ADIOS1 #endif // openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI } // namespace openPMD diff --git a/src/IO/AbstractIOHandlerHelper.cpp b/src/IO/AbstractIOHandlerHelper.cpp index f37b094dbe..c05eec7c19 100644 --- a/src/IO/AbstractIOHandlerHelper.cpp +++ b/src/IO/AbstractIOHandlerHelper.cpp @@ -32,91 +32,86 @@ namespace openPMD { #if openPMD_HAVE_MPI - template<> - std::shared_ptr< AbstractIOHandler > - createIOHandler< json::TracingJSON >( - std::string path, - Access access, - Format format, - MPI_Comm comm, - json::TracingJSON options ) +template <> +std::shared_ptr createIOHandler( + std::string path, + Access access, + Format format, + MPI_Comm comm, + json::TracingJSON options) +{ + (void)options; + switch (format) { - (void) options; - switch( format ) - { - case Format::HDF5: - return std::make_shared< ParallelHDF5IOHandler >( - path, access, comm, std::move( options ) ); - case Format::ADIOS1: -# if openPMD_HAVE_ADIOS1 - return std::make_shared< ParallelADIOS1IOHandler >( - path, access, std::move( options ), comm ); -# else - throw std::runtime_error("openPMD-api built without ADIOS1 support"); -# endif - case Format::ADIOS2: - return std::make_shared< ADIOS2IOHandler >( - path, access, comm, std::move( options ), "bp4" ); - case Format::ADIOS2_SST: - return std::make_shared< ADIOS2IOHandler >( - path, access, comm, 
std::move( options ), "sst" ); - case Format::ADIOS2_SSC: - return std::make_shared< ADIOS2IOHandler >( - path, access, comm, std::move( options ), "ssc" ); - default: - throw std::runtime_error( - "Unknown file format! Did you specify a file ending?" ); - } + case Format::HDF5: + return std::make_shared( + path, access, comm, std::move(options)); + case Format::ADIOS1: +#if openPMD_HAVE_ADIOS1 + return std::make_shared( + path, access, std::move(options), comm); +#else + throw std::runtime_error("openPMD-api built without ADIOS1 support"); +#endif + case Format::ADIOS2: + return std::make_shared( + path, access, comm, std::move(options), "bp4"); + case Format::ADIOS2_SST: + return std::make_shared( + path, access, comm, std::move(options), "sst"); + case Format::ADIOS2_SSC: + return std::make_shared( + path, access, comm, std::move(options), "ssc"); + default: + throw std::runtime_error( + "Unknown file format! Did you specify a file ending?"); } +} #endif - template<> - std::shared_ptr< AbstractIOHandler > - createIOHandler< json::TracingJSON >( - std::string path, - Access access, - Format format, - json::TracingJSON options ) +template <> +std::shared_ptr createIOHandler( + std::string path, Access access, Format format, json::TracingJSON options) +{ + (void)options; + switch (format) { - (void) options; - switch( format ) - { - case Format::HDF5: - return std::make_shared< HDF5IOHandler >( - path, access, std::move( options ) ); - case Format::ADIOS1: + case Format::HDF5: + return std::make_shared( + path, access, std::move(options)); + case Format::ADIOS1: #if openPMD_HAVE_ADIOS1 - return std::make_shared< ADIOS1IOHandler >( - path, access, std::move( options ) ); + return std::make_shared( + path, access, std::move(options)); #else - throw std::runtime_error("openPMD-api built without ADIOS1 support"); + throw std::runtime_error("openPMD-api built without ADIOS1 support"); #endif #if openPMD_HAVE_ADIOS2 - case Format::ADIOS2: - return std::make_shared< ADIOS2IOHandler >( - path, access, std::move( options ), "bp4" ); - case Format::ADIOS2_SST: - return std::make_shared< ADIOS2IOHandler >( - path, access, std::move( options ), "sst" ); - case Format::ADIOS2_SSC: - return std::make_shared< ADIOS2IOHandler >( - path, access, std::move( options ), "ssc" ); + case Format::ADIOS2: + return std::make_shared( + path, access, std::move(options), "bp4"); + case Format::ADIOS2_SST: + return std::make_shared( + path, access, std::move(options), "sst"); + case Format::ADIOS2_SSC: + return std::make_shared( + path, access, std::move(options), "ssc"); #endif // openPMD_HAVE_ADIOS2 - case Format::JSON: - return std::make_shared< JSONIOHandler >( path, access ); - default: - throw std::runtime_error( - "Unknown file format! Did you specify a file ending?" ); - } + case Format::JSON: + return std::make_shared(path, access); + default: + throw std::runtime_error( + "Unknown file format! 
Did you specify a file ending?"); } +} - std::shared_ptr< AbstractIOHandler > - createIOHandler( std::string path, Access access, Format format ) - { - return createIOHandler( - std::move( path ), - access, - format, - json::TracingJSON( json::ParsedConfig{} )); - } +std::shared_ptr +createIOHandler(std::string path, Access access, Format format) +{ + return createIOHandler( + std::move(path), + access, + format, + json::TracingJSON(json::ParsedConfig{})); +} } // namespace openPMD diff --git a/src/IO/DummyIOHandler.cpp b/src/IO/DummyIOHandler.cpp index 3a7fd9559f..f10cd50ac9 100644 --- a/src/IO/DummyIOHandler.cpp +++ b/src/IO/DummyIOHandler.cpp @@ -23,19 +23,17 @@ #include #include - namespace openPMD { - DummyIOHandler::DummyIOHandler(std::string path, Access at) - : AbstractIOHandler(std::move(path), at) - { } +DummyIOHandler::DummyIOHandler(std::string path, Access at) + : AbstractIOHandler(std::move(path), at) +{} - void DummyIOHandler::enqueue(IOTask const&) - { } +void DummyIOHandler::enqueue(IOTask const &) +{} - std::future< void > - DummyIOHandler::flush() - { - return std::future< void >(); - } -} // openPMD +std::future DummyIOHandler::flush() +{ + return std::future(); +} +} // namespace openPMD diff --git a/src/IO/HDF5/HDF5Auxiliary.cpp b/src/IO/HDF5/HDF5Auxiliary.cpp index f206dcda3d..3c7bab8103 100644 --- a/src/IO/HDF5/HDF5Auxiliary.cpp +++ b/src/IO/HDF5/HDF5Auxiliary.cpp @@ -20,303 +20,296 @@ */ #include "openPMD/config.hpp" #if openPMD_HAVE_HDF5 -# include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" -# include "openPMD/auxiliary/StringManip.hpp" -# include "openPMD/backend/Attribute.hpp" -# include "openPMD/backend/Writable.hpp" -# include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" +#include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/auxiliary/StringManip.hpp" +#include "openPMD/backend/Attribute.hpp" +#include "openPMD/backend/Writable.hpp" -# include +#include -# include -# include -# include -# include -# include -# include -# include -# include - -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif +#include +#include +#include +#include +#include +#include +#include +#include +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif -hid_t -openPMD::GetH5DataType::operator()(Attribute const &att) +hid_t openPMD::GetH5DataType::operator()(Attribute const &att) { using DT = Datatype; - switch (att.dtype) { - case DT::CHAR: - case DT::VEC_CHAR: - return H5Tcopy(H5T_NATIVE_CHAR); - case DT::UCHAR: - case DT::VEC_UCHAR: - return H5Tcopy(H5T_NATIVE_UCHAR); - case DT::SHORT: - case DT::VEC_SHORT: - return H5Tcopy(H5T_NATIVE_SHORT); - case DT::INT: - case DT::VEC_INT: - return H5Tcopy(H5T_NATIVE_INT); - case DT::LONG: - case DT::VEC_LONG: - return H5Tcopy(H5T_NATIVE_LONG); - case DT::LONGLONG: - case DT::VEC_LONGLONG: - return H5Tcopy(H5T_NATIVE_LLONG); - case DT::USHORT: - case DT::VEC_USHORT: - return H5Tcopy(H5T_NATIVE_USHORT); - case DT::UINT: - case DT::VEC_UINT: - return H5Tcopy(H5T_NATIVE_UINT); - case DT::ULONG: - case DT::VEC_ULONG: - return H5Tcopy(H5T_NATIVE_ULONG); - case DT::ULONGLONG: - case DT::VEC_ULONGLONG: - return H5Tcopy(H5T_NATIVE_ULLONG); - case DT::FLOAT: - case DT::VEC_FLOAT: - 
return H5Tcopy(H5T_NATIVE_FLOAT); - case DT::DOUBLE: - case DT::ARR_DBL_7: - case DT::VEC_DOUBLE: - return H5Tcopy(H5T_NATIVE_DOUBLE); - case DT::LONG_DOUBLE: - case DT::VEC_LONG_DOUBLE: - return H5Tcopy(H5T_NATIVE_LDOUBLE); - case DT::CFLOAT: - case DT::VEC_CFLOAT: - return H5Tcopy( m_userTypes.at( typeid(std::complex< float >).name() ) ); - case DT::CDOUBLE: - case DT::VEC_CDOUBLE: - return H5Tcopy( m_userTypes.at( typeid(std::complex< double >).name() ) ); - case DT::CLONG_DOUBLE: - case DT::VEC_CLONG_DOUBLE: - return H5Tcopy( m_userTypes.at( typeid(std::complex< long double >).name() ) ); - case DT::STRING: { - hid_t string_t_id = H5Tcopy(H5T_C_S1); - size_t const max_len = att.get().size(); - VERIFY(max_len > 0, "[HDF5] max_len must be >0 for STRING"); - herr_t status = H5Tset_size(string_t_id, max_len); - VERIFY(status >= 0, "[HDF5] Internal error: Failed in H5Tset_size for STRING"); - return string_t_id; - } - case DT::VEC_STRING: { - hid_t string_t_id = H5Tcopy(H5T_C_S1); - size_t max_len = 0; - for (std::string const &s : att.get >()) - max_len = std::max(max_len, s.size()); - VERIFY(max_len > 0, "[HDF5] max_len must be >0 for VEC_STRING"); - herr_t status = H5Tset_size(string_t_id, max_len); - VERIFY(status >= 0, "[HDF5] Internal error: Failed in H5Tset_size for VEC_STRING"); - return string_t_id; - } - case DT::BOOL: - return H5Tcopy( m_userTypes.at( typeid(bool).name() ) ); - case DT::UNDEFINED: - throw std::runtime_error("[HDF5] Unknown Attribute datatype (HDF5 datatype)"); - default: - throw std::runtime_error("[HDF5] Datatype not implemented"); + switch (att.dtype) + { + case DT::CHAR: + case DT::VEC_CHAR: + return H5Tcopy(H5T_NATIVE_CHAR); + case DT::UCHAR: + case DT::VEC_UCHAR: + return H5Tcopy(H5T_NATIVE_UCHAR); + case DT::SHORT: + case DT::VEC_SHORT: + return H5Tcopy(H5T_NATIVE_SHORT); + case DT::INT: + case DT::VEC_INT: + return H5Tcopy(H5T_NATIVE_INT); + case DT::LONG: + case DT::VEC_LONG: + return H5Tcopy(H5T_NATIVE_LONG); + case DT::LONGLONG: + case DT::VEC_LONGLONG: + return H5Tcopy(H5T_NATIVE_LLONG); + case DT::USHORT: + case DT::VEC_USHORT: + return H5Tcopy(H5T_NATIVE_USHORT); + case DT::UINT: + case DT::VEC_UINT: + return H5Tcopy(H5T_NATIVE_UINT); + case DT::ULONG: + case DT::VEC_ULONG: + return H5Tcopy(H5T_NATIVE_ULONG); + case DT::ULONGLONG: + case DT::VEC_ULONGLONG: + return H5Tcopy(H5T_NATIVE_ULLONG); + case DT::FLOAT: + case DT::VEC_FLOAT: + return H5Tcopy(H5T_NATIVE_FLOAT); + case DT::DOUBLE: + case DT::ARR_DBL_7: + case DT::VEC_DOUBLE: + return H5Tcopy(H5T_NATIVE_DOUBLE); + case DT::LONG_DOUBLE: + case DT::VEC_LONG_DOUBLE: + return H5Tcopy(H5T_NATIVE_LDOUBLE); + case DT::CFLOAT: + case DT::VEC_CFLOAT: + return H5Tcopy(m_userTypes.at(typeid(std::complex).name())); + case DT::CDOUBLE: + case DT::VEC_CDOUBLE: + return H5Tcopy(m_userTypes.at(typeid(std::complex).name())); + case DT::CLONG_DOUBLE: + case DT::VEC_CLONG_DOUBLE: + return H5Tcopy( + m_userTypes.at(typeid(std::complex).name())); + case DT::STRING: { + hid_t string_t_id = H5Tcopy(H5T_C_S1); + size_t const max_len = att.get().size(); + VERIFY(max_len > 0, "[HDF5] max_len must be >0 for STRING"); + herr_t status = H5Tset_size(string_t_id, max_len); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed in H5Tset_size for STRING"); + return string_t_id; + } + case DT::VEC_STRING: { + hid_t string_t_id = H5Tcopy(H5T_C_S1); + size_t max_len = 0; + for (std::string const &s : att.get>()) + max_len = std::max(max_len, s.size()); + VERIFY(max_len > 0, "[HDF5] max_len must be >0 for VEC_STRING"); + herr_t 
status = H5Tset_size(string_t_id, max_len); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed in H5Tset_size for VEC_STRING"); + return string_t_id; + } + case DT::BOOL: + return H5Tcopy(m_userTypes.at(typeid(bool).name())); + case DT::UNDEFINED: + throw std::runtime_error( + "[HDF5] Unknown Attribute datatype (HDF5 datatype)"); + default: + throw std::runtime_error("[HDF5] Datatype not implemented"); } } - -hid_t -openPMD::getH5DataSpace(Attribute const& att) +hid_t openPMD::getH5DataSpace(Attribute const &att) { using DT = Datatype; - switch( att.dtype ) + switch (att.dtype) { - case DT::CHAR: - case DT::UCHAR: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::FLOAT: - case DT::DOUBLE: - case DT::LONG_DOUBLE: - case DT::CFLOAT: - case DT::CDOUBLE: - case DT::CLONG_DOUBLE: - case DT::STRING: - case DT::BOOL: - return H5Screate(H5S_SCALAR); - case DT::VEC_CHAR: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< char > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_SHORT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< short > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_INT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< int > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_LONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_LONGLONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< long long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_UCHAR: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned char > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_USHORT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned short > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_UINT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned int > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_ULONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_ULONGLONG: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< unsigned long long > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_FLOAT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< float > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_DOUBLE: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< double > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_LONG_DOUBLE: - { - hid_t vec_t_id = 
H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< long double > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_CFLOAT: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::complex< float > > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_CDOUBLE: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::complex< double > > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_CLONG_DOUBLE: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::complex< long double > > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::VEC_STRING: - { - hid_t vec_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {att.get< std::vector< std::string > >().size()}; - H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); - return vec_t_id; - } - case DT::ARR_DBL_7: - { - hid_t array_t_id = H5Screate(H5S_SIMPLE); - hsize_t dims[1] = {7}; - H5Sset_extent_simple(array_t_id, 1, dims, nullptr); - return array_t_id; - } - case DT::UNDEFINED: - throw std::runtime_error("Unknown Attribute datatype (HDF5 dataspace)"); - default: - throw std::runtime_error("Datatype not implemented in HDF5 IO"); + case DT::CHAR: + case DT::UCHAR: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + case DT::ULONG: + case DT::ULONGLONG: + case DT::FLOAT: + case DT::DOUBLE: + case DT::LONG_DOUBLE: + case DT::CFLOAT: + case DT::CDOUBLE: + case DT::CLONG_DOUBLE: + case DT::STRING: + case DT::BOOL: + return H5Screate(H5S_SCALAR); + case DT::VEC_CHAR: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_SHORT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_INT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_LONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_LONGLONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_UCHAR: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_USHORT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_UINT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_ULONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_ULONGLONG: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + 
H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_FLOAT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_DOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_LONG_DOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_CFLOAT: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_CDOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_CLONG_DOUBLE: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = { + att.get>>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::VEC_STRING: { + hid_t vec_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {att.get>().size()}; + H5Sset_extent_simple(vec_t_id, 1, dims, nullptr); + return vec_t_id; + } + case DT::ARR_DBL_7: { + hid_t array_t_id = H5Screate(H5S_SIMPLE); + hsize_t dims[1] = {7}; + H5Sset_extent_simple(array_t_id, 1, dims, nullptr); + return array_t_id; + } + case DT::UNDEFINED: + throw std::runtime_error("Unknown Attribute datatype (HDF5 dataspace)"); + default: + throw std::runtime_error("Datatype not implemented in HDF5 IO"); } } -std::string -openPMD::concrete_h5_file_position(Writable* w) +std::string openPMD::concrete_h5_file_position(Writable *w) { - std::stack< Writable* > hierarchy; - if( !w->abstractFilePosition ) + std::stack hierarchy; + if (!w->abstractFilePosition) w = w->parent; - while( w ) + while (w) { hierarchy.push(w); w = w->parent; } std::string pos; - while( !hierarchy.empty() ) + while (!hierarchy.empty()) { - pos += std::dynamic_pointer_cast< HDF5FilePosition >(hierarchy.top()->abstractFilePosition)->location; + pos += std::dynamic_pointer_cast( + hierarchy.top()->abstractFilePosition) + ->location; hierarchy.pop(); } return auxiliary::replace_all(pos, "//", "/"); } - -std::vector< hsize_t > -openPMD::getOptimalChunkDims( std::vector< hsize_t > const dims, - size_t const typeSize ) +std::vector openPMD::getOptimalChunkDims( + std::vector const dims, size_t const typeSize) { auto const ndims = dims.size(); - std::vector< hsize_t > chunk_dims( dims.size() ); + std::vector chunk_dims(dims.size()); // chunk sizes in KiByte - constexpr std::array< size_t, 7u > CHUNK_SIZES_KiB - {{4096u, 2048u, 1024u, 512u, 256u, 128u, 64u}}; + constexpr std::array CHUNK_SIZES_KiB{ + {4096u, 2048u, 1024u, 512u, 256u, 128u, 64u}}; size_t total_data_size = typeSize; size_t max_chunk_size = typeSize; @@ -342,7 +335,7 @@ openPMD::getOptimalChunkDims( std::vector< hsize_t > const dims, } // compute the target chunk size - for( auto const & chunk_size : CHUNK_SIZES_KiB ) + for (auto const &chunk_size : CHUNK_SIZES_KiB) { target_chunk_size = chunk_size * 1024; if (target_chunk_size <= max_chunk_size) @@ -352,7 +345,7 @@ openPMD::getOptimalChunkDims( std::vector< hsize_t > const dims, size_t current_chunk_size = typeSize; size_t last_chunk_diff = target_chunk_size; std::multimap::const_iterator current_index = - dims_order.begin(); + 
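// Illustration, not part of the patch: every VEC_* branch of getH5DataSpace
// above builds the same 1-D dataspace. A minimal sketch of that pattern for
// an element count n (hypothetical helper name); H5Screate_simple would be
// an equivalent shortcut:
#include <hdf5.h>

hid_t makeVectorDataspace(hsize_t n)
{
    hid_t space_id = H5Screate(H5S_SIMPLE); // simple (array) dataspace
    hsize_t dims[1] = {n};                  // one dimension of length n
    H5Sset_extent_simple(space_id, 1, dims, nullptr); // maxdims = dims
    return space_id; // caller must H5Sclose() the result
}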
dims_order.begin(); while (current_chunk_size < target_chunk_size) { diff --git a/src/IO/HDF5/HDF5IOHandler.cpp b/src/IO/HDF5/HDF5IOHandler.cpp index 3d2229733e..685c5a20e6 100644 --- a/src/IO/HDF5/HDF5IOHandler.cpp +++ b/src/IO/HDF5/HDF5IOHandler.cpp @@ -23,14 +23,14 @@ #include "openPMD/auxiliary/Environment.hpp" #if openPMD_HAVE_HDF5 -# include "openPMD/Datatype.hpp" -# include "openPMD/Error.hpp" -# include "openPMD/auxiliary/Filesystem.hpp" -# include "openPMD/auxiliary/StringManip.hpp" -# include "openPMD/backend/Attribute.hpp" -# include "openPMD/IO/IOTask.hpp" -# include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" -# include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/Error.hpp" +#include "openPMD/IO/HDF5/HDF5Auxiliary.hpp" +#include "openPMD/IO/HDF5/HDF5FilePosition.hpp" +#include "openPMD/IO/IOTask.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/auxiliary/StringManip.hpp" +#include "openPMD/backend/Attribute.hpp" #endif #include @@ -47,38 +47,56 @@ namespace openPMD { #if openPMD_HAVE_HDF5 -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif HDF5IOHandlerImpl::HDF5IOHandlerImpl( - AbstractIOHandler* handler, json::TracingJSON config) - : AbstractIOHandlerImpl(handler), - m_datasetTransferProperty{H5P_DEFAULT}, - m_fileAccessProperty{H5P_DEFAULT}, - m_H5T_BOOL_ENUM{H5Tenum_create(H5T_NATIVE_INT8)}, - m_H5T_CFLOAT{H5Tcreate(H5T_COMPOUND, sizeof(float) * 2)}, - m_H5T_CDOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(double) * 2)}, - m_H5T_CLONG_DOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(long double) * 2)} + AbstractIOHandler *handler, json::TracingJSON config) + : AbstractIOHandlerImpl(handler) + , m_datasetTransferProperty{H5P_DEFAULT} + , m_fileAccessProperty{H5P_DEFAULT} + , m_H5T_BOOL_ENUM{H5Tenum_create(H5T_NATIVE_INT8)} + , m_H5T_CFLOAT{H5Tcreate(H5T_COMPOUND, sizeof(float) * 2)} + , m_H5T_CDOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(double) * 2)} + , m_H5T_CLONG_DOUBLE{H5Tcreate(H5T_COMPOUND, sizeof(long double) * 2)} { // create a h5py compatible bool type - VERIFY(m_H5T_BOOL_ENUM >= 0, "[HDF5] Internal error: Failed to create bool enum"); + VERIFY( + m_H5T_BOOL_ENUM >= 0, + "[HDF5] Internal error: Failed to create bool enum"); std::string t{"TRUE"}; std::string f{"FALSE"}; int64_t tVal = 1; int64_t fVal = 0; herr_t status; status = H5Tenum_insert(m_H5T_BOOL_ENUM, t.c_str(), &tVal); - VERIFY(status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); status = H5Tenum_insert(m_H5T_BOOL_ENUM, f.c_str(), &fVal); - VERIFY(status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to insert into HDF5 enum"); // create h5py compatible complex types - VERIFY(m_H5T_CFLOAT >= 0, "[HDF5] Internal error: Failed to create complex float"); - VERIFY(m_H5T_CDOUBLE >= 0, "[HDF5] Internal error: Failed to create complex double"); - VERIFY(m_H5T_CLONG_DOUBLE >= 0, "[HDF5] Internal error: Failed to create complex long double"); + VERIFY( + m_H5T_CFLOAT >= 0, + "[HDF5] Internal error: Failed to create complex 
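// Illustration, not part of the patch: the HDF5IOHandlerImpl constructor
// above creates an h5py-compatible bool type as an int8-based enum with
// TRUE/FALSE members. A minimal standalone sketch of the same idea
// (hypothetical helper name):
#include <hdf5.h>
#include <cstdint>

hid_t makeBoolEnumType()
{
    hid_t bool_enum = H5Tenum_create(H5T_NATIVE_INT8); // 1-byte enum base
    std::int8_t fVal = 0, tVal = 1;
    H5Tenum_insert(bool_enum, "FALSE", &fVal);
    H5Tenum_insert(bool_enum, "TRUE", &tVal);
    return bool_enum; // caller must H5Tclose() the result
}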
float"); + VERIFY( + m_H5T_CDOUBLE >= 0, + "[HDF5] Internal error: Failed to create complex double"); + VERIFY( + m_H5T_CLONG_DOUBLE >= 0, + "[HDF5] Internal error: Failed to create complex long double"); H5Tinsert(m_H5T_CFLOAT, "r", 0, H5T_NATIVE_FLOAT); H5Tinsert(m_H5T_CFLOAT, "i", sizeof(float), H5T_NATIVE_FLOAT); H5Tinsert(m_H5T_CDOUBLE, "r", 0, H5T_NATIVE_DOUBLE); @@ -86,66 +104,66 @@ HDF5IOHandlerImpl::HDF5IOHandlerImpl( H5Tinsert(m_H5T_CLONG_DOUBLE, "r", 0, H5T_NATIVE_LDOUBLE); H5Tinsert(m_H5T_CLONG_DOUBLE, "i", sizeof(long double), H5T_NATIVE_LDOUBLE); - m_chunks = auxiliary::getEnvString( "OPENPMD_HDF5_CHUNKS", "auto" ); + m_chunks = auxiliary::getEnvString("OPENPMD_HDF5_CHUNKS", "auto"); // JSON option can overwrite env option: - if( config.json().contains( "hdf5" ) ) + if (config.json().contains("hdf5")) { - m_config = config[ "hdf5" ]; + m_config = config["hdf5"]; // check for global dataset configs - if( m_config.json().contains( "dataset" ) ) + if (m_config.json().contains("dataset")) { - auto datasetConfig = m_config[ "dataset" ]; - if( datasetConfig.json().contains( "chunks" ) ) + auto datasetConfig = m_config["dataset"]; + if (datasetConfig.json().contains("chunks")) { auto maybeChunks = json::asLowerCaseStringDynamic( - datasetConfig[ "chunks" ].json() ); - if( maybeChunks.has_value() ) + datasetConfig["chunks"].json()); + if (maybeChunks.has_value()) { - m_chunks = std::move( maybeChunks.value() ); + m_chunks = std::move(maybeChunks.value()); } else { throw error::BackendConfigSchema( {"hdf5", "dataset", "chunks"}, - "Must be convertible to string type." ); + "Must be convertible to string type."); } } } - if( m_chunks != "auto" && m_chunks != "none" ) + if (m_chunks != "auto" && m_chunks != "none") { std::cerr << "Warning: HDF5 chunking option set to an invalid " - "value '" << m_chunks << "'. Reset to 'auto'." - << std::endl; + "value '" + << m_chunks << "'. Reset to 'auto'." 
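// Illustration, not part of the patch: the compound types built above mirror
// h5py's complex layout, i.e. two same-typed fields named "r" and "i". A
// minimal sketch for double precision (hypothetical helper name):
#include <hdf5.h>

hid_t makeComplexDoubleType()
{
    // a struct of two doubles, matching std::complex<double>'s storage layout
    hid_t cplx = H5Tcreate(H5T_COMPOUND, 2 * sizeof(double));
    H5Tinsert(cplx, "r", 0, H5T_NATIVE_DOUBLE);              // real part
    H5Tinsert(cplx, "i", sizeof(double), H5T_NATIVE_DOUBLE); // imaginary part
    return cplx; // caller must H5Tclose() the result
}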
<< std::endl; m_chunks = "auto"; } // unused params auto shadow = m_config.invertShadow(); - if( shadow.size() > 0 ) + if (shadow.size() > 0) { - switch( m_config.originallySpecifiedAs ) + switch (m_config.originallySpecifiedAs) { case json::SupportedLanguages::JSON: std::cerr << "Warning: parts of the backend configuration for " - "HDF5 remain unused:\n" - << shadow << std::endl; + "HDF5 remain unused:\n" + << shadow << std::endl; break; - case json::SupportedLanguages::TOML: - { - auto asToml = json::jsonToToml( shadow ); + case json::SupportedLanguages::TOML: { + auto asToml = json::jsonToToml(shadow); std::cerr << "Warning: parts of the backend configuration for " - "HDF5 remain unused:\n" - << asToml << std::endl; + "HDF5 remain unused:\n" + << asToml << std::endl; break; } } } } -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - auto const hdf5_collective_metadata = auxiliary::getEnvString( "OPENPMD_HDF5_COLLECTIVE_METADATA", "ON" ); - if( hdf5_collective_metadata == "ON" ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + auto const hdf5_collective_metadata = + auxiliary::getEnvString("OPENPMD_HDF5_COLLECTIVE_METADATA", "ON"); + if (hdf5_collective_metadata == "ON") m_hdf5_collective_metadata = 1; else m_hdf5_collective_metadata = 0; @@ -156,88 +174,97 @@ HDF5IOHandlerImpl::~HDF5IOHandlerImpl() { herr_t status; status = H5Tclose(m_H5T_BOOL_ENUM); - if( status < 0 ) + if (status < 0) std::cerr << "[HDF5] Internal error: Failed to close bool enum\n"; status = H5Tclose(m_H5T_CFLOAT); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close complex float type\n"; + if (status < 0) + std::cerr + << "[HDF5] Internal error: Failed to close complex float type\n"; status = H5Tclose(m_H5T_CDOUBLE); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close complex double type\n"; + if (status < 0) + std::cerr + << "[HDF5] Internal error: Failed to close complex double type\n"; status = H5Tclose(m_H5T_CLONG_DOUBLE); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close complex long double type\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed to close complex long " + "double type\n"; - while( !m_openFileIDs.empty() ) + while (!m_openFileIDs.empty()) { auto file = m_openFileIDs.begin(); status = H5Fclose(*file); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close HDF5 file (serial)\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed to close HDF5 file " + "(serial)\n"; m_openFileIDs.erase(file); } - if( m_datasetTransferProperty != H5P_DEFAULT ) + if (m_datasetTransferProperty != H5P_DEFAULT) { status = H5Pclose(m_datasetTransferProperty); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset transfer property\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed to close HDF5 dataset " + "transfer property\n"; } - if( m_fileAccessProperty != H5P_DEFAULT ) + if (m_fileAccessProperty != H5P_DEFAULT) { status = H5Pclose(m_fileAccessProperty); - if( status < 0 ) - std::cerr << "[HDF5] Internal error: Failed to close HDF5 file access property\n"; + if (status < 0) + std::cerr << "[HDF5] Internal error: Failed to close HDF5 file " + "access property\n"; } } -void -HDF5IOHandlerImpl::createFile(Writable* writable, - Parameter< Operation::CREATE_FILE > const& parameters) +void HDF5IOHandlerImpl::createFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] 
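// Illustration, not part of the patch: per the logic above, chunking is
// selected via the OPENPMD_HDF5_CHUNKS environment variable ("auto"/"none"),
// and a JSON/TOML backend config under hdf5.dataset.chunks can override it.
// A sketch of both knobs; how the options string reaches the Series is an
// assumption here and not shown in this patch:
#include <cstdlib>
#include <string>

int main()
{
    // environment route: disable HDF5 chunking for this process
    setenv("OPENPMD_HDF5_CHUNKS", "none", /* overwrite = */ 1);

    // JSON route: this string would be handed to openPMD as the backend
    // options and takes precedence over the environment variable
    std::string options = R"({"hdf5": {"dataset": {"chunks": "auto"}}})";
    (void)options;
    return 0;
}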
Creating a file in read-only mode is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Creating a file in read-only mode is not possible."); - if( !writable->written ) + if (!writable->written) { - if( !auxiliary::directory_exists(m_handler->directory) ) + if (!auxiliary::directory_exists(m_handler->directory)) { bool success = auxiliary::create_directories(m_handler->directory); - VERIFY(success, "[HDF5] Internal error: Failed to create directories during HDF5 file creation"); + VERIFY( + success, + "[HDF5] Internal error: Failed to create directories during " + "HDF5 file creation"); } std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".h5") ) + if (!auxiliary::ends_with(name, ".h5")) name += ".h5"; unsigned flags; - if( m_handler->m_backendAccess == Access::CREATE ) + if (m_handler->m_backendAccess == Access::CREATE) flags = H5F_ACC_TRUNC; else flags = H5F_ACC_EXCL; - hid_t id = H5Fcreate(name.c_str(), - flags, - H5P_DEFAULT, - m_fileAccessProperty); + hid_t id = + H5Fcreate(name.c_str(), flags, H5P_DEFAULT, m_fileAccessProperty); VERIFY(id >= 0, "[HDF5] Internal error: Failed to create HDF5 file"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >("/"); + writable->abstractFilePosition = + std::make_shared("/"); m_fileNames[writable] = name; - m_fileNamesWithID[std::move(name)]=id; + m_fileNamesWithID[std::move(name)] = id; m_openFileIDs.insert(id); } } -void -HDF5IOHandlerImpl::createPath(Writable* writable, - Parameter< Operation::CREATE_PATH > const& parameters) +void HDF5IOHandlerImpl::createPath( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Creating a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Creating a path in a file opened as read only is not " + "possible."); hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } @@ -245,184 +272,216 @@ HDF5IOHandlerImpl::createPath(Writable* writable, herr_t status; - if( !writable->written ) + if (!writable->written) { /* Sanitize path */ std::string path = parameters.path; - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; /* Open H5Object to write into */ - Writable* position; - if( writable->parent ) + Writable *position; + if (writable->parent) position = writable->parent; else - position = writable; /* root does not have a parent but might still have to be written */ - File file = getFile( position ).value(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(position).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path creation"); + position = writable; /* root does not have a parent but might still + have to be written */ + File file = getFile(position).value(); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(position).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path " + "creation"); /* Create 
the path in the file */ - std::stack< hid_t > groups; + std::stack groups; groups.push(node_id); - for( std::string const& folder : auxiliary::split(path, "/", false) ) + for (std::string const &folder : auxiliary::split(path, "/", false)) { // avoid creation of paths that already exist - htri_t const found = H5Lexists(groups.top(), folder.c_str(), H5P_DEFAULT); + htri_t const found = + H5Lexists(groups.top(), folder.c_str(), H5P_DEFAULT); if (found > 0) - continue; - - hid_t group_id = H5Gcreate(groups.top(), - folder.c_str(), - H5P_DEFAULT, - H5P_DEFAULT, - H5P_DEFAULT); - VERIFY(group_id >= 0, "[HDF5] Internal error: Failed to create HDF5 group during path creation"); + continue; + + hid_t group_id = H5Gcreate( + groups.top(), + folder.c_str(), + H5P_DEFAULT, + H5P_DEFAULT, + H5P_DEFAULT); + VERIFY( + group_id >= 0, + "[HDF5] Internal error: Failed to create HDF5 group during " + "path creation"); groups.push(group_id); } /* Close the groups */ - while( !groups.empty() ) + while (!groups.empty()) { status = H5Gclose(groups.top()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "creation"); groups.pop(); } writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(path); + writable->abstractFilePosition = + std::make_shared(path); m_fileNames[writable] = file.name; } status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during path creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during path " + "creation"); } -void -HDF5IOHandlerImpl::createDataset(Writable* writable, - Parameter< Operation::CREATE_DATASET > const& parameters) +void HDF5IOHandlerImpl::createDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Creating a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Creating a dataset in a file opened as read only is not " + "possible."); - if( !writable->written ) + if (!writable->written) { /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); - if( auxiliary::ends_with(name, '/') ) + if (auxiliary::ends_with(name, '/')) name = auxiliary::replace_last(name, "/", ""); - json::TracingJSON config = json::parseOptions( - parameters.options, /* considerFiles = */ false ); + json::TracingJSON config = + json::parseOptions(parameters.options, /* considerFiles = */ false); // general bool is_resizable_dataset = false; - if( config.json().contains( "resizable" ) ) + if (config.json().contains("resizable")) { - is_resizable_dataset = config[ "resizable" ].json().get< bool >(); + is_resizable_dataset = config["resizable"].json().get(); } // HDF5 specific - if( config.json().contains( "hdf5" ) && - config[ "hdf5" ].json().contains( "dataset" ) ) + if (config.json().contains("hdf5") && + config["hdf5"].json().contains("dataset")) { - json::TracingJSON datasetConfig{ - config[ "hdf5" ][ "dataset" ] }; + json::TracingJSON datasetConfig{config["hdf5"]["dataset"]}; /* * @todo Read more options from config here. 
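// Illustration, not part of the patch: createPath above walks the "/"-split
// path and creates each group that H5Lexists does not find yet. A simplified
// standalone sketch of that loop (hypothetical helper; unlike the patched
// code it reopens existing groups instead of skipping them):
#include <hdf5.h>
#include <string>
#include <vector>

void createNestedGroups(hid_t file_id, std::vector<std::string> const &folders)
{
    hid_t parent = file_id;
    std::vector<hid_t> opened; // groups we created/opened and must close
    for (auto const &folder : folders)
    {
        hid_t next;
        if (H5Lexists(parent, folder.c_str(), H5P_DEFAULT) > 0)
            next = H5Gopen(parent, folder.c_str(), H5P_DEFAULT); // reuse
        else
            next = H5Gcreate(
                parent, folder.c_str(), H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        opened.push_back(next);
        parent = next;
    }
    for (auto it = opened.rbegin(); it != opened.rend(); ++it)
        H5Gclose(*it); // close innermost group first
}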
*/ - ( void )datasetConfig; + (void)datasetConfig; } parameters.warnUnusedParameters( config, "hdf5", "Warning: parts of the backend configuration for HDF5 dataset '" + - name + "' remain unused:\n" ); + name + "' remain unused:\n"); hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif /* Open H5Object to write into */ - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset creation"); + auto res = getFile(writable); + File file = res ? res.value() : getFile(writable->parent).value(); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "creation"); Datatype d = parameters.dtype; - if( d == Datatype::UNDEFINED ) + if (d == Datatype::UNDEFINED) { // TODO handle unknown dtype - std::cerr << "[HDF5] Datatype::UNDEFINED caught during dataset creation (serial HDF5)" << std::endl; + std::cerr << "[HDF5] Datatype::UNDEFINED caught during dataset " + "creation (serial HDF5)" + << std::endl; d = Datatype::BOOL; } Attribute a(0); a.dtype = d; - std::vector< hsize_t > dims; + std::vector dims; std::uint64_t num_elements = 1u; - for( auto const& val : parameters.extent ) { - dims.push_back(static_cast< hsize_t >(val)); + for (auto const &val : parameters.extent) + { + dims.push_back(static_cast(val)); num_elements *= val; } - std::vector< hsize_t > max_dims( dims.begin(), dims.end() ); - if( is_resizable_dataset ) - max_dims.assign( dims.size(), H5F_UNLIMITED ); + std::vector max_dims(dims.begin(), dims.end()); + if (is_resizable_dataset) + max_dims.assign(dims.size(), H5F_UNLIMITED); - hid_t space = H5Screate_simple(static_cast< int >(dims.size()), dims.data(), max_dims.data()); - VERIFY(space >= 0, "[HDF5] Internal error: Failed to create dataspace during dataset creation"); + hid_t space = H5Screate_simple( + static_cast(dims.size()), dims.data(), max_dims.data()); + VERIFY( + space >= 0, + "[HDF5] Internal error: Failed to create dataspace during dataset " + "creation"); /* enable chunking on the created dataspace */ hid_t datasetCreationProperty = H5Pcreate(H5P_DATASET_CREATE); H5Pset_fill_time(datasetCreationProperty, H5D_FILL_TIME_NEVER); - if( num_elements != 0u && m_chunks != "none" ) + if (num_elements != 0u && m_chunks != "none") { //! @todo add per dataset chunk control from JSON config // get chunking dimensions - std::vector< hsize_t > chunk_dims = getOptimalChunkDims(dims, toBytes(d)); + std::vector chunk_dims = + getOptimalChunkDims(dims, toBytes(d)); //! 
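// Illustration, not part of the patch: a "resizable" dataset above gets
// unlimited max dimensions, which HDF5 only permits with a chunked layout;
// extendDataset later grows it with H5Dset_extent. A 1-D sketch under those
// assumptions (hypothetical helper name and chunk size):
#include <hdf5.h>

hid_t createResizable1D(hid_t file_id, const char *name, hsize_t initial)
{
    hsize_t dims[1] = {initial};
    hsize_t maxdims[1] = {H5S_UNLIMITED}; // allow later growth
    hid_t space = H5Screate_simple(1, dims, maxdims);

    hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
    hsize_t chunk[1] = {1024}; // chunked layout is mandatory for resizing
    H5Pset_chunk(dcpl, 1, chunk);

    hid_t dset = H5Dcreate(
        file_id, name, H5T_NATIVE_DOUBLE, space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
    H5Pclose(dcpl);
    H5Sclose(space);

    hsize_t new_dims[1] = {2 * initial}; // e.g. double the extent later on
    H5Dset_extent(dset, new_dims);
    return dset; // caller must H5Dclose() the result
}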
@todo allow overwrite with user-provided chunk size - //for( auto const& val : parameters.chunkSize ) + // for( auto const& val : parameters.chunkSize ) // chunk_dims.push_back(static_cast< hsize_t >(val)); - herr_t status = H5Pset_chunk(datasetCreationProperty, chunk_dims.size(), chunk_dims.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to set chunk size during dataset creation"); + herr_t status = H5Pset_chunk( + datasetCreationProperty, chunk_dims.size(), chunk_dims.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to set chunk size during " + "dataset creation"); } - std::string const& compression = ""; // @todo read from JSON - if( !compression.empty() ) - std::cerr << "[HDF5] Compression not yet implemented in HDF5 backend." - << std::endl; + std::string const &compression = ""; // @todo read from JSON + if (!compression.empty()) + std::cerr + << "[HDF5] Compression not yet implemented in HDF5 backend." + << std::endl; /* { - std::vector< std::string > args = auxiliary::split(compression, ":"); - std::string const& format = args[0]; - if( (format == "zlib" || format == "gzip" || format == "deflate") + std::vector< std::string > args = auxiliary::split(compression, + ":"); std::string const& format = args[0]; if( (format == "zlib" || + format == "gzip" || format == "deflate") && args.size() == 2 ) { - status = H5Pset_deflate(datasetCreationProperty, std::stoi(args[1])); - VERIFY(status == 0, "[HDF5] Internal error: Failed to set deflate compression during dataset creation"); - } else if( format == "szip" || format == "nbit" || format == "scaleoffset" ) - std::cerr << "[HDF5] Compression format " << format - << " not yet implemented. Data will not be compressed!" + status = H5Pset_deflate(datasetCreationProperty, + std::stoi(args[1])); VERIFY(status == 0, "[HDF5] Internal error: Failed + to set deflate compression during dataset creation"); } else if( format + == "szip" || format == "nbit" || format == "scaleoffset" ) std::cerr << + "[HDF5] Compression format " << format + << " not yet implemented. Data will not be + compressed!" 
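// Illustration, not part of the patch: the commented-out block above sketches
// how zlib/gzip compression would be wired up. The deflate filter is attached
// to the dataset-creation property list and only takes effect together with a
// chunked layout. A minimal sketch (hypothetical helper name):
#include <hdf5.h>

void enableDeflate(hid_t datasetCreationProperty, unsigned level /* 0..9 */)
{
    // requires that H5Pset_chunk was also called on the same property list
    H5Pset_deflate(datasetCreationProperty, level);
}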
<< std::endl; else std::cerr << "[HDF5] Compression format " << format @@ -432,121 +491,156 @@ HDF5IOHandlerImpl::createDataset(Writable* writable, */ GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); hid_t datatype = getH5DataType(a); - VERIFY(datatype >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset creation"); - hid_t group_id = H5Dcreate(node_id, - name.c_str(), - datatype, - space, - H5P_DEFAULT, - datasetCreationProperty, - H5P_DEFAULT); - VERIFY(group_id >= 0, "[HDF5] Internal error: Failed to create HDF5 group during dataset creation"); + VERIFY( + datatype >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " + "creation"); + hid_t group_id = H5Dcreate( + node_id, + name.c_str(), + datatype, + space, + H5P_DEFAULT, + datasetCreationProperty, + H5P_DEFAULT); + VERIFY( + group_id >= 0, + "[HDF5] Internal error: Failed to create HDF5 group during dataset " + "creation"); herr_t status; status = H5Dclose(group_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset during " + "dataset creation"); status = H5Tclose(datatype); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 datatype during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 datatype during " + "dataset creation"); status = H5Pclose(datasetCreationProperty); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset creation property during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset creation " + "property during dataset creation"); status = H5Sclose(space); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset space during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset space during " + "dataset creation"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during dataset " + "creation"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset creation"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during " + "dataset creation"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(name); + writable->abstractFilePosition = + std::make_shared(name); m_fileNames[writable] = file.name; } } -void -HDF5IOHandlerImpl::extendDataset(Writable* writable, - Parameter< Operation::EXTEND_DATASET > const& parameters) +void HDF5IOHandlerImpl::extendDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Extending a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Extending a 
dataset in a file opened as read only is not " + "possible."); - if( !writable->written ) - throw std::runtime_error("[HDF5] Extending an unwritten Dataset is not possible."); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Extending an unwritten Dataset is not possible."); - auto res = getFile( writable ); - if( !res ) - res = getFile( writable->parent ); - hid_t dataset_id = H5Dopen(res.value().id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset extension"); + auto res = getFile(writable); + if (!res) + res = getFile(writable->parent); + hid_t dataset_id = H5Dopen( + res.value().id, + concrete_h5_file_position(writable).c_str(), + H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "extension"); // Datasets may only be extended if they have chunked layout, so let's see // whether this one does { - hid_t dataset_space = H5Dget_space( dataset_id ); - int ndims = H5Sget_simple_extent_ndims( dataset_space ); + hid_t dataset_space = H5Dget_space(dataset_id); + int ndims = H5Sget_simple_extent_ndims(dataset_space); VERIFY( ndims >= 0, "[HDF5]: Internal error: Failed to retrieve dimensionality of " - "dataset during dataset read." ); - hid_t propertyList = H5Dget_create_plist( dataset_id ); - std::vector< hsize_t > chunkExtent( ndims, 0 ); + "dataset during dataset read."); + hid_t propertyList = H5Dget_create_plist(dataset_id); + std::vector chunkExtent(ndims, 0); int chunkDimensionality = - H5Pget_chunk( propertyList, ndims, chunkExtent.data() ); - if( chunkDimensionality < 0 ) + H5Pget_chunk(propertyList, ndims, chunkExtent.data()); + if (chunkDimensionality < 0) { throw std::runtime_error( "[HDF5] Cannot extend datasets unless written with chunked " - "layout." 
); + "layout."); } } - std::vector< hsize_t > size; - for( auto const& val : parameters.extent ) - size.push_back(static_cast< hsize_t >(val)); + std::vector size; + for (auto const &val : parameters.extent) + size.push_back(static_cast(val)); herr_t status; status = H5Dset_extent(dataset_id, size.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to extend HDF5 dataset during dataset extension"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to extend HDF5 dataset during dataset " + "extension"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset during dataset extension"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset during dataset " + "extension"); } -void -HDF5IOHandlerImpl::availableChunks( - Writable * writable, - Parameter< Operation::AVAILABLE_CHUNKS > & parameters ) +void HDF5IOHandlerImpl::availableChunks( + Writable *writable, Parameter ¶meters) { - auto fname = m_fileNames.find( writable ); - VERIFY( fname != m_fileNames.end(), - "[HDF5] File name not found in writable" ); - auto fid = m_fileNamesWithID.find( fname->second ); - VERIFY( fid != m_fileNamesWithID.end(), - "[HDF5] File ID not found with file name" ); + auto fname = m_fileNames.find(writable); + VERIFY( + fname != m_fileNames.end(), "[HDF5] File name not found in writable"); + auto fid = m_fileNamesWithID.find(fname->second); + VERIFY( + fid != m_fileNamesWithID.end(), + "[HDF5] File ID not found with file name"); hid_t dataset_id = H5Dopen( - fid->second, - concrete_h5_file_position( writable ).c_str(), - H5P_DEFAULT ); + fid->second, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); VERIFY( dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " - "read" ); - hid_t dataset_space = H5Dget_space( dataset_id ); - int ndims = H5Sget_simple_extent_ndims( dataset_space ); + "read"); + hid_t dataset_space = H5Dget_space(dataset_id); + int ndims = H5Sget_simple_extent_ndims(dataset_space); VERIFY( ndims >= 0, "[HDF5]: Internal error: Failed to retrieve dimensionality of " "dataset " - "during dataset read." 
); + "during dataset read."); // // now let's figure out whether this one has chunks // hid_t propertyList = H5Dget_create_plist( dataset_id ); @@ -566,168 +660,175 @@ HDF5IOHandlerImpl::availableChunks( // */ // } - std::vector< hsize_t > dims( ndims, 0 ); + std::vector dims(ndims, 0); // return value is equal to ndims - H5Sget_simple_extent_dims( dataset_space, dims.data(), nullptr ); + H5Sget_simple_extent_dims(dataset_space, dims.data(), nullptr); - Offset offset( ndims, 0 ); + Offset offset(ndims, 0); Extent extent; - extent.reserve( ndims ); - for( auto e : dims ) + extent.reserve(ndims); + for (auto e : dims) { - extent.push_back( e ); + extent.push_back(e); } parameters.chunks->push_back( - WrittenChunkInfo( std::move( offset ), std::move( extent ) ) ); + WrittenChunkInfo(std::move(offset), std::move(extent))); } -void -HDF5IOHandlerImpl::openFile( - Writable * writable, - Parameter< Operation::OPEN_FILE > const & parameters ) +void HDF5IOHandlerImpl::openFile( + Writable *writable, Parameter const ¶meters) { - if( !auxiliary::directory_exists(m_handler->directory) ) - throw no_such_file_error("[HDF5] Supplied directory is not valid: " + m_handler->directory); + if (!auxiliary::directory_exists(m_handler->directory)) + throw no_such_file_error( + "[HDF5] Supplied directory is not valid: " + m_handler->directory); std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".h5") ) + if (!auxiliary::ends_with(name, ".h5")) name += ".h5"; // this may (intentionally) overwrite - m_fileNames[ writable ] = name; + m_fileNames[writable] = name; // check if file already open auto search = m_fileNamesWithID.find(name); - if (search != m_fileNamesWithID.end()) { - return; + if (search != m_fileNamesWithID.end()) + { + return; } unsigned flags; Access at = m_handler->m_backendAccess; - if( at == Access::READ_ONLY ) + if (at == Access::READ_ONLY) flags = H5F_ACC_RDONLY; - else if( at == Access::READ_WRITE || at == Access::CREATE ) + else if (at == Access::READ_WRITE || at == Access::CREATE) flags = H5F_ACC_RDWR; else throw std::runtime_error("[HDF5] Unknown file Access"); hid_t file_id; - file_id = H5Fopen(name.c_str(), - flags, - m_fileAccessProperty); - if( file_id < 0 ) + file_id = H5Fopen(name.c_str(), flags, m_fileAccessProperty); + if (file_id < 0) throw no_such_file_error("[HDF5] Failed to open HDF5 file " + name); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >("/"); + writable->abstractFilePosition = std::make_shared("/"); m_fileNamesWithID.erase(name); m_fileNamesWithID.insert({std::move(name), file_id}); m_openFileIDs.insert(file_id); } -void -HDF5IOHandlerImpl::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & ) +void HDF5IOHandlerImpl::closeFile( + Writable *writable, Parameter const &) { - auto optionalFile = getFile( writable ); - if( ! 
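// Illustration, not part of the patch: availableChunks above reports a single
// chunk spanning the whole dataset and reads its extent back from the file.
// A minimal sketch of that dataspace query (hypothetical helper name):
#include <hdf5.h>
#include <vector>

std::vector<hsize_t> datasetExtent(hid_t dataset_id)
{
    hid_t space = H5Dget_space(dataset_id);
    int ndims = H5Sget_simple_extent_ndims(space);
    std::vector<hsize_t> dims(ndims > 0 ? ndims : 0, 0);
    H5Sget_simple_extent_dims(space, dims.data(), nullptr); // current dims
    H5Sclose(space);
    return dims;
}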
optionalFile ) + auto optionalFile = getFile(writable); + if (!optionalFile) { throw std::runtime_error( "[HDF5] Trying to close a file that is not " - "present in the backend" ); + "present in the backend"); } File file = optionalFile.value(); - H5Fclose( file.id ); - m_openFileIDs.erase( file.id ); - m_fileNames.erase( writable ); + H5Fclose(file.id); + m_openFileIDs.erase(file.id); + m_fileNames.erase(writable); - m_fileNamesWithID.erase( file.name ); + m_fileNamesWithID.erase(file.name); } -void -HDF5IOHandlerImpl::openPath( - Writable * writable, - Parameter< Operation::OPEN_PATH > const & parameters ) +void HDF5IOHandlerImpl::openPath( + Writable *writable, Parameter const ¶meters) { File file = getFile(writable->parent).value(); hid_t node_id, path_id; hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif - node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path opening"); + node_id = H5Gopen( + file.id, concrete_h5_file_position(writable->parent).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path opening"); /* Sanitize path */ std::string path = parameters.path; - if( !path.empty() ) + if (!path.empty()) { - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; - path_id = H5Gopen(node_id, - path.c_str(), - gapl); - VERIFY(path_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path opening"); + path_id = H5Gopen(node_id, path.c_str(), gapl); + VERIFY( + path_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path " + "opening"); herr_t status; status = H5Gclose(path_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "opening"); } herr_t status; status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "opening"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during path opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during path " + "opening"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(path); + writable->abstractFilePosition = std::make_shared(path); m_fileNames.erase(writable); m_fileNames.insert({writable, file.name}); } -void -HDF5IOHandlerImpl::openDataset(Writable* writable, - Parameter< Operation::OPEN_DATASET > & parameters) +void HDF5IOHandlerImpl::openDataset( + Writable *writable, Parameter ¶meters) { - File file = getFile( writable->parent ).value(); + File file = getFile(writable->parent).value(); hid_t node_id, dataset_id; hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { 
H5Pset_all_coll_metadata_ops(gapl, true); } #endif - node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset opening"); + node_id = H5Gopen( + file.id, concrete_h5_file_position(writable->parent).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "opening"); /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) name += '/'; - dataset_id = H5Dopen(node_id, - name.c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset opening"); + dataset_id = H5Dopen(node_id, name.c_str(), H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "opening"); hid_t dataset_type, dataset_space; dataset_type = H5Dget_type(dataset_id); @@ -737,99 +838,117 @@ HDF5IOHandlerImpl::openDataset(Writable* writable, using DT = Datatype; Datatype d; - if( dataset_class == H5S_SIMPLE || dataset_class == H5S_SCALAR || dataset_class == H5S_NULL ) + if (dataset_class == H5S_SIMPLE || dataset_class == H5S_SCALAR || + dataset_class == H5S_NULL) { - if( H5Tequal(dataset_type, H5T_NATIVE_CHAR) ) + if (H5Tequal(dataset_type, H5T_NATIVE_CHAR)) d = DT::CHAR; - else if( H5Tequal(dataset_type, H5T_NATIVE_UCHAR) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_UCHAR)) d = DT::UCHAR; - else if( H5Tequal(dataset_type, H5T_NATIVE_SHORT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_SHORT)) d = DT::SHORT; - else if( H5Tequal(dataset_type, H5T_NATIVE_INT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_INT)) d = DT::INT; - else if( H5Tequal(dataset_type, H5T_NATIVE_LONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_LONG)) d = DT::LONG; - else if( H5Tequal(dataset_type, H5T_NATIVE_LLONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_LLONG)) d = DT::LONGLONG; - else if( H5Tequal(dataset_type, H5T_NATIVE_FLOAT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_FLOAT)) d = DT::FLOAT; - else if( H5Tequal(dataset_type, H5T_NATIVE_DOUBLE) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_DOUBLE)) d = DT::DOUBLE; - else if( H5Tequal(dataset_type, H5T_NATIVE_LDOUBLE) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_LDOUBLE)) d = DT::LONG_DOUBLE; - else if( H5Tequal(dataset_type, m_H5T_CFLOAT) ) + else if (H5Tequal(dataset_type, m_H5T_CFLOAT)) d = DT::CFLOAT; - else if( H5Tequal(dataset_type, m_H5T_CDOUBLE) ) + else if (H5Tequal(dataset_type, m_H5T_CDOUBLE)) d = DT::CDOUBLE; - else if( H5Tequal(dataset_type, m_H5T_CLONG_DOUBLE) ) + else if (H5Tequal(dataset_type, m_H5T_CLONG_DOUBLE)) d = DT::CLONG_DOUBLE; - else if( H5Tequal(dataset_type, H5T_NATIVE_USHORT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_USHORT)) d = DT::USHORT; - else if( H5Tequal(dataset_type, H5T_NATIVE_UINT) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_UINT)) d = DT::UINT; - else if( H5Tequal(dataset_type, H5T_NATIVE_ULONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_ULONG)) d = DT::ULONG; - else if( H5Tequal(dataset_type, H5T_NATIVE_ULLONG) ) + else if (H5Tequal(dataset_type, H5T_NATIVE_ULLONG)) d = DT::ULONGLONG; - else if( H5Tget_class(dataset_type) == H5T_STRING ) + else if (H5Tget_class(dataset_type) == H5T_STRING) d = DT::STRING; else throw 
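// Illustration, not part of the patch: openDataset above maps the on-disk
// type to an openPMD Datatype by comparing against native HDF5 types. A
// trimmed sketch of that H5Tequal pattern with only a few branches
// (hypothetical helper returning a plain string instead of Datatype):
#include <hdf5.h>
#include <string>

std::string classifyDatasetType(hid_t dataset_id)
{
    hid_t t = H5Dget_type(dataset_id);
    std::string result;
    if (H5Tequal(t, H5T_NATIVE_INT) > 0)
        result = "int";
    else if (H5Tequal(t, H5T_NATIVE_DOUBLE) > 0)
        result = "double";
    else if (H5Tget_class(t) == H5T_STRING)
        result = "string";
    else
        result = "unhandled"; // the patched code throws for unknown types
    H5Tclose(t);
    return result;
}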
std::runtime_error("[HDF5] Unknown dataset type"); - } else + } + else throw std::runtime_error("[HDF5] Unsupported dataset class"); auto dtype = parameters.dtype; *dtype = d; int ndims = H5Sget_simple_extent_ndims(dataset_space); - std::vector< hsize_t > dims(ndims, 0); - std::vector< hsize_t > maxdims(ndims, 0); + std::vector dims(ndims, 0); + std::vector maxdims(ndims, 0); - H5Sget_simple_extent_dims(dataset_space, - dims.data(), - maxdims.data()); + H5Sget_simple_extent_dims(dataset_space, dims.data(), maxdims.data()); Extent e; - for( auto const& val : dims ) + for (auto const &val : dims) e.push_back(val); auto extent = parameters.extent; *extent = e; herr_t status; status = H5Sclose(dataset_space); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset space during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset space during " + "dataset opening"); status = H5Tclose(dataset_type); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset type during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset type during " + "dataset opening"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataset during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataset during dataset " + "opening"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during dataset " + "opening"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset opening"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during dataset " + "opening"); writable->written = true; - writable->abstractFilePosition = std::make_shared< HDF5FilePosition >(name); + writable->abstractFilePosition = std::make_shared(name); m_fileNames[writable] = file.name; } -void -HDF5IOHandlerImpl::deleteFile(Writable* writable, - Parameter< Operation::DELETE_FILE > const& parameters) +void HDF5IOHandlerImpl::deleteFile( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting a file opened as read only is not possible."); - if( writable->written ) + if (writable->written) { - hid_t file_id = getFile( writable ).value().id; + hid_t file_id = getFile(writable).value().id; herr_t status = H5Fclose(file_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 file during file deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 file during file " + "deletion"); std::string name = m_handler->directory + parameters.name; - if( !auxiliary::ends_with(name, ".h5") ) + if (!auxiliary::ends_with(name, ".h5")) name += ".h5"; - if( !auxiliary::file_exists(name) ) + if (!auxiliary::file_exists(name)) throw std::runtime_error("[HDF5] File does not exist: " + name); auxiliary::remove_file(name); @@ -843,41 +962,50 @@ HDF5IOHandlerImpl::deleteFile(Writable* writable, } } -void -HDF5IOHandlerImpl::deletePath(Writable* writable, - Parameter< Operation::DELETE_PATH > const& parameters) +void HDF5IOHandlerImpl::deletePath( + 
Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting a path in a file opened as read only is not " + "possible."); - if( writable->written ) + if (writable->written) { /* Sanitize path */ std::string path = parameters.path; - if( auxiliary::starts_with(path, '/') ) + if (auxiliary::starts_with(path, '/')) path = auxiliary::replace_first(path, "/", ""); - if( !auxiliary::ends_with(path, '/') ) + if (!auxiliary::ends_with(path, '/')) path += '/'; /* Open H5Object to delete in * Ugly hack: H5Ldelete can't delete "." * Work around this by deleting from the parent */ - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path deletion"); - - path += static_cast< HDF5FilePosition* >(writable->abstractFilePosition.get())->location; - herr_t status = H5Ldelete(node_id, - path.c_str(), - H5P_DEFAULT); - VERIFY(status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); + auto res = getFile(writable); + File file = res ? res.value() : getFile(writable->parent).value(); + hid_t node_id = H5Gopen( + file.id, + concrete_h5_file_position(writable->parent).c_str(), + H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path " + "deletion"); + + path += static_cast( + writable->abstractFilePosition.get()) + ->location; + herr_t status = H5Ldelete(node_id, path.c_str(), H5P_DEFAULT); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during path deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during path " + "deletion"); writable->written = false; writable->abstractFilePosition.reset(); @@ -886,41 +1014,50 @@ HDF5IOHandlerImpl::deletePath(Writable* writable, } } -void -HDF5IOHandlerImpl::deleteDataset(Writable* writable, - Parameter< Operation::DELETE_DATASET > const& parameters) +void HDF5IOHandlerImpl::deleteDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting a path in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting a path in a file opened as read only is not " + "possible."); - if( writable->written ) + if (writable->written) { /* Sanitize name */ std::string name = parameters.name; - if( auxiliary::starts_with(name, '/') ) + if (auxiliary::starts_with(name, '/')) name = auxiliary::replace_first(name, "/", ""); - if( !auxiliary::ends_with(name, '/') ) + if (!auxiliary::ends_with(name, '/')) name += '/'; /* Open H5Object to delete in * Ugly hack: H5Ldelete can't delete "." * Work around this by deleting from the parent */ - auto res = getFile( writable ); - File file = res ? 
res.value() : getFile( writable->parent ).value(); - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable->parent).c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset deletion"); - - name += static_cast< HDF5FilePosition* >(writable->abstractFilePosition.get())->location; - herr_t status = H5Ldelete(node_id, - name.c_str(), - H5P_DEFAULT); - VERIFY(status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); + auto res = getFile(writable); + File file = res ? res.value() : getFile(writable->parent).value(); + hid_t node_id = H5Gopen( + file.id, + concrete_h5_file_position(writable->parent).c_str(), + H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "deletion"); + + name += static_cast( + writable->abstractFilePosition.get()) + ->location; + herr_t status = H5Ldelete(node_id, name.c_str(), H5P_DEFAULT); + VERIFY( + status == 0, "[HDF5] Internal error: Failed to delete HDF5 group"); status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during dataset deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during dataset " + "deletion"); writable->written = false; writable->abstractFilePosition.reset(); @@ -929,698 +1066,761 @@ HDF5IOHandlerImpl::deleteDataset(Writable* writable, } } -void -HDF5IOHandlerImpl::deleteAttribute(Writable* writable, - Parameter< Operation::DELETE_ATT > const& parameters) +void HDF5IOHandlerImpl::deleteAttribute( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Deleting an attribute in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Deleting an attribute in a file opened as read only is not " + "possible."); - if( writable->written ) + if (writable->written) { std::string name = parameters.name; /* Open H5Object to delete in */ - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); - hid_t node_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during attribute deletion"); + auto res = getFile(writable); + File file = res ? 
res.value() : getFile(writable->parent).value(); + hid_t node_id = H5Oopen( + file.id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during attribute " + "deletion"); - herr_t status = H5Adelete(node_id, - name.c_str()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to delete HDF5 attribute"); + herr_t status = H5Adelete(node_id, name.c_str()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to delete HDF5 attribute"); status = H5Oclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group during attribute deletion"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group during " + "attribute deletion"); } } -void -HDF5IOHandlerImpl::writeDataset(Writable* writable, - Parameter< Operation::WRITE_DATASET > const& parameters) +void HDF5IOHandlerImpl::writeDataset( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Writing into a dataset in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Writing into a dataset in a file opened as read only is " + "not possible."); - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); + auto res = getFile(writable); + File file = res ? res.value() : getFile(writable->parent).value(); hid_t dataset_id, filespace, memspace; herr_t status; - dataset_id = H5Dopen(file.id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset write"); - - std::vector< hsize_t > start; - for( auto const& val : parameters.offset ) - start.push_back(static_cast< hsize_t >(val)); - std::vector< hsize_t > stride(start.size(), 1); /* contiguous region */ - std::vector< hsize_t > count(start.size(), 1); /* single region */ - std::vector< hsize_t > block; - for( auto const& val : parameters.extent ) - block.push_back(static_cast< hsize_t >(val)); - memspace = H5Screate_simple(static_cast< int >(block.size()), block.data(), nullptr); + dataset_id = H5Dopen( + file.id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "write"); + + std::vector start; + for (auto const &val : parameters.offset) + start.push_back(static_cast(val)); + std::vector stride(start.size(), 1); /* contiguous region */ + std::vector count(start.size(), 1); /* single region */ + std::vector block; + for (auto const &val : parameters.extent) + block.push_back(static_cast(val)); + memspace = + H5Screate_simple(static_cast(block.size()), block.data(), nullptr); filespace = H5Dget_space(dataset_id); - status = H5Sselect_hyperslab(filespace, - H5S_SELECT_SET, - start.data(), - stride.data(), - count.data(), - block.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to select hyperslab during dataset write"); + status = H5Sselect_hyperslab( + filespace, + H5S_SELECT_SET, + start.data(), + stride.data(), + count.data(), + block.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to select hyperslab during dataset " + "write"); - std::shared_ptr< void const > data = parameters.data; + std::shared_ptr data = parameters.data; GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { 
typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); - //TODO Check if parameter dtype and dataset dtype match + // TODO Check if parameter dtype and dataset dtype match Attribute a(0); a.dtype = parameters.dtype; hid_t dataType = getH5DataType(a); - VERIFY(dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset write"); - switch( a.dtype ) + VERIFY( + dataType >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " + "write"); + switch (a.dtype) { using DT = Datatype; - case DT::LONG_DOUBLE: - case DT::DOUBLE: - case DT::FLOAT: - case DT::CLONG_DOUBLE: - case DT::CDOUBLE: - case DT::CFLOAT: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::CHAR: - case DT::UCHAR: - case DT::BOOL: - status = H5Dwrite(dataset_id, - dataType, - memspace, - filespace, - m_datasetTransferProperty, - data.get()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to write dataset " + concrete_h5_file_position(writable)); - break; - case DT::UNDEFINED: - throw std::runtime_error("[HDF5] Undefined Attribute datatype"); - default: - throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); + case DT::LONG_DOUBLE: + case DT::DOUBLE: + case DT::FLOAT: + case DT::CLONG_DOUBLE: + case DT::CDOUBLE: + case DT::CFLOAT: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + case DT::ULONG: + case DT::ULONGLONG: + case DT::CHAR: + case DT::UCHAR: + case DT::BOOL: + status = H5Dwrite( + dataset_id, + dataType, + memspace, + filespace, + m_datasetTransferProperty, + data.get()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to write dataset " + + concrete_h5_file_position(writable)); + break; + case DT::UNDEFINED: + throw std::runtime_error("[HDF5] Undefined Attribute datatype"); + default: + throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); } status = H5Tclose(dataType); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset datatype during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset datatype during " + "dataset write"); status = H5Sclose(filespace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset file space during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset file space during " + "dataset write"); status = H5Sclose(memspace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset memory space during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset memory space during " + "dataset write"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset " + concrete_h5_file_position(writable) + " during dataset write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset " + + concrete_h5_file_position(writable) + " during dataset write"); m_fileNames[writable] = file.name; } -void -HDF5IOHandlerImpl::writeAttribute(Writable* writable, - Parameter< Operation::WRITE_ATT > const& parameters) +void 
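// Illustration, not part of the patch: writeDataset above writes one
// contiguous block per call, so stride and count are all 1 and "block"
// carries the extent. A minimal 2-D sketch of that hyperslab write
// (hypothetical helper name):
#include <hdf5.h>

void writeBlock2D(
    hid_t dataset_id,
    hsize_t off0,
    hsize_t off1,
    hsize_t ext0,
    hsize_t ext1,
    double const *data)
{
    hsize_t start[2] = {off0, off1};
    hsize_t stride[2] = {1, 1}; // contiguous region
    hsize_t count[2] = {1, 1};  // a single block
    hsize_t block[2] = {ext0, ext1};

    hid_t memspace = H5Screate_simple(2, block, nullptr); // shape of `data`
    hid_t filespace = H5Dget_space(dataset_id);
    H5Sselect_hyperslab(filespace, H5S_SELECT_SET, start, stride, count, block);
    H5Dwrite(
        dataset_id, H5T_NATIVE_DOUBLE, memspace, filespace, H5P_DEFAULT, data);
    H5Sclose(filespace);
    H5Sclose(memspace);
}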
HDF5IOHandlerImpl::writeAttribute( + Writable *writable, Parameter const ¶meters) { - if( m_handler->m_backendAccess == Access::READ_ONLY ) - throw std::runtime_error("[HDF5] Writing an attribute in a file opened as read only is not possible."); + if (m_handler->m_backendAccess == Access::READ_ONLY) + throw std::runtime_error( + "[HDF5] Writing an attribute in a file opened as read only is not " + "possible."); - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); + auto res = getFile(writable); + File file = res ? res.value() : getFile(writable->parent).value(); hid_t node_id, attribute_id; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(fapl, true); } #endif - node_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - fapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 object during attribute write"); + node_id = + H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 object during attribute " + "write"); Attribute const att(parameters.resource); Datatype dtype = parameters.dtype; herr_t status; GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); hid_t dataType = getH5DataType(att); - VERIFY(dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during attribute write"); + VERIFY( + dataType >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during attribute " + "write"); std::string name = parameters.name; - if( H5Aexists(node_id, name.c_str()) == 0 ) + if (H5Aexists(node_id, name.c_str()) == 0) { hid_t dataspace = getH5DataSpace(att); - VERIFY(dataspace >= 0, "[HDF5] Internal error: Failed to get HDF5 dataspace during attribute write"); - attribute_id = H5Acreate(node_id, - name.c_str(), - dataType, - dataspace, - H5P_DEFAULT, - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to create HDF5 attribute during attribute write"); + VERIFY( + dataspace >= 0, + "[HDF5] Internal error: Failed to get HDF5 dataspace during " + "attribute write"); + attribute_id = H5Acreate( + node_id, + name.c_str(), + dataType, + dataspace, + H5P_DEFAULT, + H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to create HDF5 attribute during " + "attribute write"); status = H5Sclose(dataspace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 dataspace during attribute write"); - } else + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 dataspace during " + "attribute write"); + } + else { - attribute_id = H5Aopen(node_id, - name.c_str(), - H5P_DEFAULT); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 attribute during attribute write"); + attribute_id = H5Aopen(node_id, name.c_str(), H5P_DEFAULT); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 attribute during " + "attribute write"); } using DT = Datatype; - 
switch( dtype ) + switch (dtype) { - case DT::CHAR: - { - char c = att.get< char >(); - status = H5Awrite(attribute_id, dataType, &c); - break; - } - case DT::UCHAR: - { - auto u = att.get< unsigned char >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::SHORT: - { - auto i = att.get< short >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::INT: - { - int i = att.get< int >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::LONG: - { - long i = att.get< long >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::LONGLONG: - { - auto i = att.get< long long >(); - status = H5Awrite(attribute_id, dataType, &i); - break; - } - case DT::USHORT: - { - auto u = att.get< unsigned short >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::UINT: - { - auto u = att.get< unsigned int >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::ULONG: - { - auto u = att.get< unsigned long >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::ULONGLONG: - { - auto u = att.get< unsigned long long >(); - status = H5Awrite(attribute_id, dataType, &u); - break; - } - case DT::FLOAT: - { - auto f = att.get< float >(); - status = H5Awrite(attribute_id, dataType, &f); - break; - } - case DT::DOUBLE: - { - auto d = att.get< double >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::LONG_DOUBLE: - { - auto d = att.get< long double >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::CFLOAT: - { - std::complex< float > f = att.get< std::complex< float > >(); - status = H5Awrite(attribute_id, dataType, &f); - break; - } - case DT::CDOUBLE: - { - std::complex< double > d = att.get< std::complex< double > >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::CLONG_DOUBLE: - { - std::complex< long double > d = att.get< std::complex< long double > >(); - status = H5Awrite(attribute_id, dataType, &d); - break; - } - case DT::STRING: - status = H5Awrite(attribute_id, - dataType, - att.get< std::string >().c_str()); - break; - case DT::VEC_CHAR: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< char > >().data()); - break; - case DT::VEC_SHORT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< short > >().data()); - break; - case DT::VEC_INT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< int > >().data()); - break; - case DT::VEC_LONG: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< long > >().data()); - break; - case DT::VEC_LONGLONG: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< long long > >().data()); - break; - case DT::VEC_UCHAR: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned char > >().data()); - break; - case DT::VEC_USHORT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned short > >().data()); - break; - case DT::VEC_UINT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned int > >().data()); - break; - case DT::VEC_ULONG: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned long > >().data()); - break; - case DT::VEC_ULONGLONG: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< unsigned long long > >().data()); - break; - case DT::VEC_FLOAT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< float > >().data()); - 
break; - case DT::VEC_DOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< double > >().data()); - break; - case DT::VEC_LONG_DOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< long double > >().data()); - break; - case DT::VEC_CFLOAT: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< std::complex< float > > >().data()); - break; - case DT::VEC_CDOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< std::complex< double > > >().data()); - break; - case DT::VEC_CLONG_DOUBLE: - status = H5Awrite(attribute_id, - dataType, - att.get< std::vector< std::complex< long double > > >().data()); - break; - case DT::VEC_STRING: - { - auto vs = att.get< std::vector< std::string > >(); - size_t max_len = 0; - for( std::string const& s : vs ) - max_len = std::max(max_len, s.size()); - std::unique_ptr< char[] > c_str(new char[max_len * vs.size()]); - for( size_t i = 0; i < vs.size(); ++i ) - strncpy(c_str.get() + i*max_len, vs[i].c_str(), max_len); - status = H5Awrite(attribute_id, dataType, c_str.get()); - break; - } - case DT::ARR_DBL_7: - status = H5Awrite(attribute_id, - dataType, - att.get< std::array< double, 7 > >().data()); - break; - case DT::BOOL: - { - bool b = att.get< bool >(); - status = H5Awrite(attribute_id, dataType, &b); - break; - } - case DT::UNDEFINED: - default: - throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); + case DT::CHAR: { + char c = att.get(); + status = H5Awrite(attribute_id, dataType, &c); + break; + } + case DT::UCHAR: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::SHORT: { + auto i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::INT: { + int i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::LONG: { + long i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::LONGLONG: { + auto i = att.get(); + status = H5Awrite(attribute_id, dataType, &i); + break; + } + case DT::USHORT: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::UINT: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::ULONG: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::ULONGLONG: { + auto u = att.get(); + status = H5Awrite(attribute_id, dataType, &u); + break; + } + case DT::FLOAT: { + auto f = att.get(); + status = H5Awrite(attribute_id, dataType, &f); + break; + } + case DT::DOUBLE: { + auto d = att.get(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::LONG_DOUBLE: { + auto d = att.get(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::CFLOAT: { + std::complex f = att.get>(); + status = H5Awrite(attribute_id, dataType, &f); + break; + } + case DT::CDOUBLE: { + std::complex d = att.get>(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::CLONG_DOUBLE: { + std::complex d = att.get>(); + status = H5Awrite(attribute_id, dataType, &d); + break; + } + case DT::STRING: + status = + H5Awrite(attribute_id, dataType, att.get().c_str()); + break; + case DT::VEC_CHAR: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::VEC_SHORT: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::VEC_INT: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + 
case DT::VEC_LONG: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::VEC_LONGLONG: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::VEC_UCHAR: + status = H5Awrite( + attribute_id, + dataType, + att.get>().data()); + break; + case DT::VEC_USHORT: + status = H5Awrite( + attribute_id, + dataType, + att.get>().data()); + break; + case DT::VEC_UINT: + status = H5Awrite( + attribute_id, + dataType, + att.get>().data()); + break; + case DT::VEC_ULONG: + status = H5Awrite( + attribute_id, + dataType, + att.get>().data()); + break; + case DT::VEC_ULONGLONG: + status = H5Awrite( + attribute_id, + dataType, + att.get>().data()); + break; + case DT::VEC_FLOAT: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::VEC_DOUBLE: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::VEC_LONG_DOUBLE: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::VEC_CFLOAT: + status = H5Awrite( + attribute_id, + dataType, + att.get>>().data()); + break; + case DT::VEC_CDOUBLE: + status = H5Awrite( + attribute_id, + dataType, + att.get>>().data()); + break; + case DT::VEC_CLONG_DOUBLE: + status = H5Awrite( + attribute_id, + dataType, + att.get>>().data()); + break; + case DT::VEC_STRING: { + auto vs = att.get>(); + size_t max_len = 0; + for (std::string const &s : vs) + max_len = std::max(max_len, s.size()); + std::unique_ptr c_str(new char[max_len * vs.size()]); + for (size_t i = 0; i < vs.size(); ++i) + strncpy(c_str.get() + i * max_len, vs[i].c_str(), max_len); + status = H5Awrite(attribute_id, dataType, c_str.get()); + break; + } + case DT::ARR_DBL_7: + status = H5Awrite( + attribute_id, dataType, att.get>().data()); + break; + case DT::BOOL: { + bool b = att.get(); + status = H5Awrite(attribute_id, dataType, &b); + break; + } + case DT::UNDEFINED: + default: + throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); } - VERIFY(status == 0, "[HDF5] Internal error: Failed to write attribute " + name + " at " + concrete_h5_file_position(writable)); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to write attribute " + name + " at " + + concrete_h5_file_position(writable)); status = H5Tclose(dataType); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 datatype during Attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 datatype during Attribute " + "write"); status = H5Aclose(attribute_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute " + name + " at " + concrete_h5_file_position(writable) + " during attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute " + name + " at " + + concrete_h5_file_position(writable) + " during attribute write"); status = H5Oclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close " + concrete_h5_file_position(writable) + " during attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close " + + concrete_h5_file_position(writable) + " during attribute write"); status = H5Pclose(fapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during attribute write"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during attribute " + "write"); m_fileNames[writable] = file.name; } -void -HDF5IOHandlerImpl::readDataset(Writable* writable, - Parameter< Operation::READ_DATASET > 
& parameters) +void HDF5IOHandlerImpl::readDataset( + Writable *writable, Parameter ¶meters) { - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); + auto res = getFile(writable); + File file = res ? res.value() : getFile(writable->parent).value(); hid_t dataset_id, memspace, filespace; herr_t status; - dataset_id = H5Dopen(file.id, - concrete_h5_file_position(writable).c_str(), - H5P_DEFAULT); - VERIFY(dataset_id >= 0, "[HDF5] Internal error: Failed to open HDF5 dataset during dataset read"); + dataset_id = H5Dopen( + file.id, concrete_h5_file_position(writable).c_str(), H5P_DEFAULT); + VERIFY( + dataset_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 dataset during dataset " + "read"); - std::vector< hsize_t > start; - for( auto const& val : parameters.offset ) + std::vector start; + for (auto const &val : parameters.offset) start.push_back(static_cast(val)); - std::vector< hsize_t > stride(start.size(), 1); /* contiguous region */ - std::vector< hsize_t > count(start.size(), 1); /* single region */ - std::vector< hsize_t > block; - for( auto const& val : parameters.extent ) - block.push_back(static_cast< hsize_t >(val)); - memspace = H5Screate_simple(static_cast< int >(block.size()), block.data(), nullptr); + std::vector stride(start.size(), 1); /* contiguous region */ + std::vector count(start.size(), 1); /* single region */ + std::vector block; + for (auto const &val : parameters.extent) + block.push_back(static_cast(val)); + memspace = + H5Screate_simple(static_cast(block.size()), block.data(), nullptr); filespace = H5Dget_space(dataset_id); - status = H5Sselect_hyperslab(filespace, - H5S_SELECT_SET, - start.data(), - stride.data(), - count.data(), - block.data()); - VERIFY(status == 0, "[HDF5] Internal error: Failed to select hyperslab during dataset read"); + status = H5Sselect_hyperslab( + filespace, + H5S_SELECT_SET, + start.data(), + stride.data(), + count.data(), + block.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to select hyperslab during dataset " + "read"); - void* data = parameters.data.get(); + void *data = parameters.data.get(); Attribute a(0); a.dtype = parameters.dtype; - switch( a.dtype ) + switch (a.dtype) { using DT = Datatype; - case DT::LONG_DOUBLE: - case DT::DOUBLE: - case DT::FLOAT: - case DT::CLONG_DOUBLE: - case DT::CDOUBLE: - case DT::CFLOAT: - case DT::SHORT: - case DT::INT: - case DT::LONG: - case DT::LONGLONG: - case DT::USHORT: - case DT::UINT: - case DT::ULONG: - case DT::ULONGLONG: - case DT::CHAR: - case DT::UCHAR: - case DT::BOOL: - break; - case DT::UNDEFINED: - throw std::runtime_error("[HDF5] Unknown Attribute datatype (HDF5 Dataset read)"); - default: - throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); + case DT::LONG_DOUBLE: + case DT::DOUBLE: + case DT::FLOAT: + case DT::CLONG_DOUBLE: + case DT::CDOUBLE: + case DT::CFLOAT: + case DT::SHORT: + case DT::INT: + case DT::LONG: + case DT::LONGLONG: + case DT::USHORT: + case DT::UINT: + case DT::ULONG: + case DT::ULONGLONG: + case DT::CHAR: + case DT::UCHAR: + case DT::BOOL: + break; + case DT::UNDEFINED: + throw std::runtime_error( + "[HDF5] Unknown Attribute datatype (HDF5 Dataset read)"); + default: + throw std::runtime_error("[HDF5] Datatype not implemented in HDF5 IO"); } GetH5DataType getH5DataType({ - { typeid(bool).name(), m_H5T_BOOL_ENUM }, - { typeid(std::complex< float >).name(), m_H5T_CFLOAT }, - { typeid(std::complex< double >).name(), m_H5T_CDOUBLE }, - { typeid(std::complex< long 
double >).name(), m_H5T_CLONG_DOUBLE }, + {typeid(bool).name(), m_H5T_BOOL_ENUM}, + {typeid(std::complex).name(), m_H5T_CFLOAT}, + {typeid(std::complex).name(), m_H5T_CDOUBLE}, + {typeid(std::complex).name(), m_H5T_CLONG_DOUBLE}, }); hid_t dataType = getH5DataType(a); - VERIFY(dataType >= 0, "[HDF5] Internal error: Failed to get HDF5 datatype during dataset read"); - status = H5Dread(dataset_id, - dataType, - memspace, - filespace, - m_datasetTransferProperty, - data); + VERIFY( + dataType >= 0, + "[HDF5] Internal error: Failed to get HDF5 datatype during dataset " + "read"); + status = H5Dread( + dataset_id, + dataType, + memspace, + filespace, + m_datasetTransferProperty, + data); VERIFY(status == 0, "[HDF5] Internal error: Failed to read dataset"); status = H5Tclose(dataType); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset datatype during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset datatype during " + "dataset read"); status = H5Sclose(filespace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset file space during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset file space during " + "dataset read"); status = H5Sclose(memspace); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset memory space during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset memory space during " + "dataset read"); status = H5Dclose(dataset_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close dataset during dataset read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close dataset during dataset read"); } -void -HDF5IOHandlerImpl::readAttribute(Writable* writable, - Parameter< Operation::READ_ATT >& parameters) +void HDF5IOHandlerImpl::readAttribute( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during attribute read"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during " + "attribute read"); - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); + auto res = getFile(writable); + File file = res ? 
res.value() : getFile(writable->parent).value(); hid_t obj_id, attr_id; herr_t status; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(fapl, true); } #endif - obj_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - fapl); - VERIFY(obj_id >= 0, std::string("[HDF5] Internal error: Failed to open HDF5 object '") + - concrete_h5_file_position(writable).c_str() + "' during attribute read"); - std::string const & attr_name = parameters.name; - attr_id = H5Aopen(obj_id, - attr_name.c_str(), - H5P_DEFAULT); - VERIFY(attr_id >= 0, + obj_id = + H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); + VERIFY( + obj_id >= 0, + std::string("[HDF5] Internal error: Failed to open HDF5 object '") + + concrete_h5_file_position(writable).c_str() + + "' during attribute read"); + std::string const &attr_name = parameters.name; + attr_id = H5Aopen(obj_id, attr_name.c_str(), H5P_DEFAULT); + VERIFY( + attr_id >= 0, std::string("[HDF5] Internal error: Failed to open HDF5 attribute '") + - attr_name + "' (" + - concrete_h5_file_position(writable).c_str() + ") during attribute read"); + attr_name + "' (" + concrete_h5_file_position(writable).c_str() + + ") during attribute read"); hid_t attr_type, attr_space; attr_type = H5Aget_type(attr_id); attr_space = H5Aget_space(attr_id); int ndims = H5Sget_simple_extent_ndims(attr_space); - std::vector< hsize_t > dims(ndims, 0); - std::vector< hsize_t > maxdims(ndims, 0); + std::vector dims(ndims, 0); + std::vector maxdims(ndims, 0); - status = H5Sget_simple_extent_dims(attr_space, - dims.data(), - maxdims.data()); - VERIFY(status == ndims, "[HDF5] Internal error: Failed to get dimensions during attribute read"); + status = H5Sget_simple_extent_dims(attr_space, dims.data(), maxdims.data()); + VERIFY( + status == ndims, + "[HDF5] Internal error: Failed to get dimensions during attribute " + "read"); H5S_class_t attr_class = H5Sget_simple_extent_type(attr_space); Attribute a(0); - if( attr_class == H5S_SCALAR || (attr_class == H5S_SIMPLE && ndims == 1 && dims[0] == 1) ) + if (attr_class == H5S_SCALAR || + (attr_class == H5S_SIMPLE && ndims == 1 && dims[0] == 1)) { - if( H5Tequal(attr_type, H5T_NATIVE_CHAR) ) + if (H5Tequal(attr_type, H5T_NATIVE_CHAR)) { char c; - status = H5Aread(attr_id, - attr_type, - &c); + status = H5Aread(attr_id, attr_type, &c); a = Attribute(c); - } else if( H5Tequal(attr_type, H5T_NATIVE_UCHAR) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UCHAR)) { unsigned char u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_SHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_SHORT)) { short i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_INT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_INT)) { int i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_LONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LONG)) { long i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_LLONG) ) + } + else if (H5Tequal(attr_type, 
H5T_NATIVE_LLONG)) { long long i; - status = H5Aread(attr_id, - attr_type, - &i); + status = H5Aread(attr_id, attr_type, &i); a = Attribute(i); - } else if( H5Tequal(attr_type, H5T_NATIVE_USHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_USHORT)) { unsigned short u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_UINT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UINT)) { unsigned int u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_ULONG)) { unsigned long u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULLONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_ULLONG)) { unsigned long long u; - status = H5Aread(attr_id, - attr_type, - &u); + status = H5Aread(attr_id, attr_type, &u); a = Attribute(u); - } else if( H5Tequal(attr_type, H5T_NATIVE_FLOAT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_FLOAT)) { float f; - status = H5Aread(attr_id, - attr_type, - &f); + status = H5Aread(attr_id, attr_type, &f); a = Attribute(f); - } else if( H5Tequal(attr_type, H5T_NATIVE_DOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_DOUBLE)) { double d; - status = H5Aread(attr_id, - attr_type, - &d); + status = H5Aread(attr_id, attr_type, &d); a = Attribute(d); - } else if( H5Tequal(attr_type, H5T_NATIVE_LDOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LDOUBLE)) { long double l; - status = H5Aread(attr_id, - attr_type, - &l); + status = H5Aread(attr_id, attr_type, &l); a = Attribute(l); - } else if( H5Tget_class(attr_type) == H5T_STRING ) + } + else if (H5Tget_class(attr_type) == H5T_STRING) { - if( H5Tis_variable_str(attr_type) ) + if (H5Tis_variable_str(attr_type)) { // refs.: // https://github.com/HDFGroup/hdf5/blob/hdf5-1_12_0/tools/src/h5dump/h5dump_xml.c - hsize_t size = H5Tget_size(attr_type); // not yet the actual string length - std::vector< char > vc(size); // byte buffer to vlen strings - status = H5Aread(attr_id, - attr_type, - vc.data()); - auto c_str = *((char**)vc.data()); // get actual string out + hsize_t size = + H5Tget_size(attr_type); // not yet the actual string length + std::vector vc(size); // byte buffer to vlen strings + status = H5Aread(attr_id, attr_type, vc.data()); + auto c_str = *((char **)vc.data()); // get actual string out a = Attribute(std::string(c_str)); // free dynamically allocated vlen memory from H5Aread H5Dvlen_reclaim(attr_type, attr_space, H5P_DEFAULT, vc.data()); // 1.12+: - //H5Treclaim(attr_type, attr_space, H5P_DEFAULT, vc.data()); - } else + // H5Treclaim(attr_type, attr_space, H5P_DEFAULT, vc.data()); + } + else { hsize_t size = H5Tget_size(attr_type); - std::vector< char > vc(size); - status = H5Aread(attr_id, - attr_type, - vc.data()); - a = Attribute(auxiliary::strip(std::string(vc.data(), size), {'\0'})); + std::vector vc(size); + status = H5Aread(attr_id, attr_type, vc.data()); + a = Attribute( + auxiliary::strip(std::string(vc.data(), size), {'\0'})); } - } else if( H5Tget_class(attr_type) == H5T_ENUM ) + } + else if (H5Tget_class(attr_type) == H5T_ENUM) { bool attrIsBool = false; - if( H5Tget_nmembers(attr_type) == 2 ) + if (H5Tget_nmembers(attr_type) == 2) { - char* m0 = H5Tget_member_name(attr_type, 0); - char* m1 = H5Tget_member_name(attr_type, 1); - if( m0 
!= nullptr && m1 != nullptr ) - if( (strncmp("TRUE" , m0, 4) == 0) && (strncmp("FALSE", m1, 5) == 0) ) + char *m0 = H5Tget_member_name(attr_type, 0); + char *m1 = H5Tget_member_name(attr_type, 1); + if (m0 != nullptr && m1 != nullptr) + if ((strncmp("TRUE", m0, 4) == 0) && + (strncmp("FALSE", m1, 5) == 0)) attrIsBool = true; H5free_memory(m1); H5free_memory(m0); } - if( attrIsBool ) + if (attrIsBool) { int8_t enumVal; - status = H5Aread(attr_id, - attr_type, - &enumVal); - a = Attribute(static_cast< bool >(enumVal)); - } else - throw unsupported_data_error("[HDF5] Unsupported attribute enumeration"); - } else if( H5Tget_class(attr_type) == H5T_COMPOUND ) + status = H5Aread(attr_id, attr_type, &enumVal); + a = Attribute(static_cast(enumVal)); + } + else + throw unsupported_data_error( + "[HDF5] Unsupported attribute enumeration"); + } + else if (H5Tget_class(attr_type) == H5T_COMPOUND) { bool isComplexType = false; - if( H5Tget_nmembers(attr_type) == 2 ) + if (H5Tget_nmembers(attr_type) == 2) { - char* m0 = H5Tget_member_name(attr_type, 0); - char* m1 = H5Tget_member_name(attr_type, 1); - if( m0 != nullptr && m1 != nullptr ) - if( (strncmp("r" , m0, 1) == 0) && (strncmp("i", m1, 1) == 0) ) + char *m0 = H5Tget_member_name(attr_type, 0); + char *m1 = H5Tget_member_name(attr_type, 1); + if (m0 != nullptr && m1 != nullptr) + if ((strncmp("r", m0, 1) == 0) && + (strncmp("i", m1, 1) == 0)) isComplexType = true; H5free_memory(m1); H5free_memory(m0); @@ -1628,225 +1828,225 @@ HDF5IOHandlerImpl::readAttribute(Writable* writable, // re-implement legacy libSplash attributes for ColDim // see: include/splash/basetypes/ColTypeDim.hpp - bool isLegacyLibSplashAttr = ( - H5Tget_nmembers(attr_type) == 3 && - H5Tget_size(attr_type) == sizeof(hsize_t) * 3 - ); - if( isLegacyLibSplashAttr ) + bool isLegacyLibSplashAttr = + (H5Tget_nmembers(attr_type) == 3 && + H5Tget_size(attr_type) == sizeof(hsize_t) * 3); + if (isLegacyLibSplashAttr) { - char* m0 = H5Tget_member_name(attr_type, 0); - char* m1 = H5Tget_member_name(attr_type, 1); - char* m2 = H5Tget_member_name(attr_type, 2); - if( m0 == nullptr || m1 == nullptr || m2 == nullptr ) + char *m0 = H5Tget_member_name(attr_type, 0); + char *m1 = H5Tget_member_name(attr_type, 1); + char *m2 = H5Tget_member_name(attr_type, 2); + if (m0 == nullptr || m1 == nullptr || m2 == nullptr) + // clang-format off isLegacyLibSplashAttr = false; // NOLINT(bugprone-branch-clone) - else if(strcmp("x", m0) != 0 || strcmp("y", m1) != 0 || strcmp("z", m2) != 0) + // clang-format on + else if ( + strcmp("x", m0) != 0 || strcmp("y", m1) != 0 || + strcmp("z", m2) != 0) isLegacyLibSplashAttr = false; H5free_memory(m2); H5free_memory(m1); H5free_memory(m0); } - if( isLegacyLibSplashAttr ) + if (isLegacyLibSplashAttr) { - std::vector< hsize_t > vc(3, 0); - status = H5Aread(attr_id, - attr_type, - vc.data()); + std::vector vc(3, 0); + status = H5Aread(attr_id, attr_type, vc.data()); a = Attribute(vc); - } else if( isComplexType ) + } + else if (isComplexType) { size_t complexSize = H5Tget_member_offset(attr_type, 1); - if( complexSize == sizeof(float) ) + if (complexSize == sizeof(float)) { - std::complex< float > cf; + std::complex cf; status = H5Aread(attr_id, attr_type, &cf); a = Attribute(cf); } - else if( complexSize == sizeof(double) ) + else if (complexSize == sizeof(double)) { - std::complex< double > cd; + std::complex cd; status = H5Aread(attr_id, attr_type, &cd); a = Attribute(cd); } - else if( complexSize == sizeof(long double) ) + else if (complexSize == sizeof(long double)) { - 
std::complex< long double > cld; + std::complex cld; status = H5Aread(attr_id, attr_type, &cld); a = Attribute(cld); } else - throw unsupported_data_error("[HDF5] Unknown complex type representation"); + throw unsupported_data_error( + "[HDF5] Unknown complex type representation"); } else - throw unsupported_data_error("[HDF5] Compound attribute type not supported"); + throw unsupported_data_error( + "[HDF5] Compound attribute type not supported"); } else - throw std::runtime_error("[HDF5] Unsupported scalar attribute type"); - } else if( attr_class == H5S_SIMPLE ) + throw std::runtime_error( + "[HDF5] Unsupported scalar attribute type"); + } + else if (attr_class == H5S_SIMPLE) { - if( ndims != 1 ) - throw std::runtime_error("[HDF5] Unsupported attribute (array with ndims != 1)"); + if (ndims != 1) + throw std::runtime_error( + "[HDF5] Unsupported attribute (array with ndims != 1)"); - if( H5Tequal(attr_type, H5T_NATIVE_CHAR) ) + if (H5Tequal(attr_type, H5T_NATIVE_CHAR)) { - std::vector< char > vc(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vc.data()); + std::vector vc(dims[0], 0); + status = H5Aread(attr_id, attr_type, vc.data()); a = Attribute(vc); - } else if( H5Tequal(attr_type, H5T_NATIVE_UCHAR) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UCHAR)) { - std::vector< unsigned char > vu(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vu.data()); + std::vector vu(dims[0], 0); + status = H5Aread(attr_id, attr_type, vu.data()); a = Attribute(vu); - } else if( H5Tequal(attr_type, H5T_NATIVE_SHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_SHORT)) { - std::vector< short > vint16(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint16.data()); + std::vector vint16(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint16.data()); a = Attribute(vint16); - } else if( H5Tequal(attr_type, H5T_NATIVE_INT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_INT)) { - std::vector< int > vint32(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint32.data()); + std::vector vint32(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint32.data()); a = Attribute(vint32); - } else if( H5Tequal(attr_type, H5T_NATIVE_LONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LONG)) { - std::vector< long > vint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint64.data()); + std::vector vint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint64.data()); a = Attribute(vint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_LLONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LLONG)) { - std::vector< long long > vint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vint64.data()); + std::vector vint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vint64.data()); a = Attribute(vint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_USHORT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_USHORT)) { - std::vector< unsigned short > vuint16(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint16.data()); + std::vector vuint16(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint16.data()); a = Attribute(vuint16); - } else if( H5Tequal(attr_type, H5T_NATIVE_UINT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_UINT)) { - std::vector< unsigned int > vuint32(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint32.data()); + std::vector vuint32(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint32.data()); a = Attribute(vuint32); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULONG) ) + } + else if (H5Tequal(attr_type, 
H5T_NATIVE_ULONG)) { - std::vector< unsigned long > vuint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint64.data()); + std::vector vuint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint64.data()); a = Attribute(vuint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_ULLONG) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_ULLONG)) { - std::vector< unsigned long long > vuint64(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vuint64.data()); + std::vector vuint64(dims[0], 0); + status = H5Aread(attr_id, attr_type, vuint64.data()); a = Attribute(vuint64); - } else if( H5Tequal(attr_type, H5T_NATIVE_FLOAT) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_FLOAT)) { - std::vector< float > vf(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vf.data()); + std::vector vf(dims[0], 0); + status = H5Aread(attr_id, attr_type, vf.data()); a = Attribute(vf); - } else if( H5Tequal(attr_type, H5T_NATIVE_DOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_DOUBLE)) { - if( dims[0] == 7 && attr_name == "unitDimension" ) + if (dims[0] == 7 && attr_name == "unitDimension") { - std::array< double, 7 > ad; - status = H5Aread(attr_id, - attr_type, - &ad); + std::array ad; + status = H5Aread(attr_id, attr_type, &ad); a = Attribute(ad); - } else + } + else { - std::vector< double > vd(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vd.data()); + std::vector vd(dims[0], 0); + status = H5Aread(attr_id, attr_type, vd.data()); a = Attribute(vd); } - } else if( H5Tequal(attr_type, H5T_NATIVE_LDOUBLE) ) + } + else if (H5Tequal(attr_type, H5T_NATIVE_LDOUBLE)) { - std::vector< long double > vld(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vld.data()); + std::vector vld(dims[0], 0); + status = H5Aread(attr_id, attr_type, vld.data()); a = Attribute(vld); - } else if( H5Tequal(attr_type, m_H5T_CFLOAT) ) + } + else if (H5Tequal(attr_type, m_H5T_CFLOAT)) { - std::vector< std::complex< float > > vcf(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vcf.data()); + std::vector> vcf(dims[0], 0); + status = H5Aread(attr_id, attr_type, vcf.data()); a = Attribute(vcf); - } else if( H5Tequal(attr_type, m_H5T_CDOUBLE) ) + } + else if (H5Tequal(attr_type, m_H5T_CDOUBLE)) { - std::vector< std::complex< double > > vcd(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vcd.data()); + std::vector> vcd(dims[0], 0); + status = H5Aread(attr_id, attr_type, vcd.data()); a = Attribute(vcd); - } else if( H5Tequal(attr_type, m_H5T_CLONG_DOUBLE) ) + } + else if (H5Tequal(attr_type, m_H5T_CLONG_DOUBLE)) { - std::vector< std::complex< long double > > vcld(dims[0], 0); - status = H5Aread(attr_id, - attr_type, - vcld.data()); + std::vector> vcld(dims[0], 0); + status = H5Aread(attr_id, attr_type, vcld.data()); a = Attribute(vcld); - } else if( H5Tget_class(attr_type) == H5T_STRING ) + } + else if (H5Tget_class(attr_type) == H5T_STRING) { - std::vector< std::string > vs; - if( H5Tis_variable_str(attr_type) ) + std::vector vs; + if (H5Tis_variable_str(attr_type)) { - std::vector< char * > vc(dims[0]); - status = H5Aread(attr_id, - attr_type, - vc.data()); - VERIFY(status == 0, - "[HDF5] Internal error: Failed to read attribute " + attr_name + - " at " + concrete_h5_file_position(writable)); - for( auto const& val : vc ) + std::vector vc(dims[0]); + status = H5Aread(attr_id, attr_type, vc.data()); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to read attribute " + + attr_name + " at " + + concrete_h5_file_position(writable)); + for (auto const &val : vc) 
vs.push_back(auxiliary::strip(std::string(val), {'\0'})); - status = H5Dvlen_reclaim(attr_type, - attr_space, - H5P_DEFAULT, - vc.data()); - } else + status = H5Dvlen_reclaim( + attr_type, attr_space, H5P_DEFAULT, vc.data()); + } + else { size_t length = H5Tget_size(attr_type); - std::vector< char > c(dims[0] * length); - status = H5Aread(attr_id, - attr_type, - c.data()); - for( hsize_t i = 0; i < dims[0]; ++i ) - vs.push_back(auxiliary::strip(std::string(c.data() + i*length, length), {'\0'})); + std::vector c(dims[0] * length); + status = H5Aread(attr_id, attr_type, c.data()); + for (hsize_t i = 0; i < dims[0]; ++i) + vs.push_back(auxiliary::strip( + std::string(c.data() + i * length, length), {'\0'})); } a = Attribute(vs); - } else - throw std::runtime_error("[HDF5] Unsupported simple attribute type"); - } else + } + else + throw std::runtime_error( + "[HDF5] Unsupported simple attribute type"); + } + else throw std::runtime_error("[HDF5] Unsupported attribute class"); - VERIFY(status == 0, "[HDF5] Internal error: Failed to read attribute " + attr_name + " at " + concrete_h5_file_position(writable)); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to read attribute " + attr_name + + " at " + concrete_h5_file_position(writable)); status = H5Tclose(attr_type); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute datatype during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute datatype during " + "attribute read"); status = H5Sclose(attr_space); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute file space during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute file space during " + "attribute read"); auto dtype = parameters.dtype; *dtype = a.dtype; @@ -1854,211 +2054,259 @@ HDF5IOHandlerImpl::readAttribute(Writable* writable, *resource = a.getResource(); status = H5Aclose(attr_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close attribute " + attr_name + " at " + concrete_h5_file_position(writable) + " during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close attribute " + attr_name + + " at " + concrete_h5_file_position(writable) + + " during attribute read"); status = H5Oclose(obj_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close " + concrete_h5_file_position(writable) + " during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close " + + concrete_h5_file_position(writable) + " during attribute read"); status = H5Pclose(fapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 attribute during attribute read"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 attribute during " + "attribute read"); } -void -HDF5IOHandlerImpl::listPaths(Writable* writable, - Parameter< Operation::LIST_PATHS > & parameters) +void HDF5IOHandlerImpl::listPaths( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during path listing"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during path " + "listing"); - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); + auto res = getFile(writable); + File file = res ? 
res.value() : getFile(writable->parent).value(); hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during path listing"); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during path listing"); H5G_info_t group_info; herr_t status = H5Gget_info(node_id, &group_info); - VERIFY(status == 0, "[HDF5] Internal error: Failed to get HDF5 group info for " + concrete_h5_file_position(writable) + " during path listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to get HDF5 group info for " + + concrete_h5_file_position(writable) + " during path listing"); auto paths = parameters.paths; - for( hsize_t i = 0; i < group_info.nlinks; ++i ) + for (hsize_t i = 0; i < group_info.nlinks; ++i) { - if( H5G_GROUP == H5Gget_objtype_by_idx(node_id, i) ) + if (H5G_GROUP == H5Gget_objtype_by_idx(node_id, i)) { ssize_t name_length = H5Gget_objname_by_idx(node_id, i, nullptr, 0); - std::vector< char > name(name_length+1); - H5Gget_objname_by_idx(node_id, i, name.data(), name_length+1); + std::vector name(name_length + 1); + H5Gget_objname_by_idx(node_id, i, name.data(), name_length + 1); paths->push_back(std::string(name.data(), name_length)); } } status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group " + concrete_h5_file_position(writable) + " during path listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group " + + concrete_h5_file_position(writable) + " during path listing"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during path listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during path " + "listing"); } -void -HDF5IOHandlerImpl::listDatasets(Writable* writable, - Parameter< Operation::LIST_DATASETS >& parameters) +void HDF5IOHandlerImpl::listDatasets( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during dataset listing"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during dataset " + "listing"); - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); + auto res = getFile(writable); + File file = res ? 
res.value() : getFile(writable->parent).value(); hid_t gapl = H5Pcreate(H5P_GROUP_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(gapl, true); } #endif - hid_t node_id = H5Gopen(file.id, - concrete_h5_file_position(writable).c_str(), - gapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during dataset listing"); + hid_t node_id = + H5Gopen(file.id, concrete_h5_file_position(writable).c_str(), gapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during dataset " + "listing"); H5G_info_t group_info; herr_t status = H5Gget_info(node_id, &group_info); - VERIFY(status == 0, "[HDF5] Internal error: Failed to get HDF5 group info for " + concrete_h5_file_position(writable) + " during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to get HDF5 group info for " + + concrete_h5_file_position(writable) + " during dataset listing"); auto datasets = parameters.datasets; - for( hsize_t i = 0; i < group_info.nlinks; ++i ) + for (hsize_t i = 0; i < group_info.nlinks; ++i) { - if( H5G_DATASET == H5Gget_objtype_by_idx(node_id, i) ) + if (H5G_DATASET == H5Gget_objtype_by_idx(node_id, i)) { ssize_t name_length = H5Gget_objname_by_idx(node_id, i, nullptr, 0); - std::vector< char > name(name_length+1); - H5Gget_objname_by_idx(node_id, i, name.data(), name_length+1); + std::vector name(name_length + 1); + H5Gget_objname_by_idx(node_id, i, name.data(), name_length + 1); datasets->push_back(std::string(name.data(), name_length)); } } status = H5Gclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 group " + concrete_h5_file_position(writable) + " during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 group " + + concrete_h5_file_position(writable) + " during dataset listing"); status = H5Pclose(gapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during dataset " + "listing"); } -void HDF5IOHandlerImpl::listAttributes(Writable* writable, - Parameter< Operation::LIST_ATTS >& parameters) +void HDF5IOHandlerImpl::listAttributes( + Writable *writable, Parameter ¶meters) { - if( !writable->written ) - throw std::runtime_error("[HDF5] Internal error: Writable not marked written during attribute listing"); + if (!writable->written) + throw std::runtime_error( + "[HDF5] Internal error: Writable not marked written during " + "attribute listing"); - auto res = getFile( writable ); - File file = res ? res.value() : getFile( writable->parent ).value(); + auto res = getFile(writable); + File file = res ? 
res.value() : getFile(writable->parent).value(); hid_t node_id; hid_t fapl = H5Pcreate(H5P_LINK_ACCESS); -#if H5_VERSION_GE(1,10,0) && openPMD_HAVE_MPI - if( m_hdf5_collective_metadata ) +#if H5_VERSION_GE(1, 10, 0) && openPMD_HAVE_MPI + if (m_hdf5_collective_metadata) { H5Pset_all_coll_metadata_ops(fapl, true); } #endif - node_id = H5Oopen(file.id, - concrete_h5_file_position(writable).c_str(), - fapl); - VERIFY(node_id >= 0, "[HDF5] Internal error: Failed to open HDF5 group during attribute listing"); + node_id = + H5Oopen(file.id, concrete_h5_file_position(writable).c_str(), fapl); + VERIFY( + node_id >= 0, + "[HDF5] Internal error: Failed to open HDF5 group during attribute " + "listing"); herr_t status; -#if H5_VERSION_GE(1,12,0) +#if H5_VERSION_GE(1, 12, 0) H5O_info2_t object_info; status = H5Oget_info3(node_id, &object_info, H5O_INFO_NUM_ATTRS); #else H5O_info_t object_info; status = H5Oget_info(node_id, &object_info); #endif - VERIFY(status == 0, "[HDF5] Internal error: Failed to get HDF5 object info for " + concrete_h5_file_position(writable) + " during attribute listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to get HDF5 object info for " + + concrete_h5_file_position(writable) + " during attribute listing"); auto attributes = parameters.attributes; - for( hsize_t i = 0; i < object_info.num_attrs; ++i ) + for (hsize_t i = 0; i < object_info.num_attrs; ++i) { - ssize_t name_length = H5Aget_name_by_idx(node_id, - ".", - H5_INDEX_CRT_ORDER, - H5_ITER_INC, - i, - nullptr, - 0, - H5P_DEFAULT); - std::vector< char > name(name_length+1); - H5Aget_name_by_idx(node_id, - ".", - H5_INDEX_CRT_ORDER, - H5_ITER_INC, - i, - name.data(), - name_length+1, - H5P_DEFAULT); + ssize_t name_length = H5Aget_name_by_idx( + node_id, + ".", + H5_INDEX_CRT_ORDER, + H5_ITER_INC, + i, + nullptr, + 0, + H5P_DEFAULT); + std::vector name(name_length + 1); + H5Aget_name_by_idx( + node_id, + ".", + H5_INDEX_CRT_ORDER, + H5_ITER_INC, + i, + name.data(), + name_length + 1, + H5P_DEFAULT); attributes->push_back(std::string(name.data(), name_length)); } status = H5Oclose(node_id); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 object during attribute listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 object during attribute " + "listing"); status = H5Pclose(fapl); - VERIFY(status == 0, "[HDF5] Internal error: Failed to close HDF5 property during dataset listing"); + VERIFY( + status == 0, + "[HDF5] Internal error: Failed to close HDF5 property during dataset " + "listing"); } -std::optional< HDF5IOHandlerImpl::File > -HDF5IOHandlerImpl::getFile( Writable * writable ) +std::optional +HDF5IOHandlerImpl::getFile(Writable *writable) { - auto it = m_fileNames.find( writable ); - if( it == m_fileNames.end() ) + auto it = m_fileNames.find(writable); + if (it == m_fileNames.end()) { - return std::optional< File >(); + return std::optional(); } - auto it2 = m_fileNamesWithID.find( it->second ); - if( it2 == m_fileNamesWithID.end() ) + auto it2 = m_fileNamesWithID.find(it->second); + if (it2 == m_fileNamesWithID.end()) { - return std::optional< File >(); + return std::optional(); } File res; res.name = it->second; res.id = it2->second; - return std::make_optional( std::move( res ) ); + return std::make_optional(std::move(res)); } #endif #if openPMD_HAVE_HDF5 -HDF5IOHandler::HDF5IOHandler(std::string path, Access at, json::TracingJSON config) - : AbstractIOHandler(std::move(path), at), - m_impl{new HDF5IOHandlerImpl(this, std::move(config))} -{ } 
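The getFile() helper shown just above is the lookup that every handler method in this patch relies on: a writable maps to a file name, the name maps to an open HDF5 id, and when the writable itself has no entry the handler falls back to its parent (the recurring `res ? res.value() : getFile(writable->parent).value()` pattern). The following is a minimal, self-contained sketch of that fallback lookup; Node and FileHandle are hypothetical stand-ins for illustration, not the actual openPMD-api types.

#include <iostream>
#include <map>
#include <optional>
#include <string>

// Hypothetical stand-ins for the handler's Writable/File bookkeeping.
struct Node
{
    Node *parent = nullptr;
};

struct FileHandle
{
    std::string name;
    int id = -1;
};

// one name registered per node, one open id registered per name
std::map<Node const *, std::string> fileNames;
std::map<std::string, int> fileIDs;

std::optional<FileHandle> getFile(Node const *node)
{
    auto it = fileNames.find(node);
    if (it == fileNames.end())
        return std::optional<FileHandle>();
    auto it2 = fileIDs.find(it->second);
    if (it2 == fileIDs.end())
        return std::optional<FileHandle>();
    return std::make_optional(FileHandle{it->second, it2->second});
}

int main()
{
    Node root, child;
    child.parent = &root;
    fileNames[&root] = "electrons.h5";
    fileIDs["electrons.h5"] = 42;

    // Same shape as the handler code: try the node itself, else its parent.
    auto res = getFile(&child);
    FileHandle file = res ? res.value() : getFile(child.parent).value();
    std::cout << file.name << " (id " << file.id << ")" << std::endl;
    return 0;
}

Keeping two maps (node to name, name to id) lets several writables share a single open file id, which is why the lookup goes through the file name instead of storing the id on each node directly.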
+HDF5IOHandler::HDF5IOHandler( + std::string path, Access at, json::TracingJSON config) + : AbstractIOHandler(std::move(path), at) + , m_impl{new HDF5IOHandlerImpl(this, std::move(config))} +{} HDF5IOHandler::~HDF5IOHandler() = default; -std::future< void > -HDF5IOHandler::flush() +std::future HDF5IOHandler::flush() { return m_impl->flush(); } #else -HDF5IOHandler::HDF5IOHandler(std::string path, Access at, json::TracingJSON /* config */) - : AbstractIOHandler(std::move(path), at) +HDF5IOHandler::HDF5IOHandler( + std::string path, Access at, json::TracingJSON /* config */) + : AbstractIOHandler(std::move(path), at) { throw std::runtime_error("openPMD-api built without HDF5 support"); } HDF5IOHandler::~HDF5IOHandler() = default; -std::future< void > -HDF5IOHandler::flush() +std::future HDF5IOHandler::flush() { - return std::future< void >(); + return std::future(); } #endif -} // openPMD +} // namespace openPMD diff --git a/src/IO/HDF5/ParallelHDF5IOHandler.cpp b/src/IO/HDF5/ParallelHDF5IOHandler.cpp index 59178825c5..c0ce9e49e6 100644 --- a/src/IO/HDF5/ParallelHDF5IOHandler.cpp +++ b/src/IO/HDF5/ParallelHDF5IOHandler.cpp @@ -23,62 +23,75 @@ #include "openPMD/auxiliary/Environment.hpp" #if openPMD_HAVE_MPI -# include +#include #endif #include #include - namespace openPMD { #if openPMD_HAVE_HDF5 && openPMD_HAVE_MPI -# if openPMD_USE_VERIFY -# define VERIFY(CONDITION, TEXT) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } -# else -# define VERIFY(CONDITION, TEXT) do{ (void)sizeof(CONDITION); } while( 0 ) -# endif +#if openPMD_USE_VERIFY +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } +#else +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0) +#endif ParallelHDF5IOHandler::ParallelHDF5IOHandler( - std::string path, Access at, MPI_Comm comm, json::TracingJSON config ) - : AbstractIOHandler(std::move(path), at, comm), - m_impl{new ParallelHDF5IOHandlerImpl(this, comm, std::move(config))} -{ } + std::string path, Access at, MPI_Comm comm, json::TracingJSON config) + : AbstractIOHandler(std::move(path), at, comm) + , m_impl{new ParallelHDF5IOHandlerImpl(this, comm, std::move(config))} +{} ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default; -std::future< void > -ParallelHDF5IOHandler::flush() +std::future ParallelHDF5IOHandler::flush() { return m_impl->flush(); } ParallelHDF5IOHandlerImpl::ParallelHDF5IOHandlerImpl( - AbstractIOHandler* handler, MPI_Comm comm, json::TracingJSON config ) - : HDF5IOHandlerImpl{handler, std::move(config)}, - m_mpiComm{comm}, - m_mpiInfo{MPI_INFO_NULL} /* MPI 3.0+: MPI_INFO_ENV */ + AbstractIOHandler *handler, MPI_Comm comm, json::TracingJSON config) + : HDF5IOHandlerImpl{handler, std::move(config)} + , m_mpiComm{comm} + , m_mpiInfo{MPI_INFO_NULL} /* MPI 3.0+: MPI_INFO_ENV */ { m_datasetTransferProperty = H5Pcreate(H5P_DATASET_XFER); m_fileAccessProperty = H5Pcreate(H5P_FILE_ACCESS); m_fileCreateProperty = H5Pcreate(H5P_FILE_CREATE); -#if H5_VERSION_GE(1,10,1) - auto const hdf5_spaced_allocation = auxiliary::getEnvString( "OPENPMD_HDF5_PAGED_ALLOCATION", "ON" ); - if( hdf5_spaced_allocation == "ON" ) { - auto const strPageSize = auxiliary::getEnvString( "OPENPMD_HDF5_PAGED_ALLOCATION_SIZE", "33554432" ); +#if H5_VERSION_GE(1, 10, 1) + auto const hdf5_spaced_allocation = + auxiliary::getEnvString("OPENPMD_HDF5_PAGED_ALLOCATION", "ON"); + if (hdf5_spaced_allocation == "ON") + { + auto const strPageSize = auxiliary::getEnvString( + 
"OPENPMD_HDF5_PAGED_ALLOCATION_SIZE", "33554432"); std::stringstream tstream(strPageSize); hsize_t page_size; tstream >> page_size; - H5Pset_file_space_strategy(m_fileCreateProperty, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)0); + H5Pset_file_space_strategy( + m_fileCreateProperty, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)0); H5Pset_file_space_page_size(m_fileCreateProperty, page_size); } #endif - auto const hdf5_defer_metadata = auxiliary::getEnvString( "OPENPMD_HDF5_DEFER_METADATA", "ON" ); - if( hdf5_defer_metadata == "ON" ) { - auto const strMetaSize = auxiliary::getEnvString( "OPENPMD_HDF5_DEFER_METADATA_SIZE", "33554432" ); + auto const hdf5_defer_metadata = + auxiliary::getEnvString("OPENPMD_HDF5_DEFER_METADATA", "ON"); + if (hdf5_defer_metadata == "ON") + { + auto const strMetaSize = auxiliary::getEnvString( + "OPENPMD_HDF5_DEFER_METADATA_SIZE", "33554432"); std::stringstream tstream(strMetaSize); hsize_t meta_size; tstream >> meta_size; @@ -96,81 +109,96 @@ ParallelHDF5IOHandlerImpl::ParallelHDF5IOHandlerImpl( } H5FD_mpio_xfer_t xfer_mode = H5FD_MPIO_COLLECTIVE; - auto const hdf5_collective = auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "ON" ); - if( hdf5_collective == "ON" ) + auto const hdf5_collective = + auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "ON"); + if (hdf5_collective == "ON") xfer_mode = H5FD_MPIO_INDEPENDENT; else { - VERIFY(hdf5_collective == "OFF", "[HDF5] Internal error: OPENPMD_HDF5_INDEPENDENT property must be either ON or OFF"); + VERIFY( + hdf5_collective == "OFF", + "[HDF5] Internal error: OPENPMD_HDF5_INDEPENDENT property must be " + "either ON or OFF"); } herr_t status; status = H5Pset_dxpl_mpio(m_datasetTransferProperty, xfer_mode); -#if H5_VERSION_GE(1,10,0) - status = H5Pset_all_coll_metadata_ops(m_fileAccessProperty, m_hdf5_collective_metadata); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set metadata read HDF5 file access property"); - - status = H5Pset_coll_metadata_write(m_fileAccessProperty, m_hdf5_collective_metadata); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set metadata write HDF5 file access property"); +#if H5_VERSION_GE(1, 10, 0) + status = H5Pset_all_coll_metadata_ops( + m_fileAccessProperty, m_hdf5_collective_metadata); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed to set metadata read HDF5 file access " + "property"); + + status = H5Pset_coll_metadata_write( + m_fileAccessProperty, m_hdf5_collective_metadata); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed to set metadata write HDF5 file access " + "property"); #endif - auto const strByte = auxiliary::getEnvString( "OPENPMD_HDF5_ALIGNMENT", "1" ); + auto const strByte = auxiliary::getEnvString("OPENPMD_HDF5_ALIGNMENT", "1"); std::stringstream sstream(strByte); hsize_t bytes; sstream >> bytes; - auto const strThreshold = auxiliary::getEnvString( "OPENPMD_HDF5_THRESHOLD", "0" ); + auto const strThreshold = + auxiliary::getEnvString("OPENPMD_HDF5_THRESHOLD", "0"); std::stringstream tstream(strThreshold); hsize_t threshold; tstream >> threshold; - if ( bytes > 1 ) - H5Pset_alignment(m_fileAccessProperty, threshold, bytes); + if (bytes > 1) + H5Pset_alignment(m_fileAccessProperty, threshold, bytes); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set HDF5 dataset transfer property"); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed to set HDF5 dataset transfer property"); status = H5Pset_fapl_mpio(m_fileAccessProperty, m_mpiComm, m_mpiInfo); - VERIFY(status >= 0, "[HDF5] Internal error: Failed to set HDF5 file access 
property"); + VERIFY( + status >= 0, + "[HDF5] Internal error: Failed to set HDF5 file access property"); } ParallelHDF5IOHandlerImpl::~ParallelHDF5IOHandlerImpl() { herr_t status; - while( !m_openFileIDs.empty() ) + while (!m_openFileIDs.empty()) { auto file = m_openFileIDs.begin(); status = H5Fclose(*file); - if( status < 0 ) - std::cerr << "Internal error: Failed to close HDF5 file (parallel)\n"; + if (status < 0) + std::cerr + << "Internal error: Failed to close HDF5 file (parallel)\n"; m_openFileIDs.erase(file); } } #else -# if openPMD_HAVE_MPI -ParallelHDF5IOHandler::ParallelHDF5IOHandler(std::string path, - Access at, - MPI_Comm comm, - json::TracingJSON /* config */) - : AbstractIOHandler(std::move(path), at, comm) +#if openPMD_HAVE_MPI +ParallelHDF5IOHandler::ParallelHDF5IOHandler( + std::string path, Access at, MPI_Comm comm, json::TracingJSON /* config */) + : AbstractIOHandler(std::move(path), at, comm) { throw std::runtime_error("openPMD-api built without HDF5 support"); } -# else -ParallelHDF5IOHandler::ParallelHDF5IOHandler(std::string path, - Access at, - json::TracingJSON /* config */) - : AbstractIOHandler(std::move(path), at) +#else +ParallelHDF5IOHandler::ParallelHDF5IOHandler( + std::string path, Access at, json::TracingJSON /* config */) + : AbstractIOHandler(std::move(path), at) { - throw std::runtime_error("openPMD-api built without parallel support and without HDF5 support"); + throw std::runtime_error( + "openPMD-api built without parallel support and without HDF5 support"); } -# endif +#endif ParallelHDF5IOHandler::~ParallelHDF5IOHandler() = default; -std::future< void > -ParallelHDF5IOHandler::flush() +std::future ParallelHDF5IOHandler::flush() { - return std::future< void >(); + return std::future(); } #endif -} // openPMD +} // namespace openPMD diff --git a/src/IO/IOTask.cpp b/src/IO/IOTask.cpp index f5bd6c655d..36c4f9c786 100644 --- a/src/IO/IOTask.cpp +++ b/src/IO/IOTask.cpp @@ -24,49 +24,48 @@ #include // std::cerr - namespace openPMD { -Writable* -getWritable(Attributable* a) -{ return &a->writable(); } +Writable *getWritable(Attributable *a) +{ + return &a->writable(); +} -template<> -void Parameter< Operation::CREATE_DATASET >::warnUnusedParameters< - json::TracingJSON >( - json::TracingJSON & config, - std::string const & currentBackendName, - std::string const & warningMessage ) +template <> +void Parameter::warnUnusedParameters< + json::TracingJSON>( + json::TracingJSON &config, + std::string const ¤tBackendName, + std::string const &warningMessage) { /* * Fake-read non-backend-specific options. Some backends don't read those * and we don't want to have warnings for them. 
*/ - for( std::string const & key : { "resizable" } ) + for (std::string const &key : {"resizable"}) { - config[ key ]; + config[key]; } auto shadow = config.invertShadow(); // The backends are supposed to deal with this // Only global options here - for( auto const & backendKey : json::backendKeys ) + for (auto const &backendKey : json::backendKeys) { - if( backendKey != currentBackendName ) + if (backendKey != currentBackendName) { - shadow.erase( backendKey ); + shadow.erase(backendKey); } } - if( shadow.size() > 0 ) + if (shadow.size() > 0) { - switch( config.originallySpecifiedAs ) + switch (config.originallySpecifiedAs) { case json::SupportedLanguages::JSON: std::cerr << warningMessage << shadow.dump() << std::endl; break; - case json::SupportedLanguages::TOML: - { - auto asToml = json::jsonToToml( shadow ); + case json::SupportedLanguages::TOML: { + auto asToml = json::jsonToToml(shadow); std::cerr << warningMessage << asToml << std::endl; break; } @@ -76,10 +75,9 @@ void Parameter< Operation::CREATE_DATASET >::warnUnusedParameters< namespace internal { - std::string - operationAsString( Operation op ) + std::string operationAsString(Operation op) { - switch( op ) + switch (op) { case Operation::CREATE_FILE: return "CREATE_FILE"; @@ -155,5 +153,5 @@ namespace internal break; } } -} -} // openPMD +} // namespace internal +} // namespace openPMD diff --git a/src/IO/InvalidatableFile.cpp b/src/IO/InvalidatableFile.cpp index 723c46b3fe..8d4b688673 100644 --- a/src/IO/InvalidatableFile.cpp +++ b/src/IO/InvalidatableFile.cpp @@ -21,70 +21,62 @@ #include "openPMD/IO/InvalidatableFile.hpp" - -openPMD::InvalidatableFile::InvalidatableFile( std::string s ) : - fileState { std::make_shared< FileState >( s ) } +openPMD::InvalidatableFile::InvalidatableFile(std::string s) + : fileState{std::make_shared(s)} {} - -void openPMD::InvalidatableFile::invalidate( ) +void openPMD::InvalidatableFile::invalidate() { fileState->valid = false; } - -bool openPMD::InvalidatableFile::valid( ) const +bool openPMD::InvalidatableFile::valid() const { return fileState->valid; } - -openPMD::InvalidatableFile & -openPMD::InvalidatableFile::operator=( std::string s ) +openPMD::InvalidatableFile &openPMD::InvalidatableFile::operator=(std::string s) { - if( fileState ) + if (fileState) { fileState->name = s; } else { - fileState = std::make_shared< FileState >( s ); + fileState = std::make_shared(s); } return *this; } - -bool -openPMD::InvalidatableFile::operator==( const openPMD::InvalidatableFile & f ) const +bool openPMD::InvalidatableFile::operator==( + const openPMD::InvalidatableFile &f) const { return this->fileState == f.fileState; } - -std::string & openPMD::InvalidatableFile::operator*( ) const +std::string &openPMD::InvalidatableFile::operator*() const { return fileState->name; } - -std::string * openPMD::InvalidatableFile::operator->( ) const +std::string *openPMD::InvalidatableFile::operator->() const { return &fileState->name; } - -openPMD::InvalidatableFile::operator bool( ) const +openPMD::InvalidatableFile::operator bool() const { - return fileState.operator bool( ); + return fileState.operator bool(); } - -openPMD::InvalidatableFile::FileState::FileState( std::string s ) : - name { std::move( s ) } +openPMD::InvalidatableFile::FileState::FileState(std::string s) + : name{std::move(s)} {} -std::hash< openPMD::InvalidatableFile >::result_type -std::hash< openPMD::InvalidatableFile >::operator()( const openPMD::InvalidatableFile & s ) const noexcept +std::hash::result_type +std::hash::operator()( + const 
openPMD::InvalidatableFile &s) const noexcept { - return std::hash< shared_ptr< openPMD::InvalidatableFile::FileState>> {}( s.fileState ); + return std::hash>{}( + s.fileState); } diff --git a/src/IO/JSON/JSONFilePosition.cpp b/src/IO/JSON/JSONFilePosition.cpp index 536000036c..3232d52430 100644 --- a/src/IO/JSON/JSONFilePosition.cpp +++ b/src/IO/JSON/JSONFilePosition.cpp @@ -2,10 +2,8 @@ #include - namespace openPMD { - JSONFilePosition::JSONFilePosition( json::json_pointer ptr): - id( std::move( ptr ) ) - {} -} +JSONFilePosition::JSONFilePosition(json::json_pointer ptr) : id(std::move(ptr)) +{} +} // namespace openPMD diff --git a/src/IO/JSON/JSONIOHandler.cpp b/src/IO/JSON/JSONIOHandler.cpp index ff95774440..158c5454ed 100644 --- a/src/IO/JSON/JSONIOHandler.cpp +++ b/src/IO/JSON/JSONIOHandler.cpp @@ -21,24 +21,16 @@ #include "openPMD/IO/JSON/JSONIOHandler.hpp" - namespace openPMD { - JSONIOHandler::~JSONIOHandler( ) = default; +JSONIOHandler::~JSONIOHandler() = default; - JSONIOHandler::JSONIOHandler( - std::string path, - Access at - ) : - AbstractIOHandler { - path, - at - }, - m_impl { JSONIOHandlerImpl { this } } - {} +JSONIOHandler::JSONIOHandler(std::string path, Access at) + : AbstractIOHandler{path, at}, m_impl{JSONIOHandlerImpl{this}} +{} - std::future< void > JSONIOHandler::flush( ) - { - return m_impl.flush( ); - } -} // openPMD +std::future JSONIOHandler::flush() +{ + return m_impl.flush(); +} +} // namespace openPMD diff --git a/src/IO/JSON/JSONIOHandlerImpl.cpp b/src/IO/JSON/JSONIOHandlerImpl.cpp index 4de27707a4..267594c8ff 100644 --- a/src/IO/JSON/JSONIOHandlerImpl.cpp +++ b/src/IO/JSON/JSONIOHandlerImpl.cpp @@ -19,1807 +19,1433 @@ * If not, see . */ +#include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp" +#include "openPMD/Datatype.hpp" +#include "openPMD/DatatypeHelpers.hpp" #include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/Memory.hpp" #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/backend/Writable.hpp" -#include "openPMD/Datatype.hpp" -#include "openPMD/DatatypeHelpers.hpp" -#include "openPMD/IO/JSON/JSONIOHandlerImpl.hpp" #include #include #include - namespace openPMD { #if openPMD_USE_VERIFY -# define VERIFY( CONDITION, TEXT ) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } +#define VERIFY(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } #else -# define VERIFY( CONDITION, TEXT ) do{ (void)sizeof(CONDITION); } while( 0 ); +#define VERIFY(CONDITION, TEXT) \ + do \ + { \ + (void)sizeof(CONDITION); \ + } while (0); #endif -#define VERIFY_ALWAYS( CONDITION, TEXT ) { if(!(CONDITION)) throw std::runtime_error((TEXT)); } - +#define VERIFY_ALWAYS(CONDITION, TEXT) \ + { \ + if (!(CONDITION)) \ + throw std::runtime_error((TEXT)); \ + } - JSONIOHandlerImpl::JSONIOHandlerImpl( AbstractIOHandler * handler ) : - AbstractIOHandlerImpl( handler ) - {} +JSONIOHandlerImpl::JSONIOHandlerImpl(AbstractIOHandler *handler) + : AbstractIOHandlerImpl(handler) +{} +JSONIOHandlerImpl::~JSONIOHandlerImpl() +{ + // we must not throw in a destructor + try + { + flush(); + } + catch (std::exception const &ex) + { + std::cerr << "[~JSONIOHandlerImpl] An error occurred: " << ex.what() + << std::endl; + } + catch (...) + { + std::cerr << "[~JSONIOHandlerImpl] An error occurred." 
<< std::endl; + } +} - JSONIOHandlerImpl::~JSONIOHandlerImpl( ) +std::future JSONIOHandlerImpl::flush() +{ + AbstractIOHandlerImpl::flush(); + for (auto const &file : m_dirty) { - // we must not throw in a destructor - try - { - flush( ); - } - catch( std::exception const & ex ) - { - std::cerr << "[~JSONIOHandlerImpl] An error occurred: " << ex.what() << std::endl; - } - catch( ... ) - { - std::cerr << "[~JSONIOHandlerImpl] An error occurred." << std::endl; - } + putJsonContents(file, false); } + m_dirty.clear(); + return std::future(); +} +void JSONIOHandlerImpl::createFile( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Creating a file in read-only mode is not possible."); - std::future< void > JSONIOHandlerImpl::flush( ) + if (!writable->written) { - AbstractIOHandlerImpl::flush( ); - for( auto const & file: m_dirty ) + std::string name = parameters.name; + if (!auxiliary::ends_with(name, ".json")) { - putJsonContents( - file, - false - ); + name += ".json"; } - m_dirty.clear( ); - return std::future< void >( ); - } + auto res_pair = getPossiblyExisting(name); + File shared_name = File(name); + VERIFY_ALWAYS( + !(m_handler->m_backendAccess == Access::READ_WRITE && + (!std::get<2>(res_pair) || + auxiliary::file_exists(fullPath(std::get<0>(res_pair))))), + "[JSON] Can only overwrite existing file in CREATE mode."); - void JSONIOHandlerImpl::createFile( - Writable * writable, - Parameter< Operation::CREATE_FILE > const & parameters - ) - { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Creating a file in read-only mode is not possible." ); + if (!std::get<2>(res_pair)) + { + auto file = std::get<0>(res_pair); + m_dirty.erase(file); + m_jsonVals.erase(file); + file.invalidate(); + } - if( !writable->written ) + std::string const dir(m_handler->directory); + if (!auxiliary::directory_exists(dir)) { - std::string name = parameters.name; - if( !auxiliary::ends_with( - name, - ".json" - ) ) - { - name += ".json"; - } + auto success = auxiliary::create_directories(dir); + VERIFY(success, "[JSON] Could not create directory."); + } - auto res_pair = getPossiblyExisting( name ); - File shared_name = File( name ); - VERIFY_ALWAYS( !(m_handler->m_backendAccess == Access::READ_WRITE && - ( !std::get< 2 >( res_pair ) || - auxiliary::file_exists( fullPath( std::get< 0 >( res_pair ) ) ) ) ), - "[JSON] Can only overwrite existing file in CREATE mode." ); + associateWithFile(writable, shared_name); + this->m_dirty.emplace(shared_name); + // make sure to overwrite! + this->m_jsonVals[shared_name] = std::make_shared(); - if( !std::get< 2 >( res_pair ) ) - { - auto file = std::get< 0 >( res_pair ); - m_dirty.erase( file ); - m_jsonVals.erase( file ); - file.invalidate( ); - } + writable->written = true; + writable->abstractFilePosition = std::make_shared(); + } +} - std::string const dir( m_handler->directory ); - if( !auxiliary::directory_exists( dir ) ) - { - auto success = auxiliary::create_directories( dir ); - VERIFY( success, - "[JSON] Could not create directory." ); - } +void JSONIOHandlerImpl::createPath( + Writable *writable, Parameter const ¶meter) +{ + std::string path = parameter.path; + /* Sanitize: + * The JSON API does not like to have slashes in the end. + */ + if (auxiliary::ends_with(path, "/")) + { + path = auxiliary::replace_last(path, "/", ""); + } - associateWithFile( - writable, - shared_name - ); - this->m_dirty - .emplace( shared_name ); - // make sure to overwrite! 
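For orientation, the per-node layout this backend produces can be read off createDataset and writeAttribute below: a dataset node carries a "datatype" string, an n-dimensional "data" array pre-filled by initializeNDArray, and an "attributes" object whose entries pair a datatype with a value (putJsonContents additionally stamps a top-level "platform_byte_widths" object). A hand-built illustration of that shape with nlohmann::json — the "DOUBLE" spelling merely stands in for whatever datatypeToString() returns:

    #include <nlohmann/json.hpp>
    #include <iostream>

    int main()
    {
        nlohmann::json dset;
        dset["datatype"] = "DOUBLE";
        // a freshly created 2x3 dataset holds nulls (cf. initializeNDArray);
        // written chunks later overwrite the affected cells in place
        dset["data"] = {
            {nullptr, nullptr, nullptr}, {nullptr, nullptr, nullptr}};
        // attributes are stored as {datatype, value} pairs (cf. writeAttribute)
        dset["attributes"]["unitSI"] = {{"datatype", "DOUBLE"}, {"value", 1.0}};
        std::cout << dset.dump(2) << std::endl;
    }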
- this->m_jsonVals[shared_name] = - std::make_shared< nlohmann::json >( ); + auto file = refreshFileFromParent(writable); + auto *jsonVal = &*obtainJsonContents(file); + if (!auxiliary::starts_with(path, "/")) + { // path is relative + auto filepos = setAndGetFilePosition(writable, false); - writable->written = true; - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( ); - } + jsonVal = &(*jsonVal)[filepos->id]; + ensurePath(jsonVal, path); + path = filepos->id.to_string() + "/" + path; + } + else + { + + ensurePath(jsonVal, path); } + m_dirty.emplace(file); + writable->written = true; + writable->abstractFilePosition = + std::make_shared(nlohmann::json::json_pointer(path)); +} - void JSONIOHandlerImpl::createPath( - Writable * writable, - Parameter< Operation::CREATE_PATH > const & parameter - ) +void JSONIOHandlerImpl::createDataset( + Writable *writable, Parameter const ¶meter) +{ + if (m_handler->m_backendAccess == Access::READ_ONLY) { - std::string path = parameter.path; - /* Sanitize: - * The JSON API does not like to have slashes in the end. - */ - if( auxiliary::ends_with( - path, - "/" - ) ) - { - path = auxiliary::replace_last( - path, - "/", - "" - ); - } + throw std::runtime_error( + "[JSON] Creating a dataset in a file opened as read only is not " + "possible."); + } + if (!writable->written) + { + /* Sanitize name */ + std::string name = removeSlashes(parameter.name); - auto file = refreshFileFromParent( writable ); - - auto * jsonVal = &*obtainJsonContents( file ); - if( !auxiliary::starts_with( - path, - "/" - ) ) - { // path is relative - auto filepos = setAndGetFilePosition( - writable, - false - ); - - jsonVal = &( *jsonVal )[filepos->id]; - ensurePath( - jsonVal, - path - ); - path = - filepos->id - .to_string( ) + "/" + path; + auto file = refreshFileFromParent(writable); + setAndGetFilePosition(writable); + auto &jsonVal = obtainJsonContents(writable); + // be sure to have a JSON object, not a list + if (jsonVal.empty()) + { + jsonVal = nlohmann::json::object(); } - else + setAndGetFilePosition(writable, name); + auto &dset = jsonVal[name]; + dset["datatype"] = datatypeToString(parameter.dtype); + switch (parameter.dtype) { - - ensurePath( - jsonVal, - path - ); + case Datatype::CFLOAT: + case Datatype::CDOUBLE: + case Datatype::CLONG_DOUBLE: { + auto complexExtent = parameter.extent; + complexExtent.push_back(2); + dset["data"] = initializeNDArray(complexExtent); + break; + } + default: + dset["data"] = initializeNDArray(parameter.extent); + break; } - - m_dirty.emplace( file ); writable->written = true; - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( nlohmann::json::json_pointer( path ) ); + m_dirty.emplace(file); } +} - - void JSONIOHandlerImpl::createDataset( - Writable * writable, - Parameter< Operation::CREATE_DATASET > const & parameter - ) +namespace +{ + void mergeInto(nlohmann::json &into, nlohmann::json &from); + void mergeInto(nlohmann::json &into, nlohmann::json &from) { - if(m_handler->m_backendAccess == Access::READ_ONLY ) + if (!from.is_array()) { - throw std::runtime_error( "[JSON] Creating a dataset in a file opened as read only is not possible." 
); + into = from; // copy } - if( !writable->written ) + else { - /* Sanitize name */ - std::string name = removeSlashes( parameter.name ); - - auto file = refreshFileFromParent( writable ); - setAndGetFilePosition( writable ); - auto & jsonVal = obtainJsonContents( writable ); - // be sure to have a JSON object, not a list - if( jsonVal.empty( ) ) - { - jsonVal = nlohmann::json::object( ); - } - setAndGetFilePosition( - writable, - name - ); - auto & dset = jsonVal[name]; - dset["datatype"] = datatypeToString( parameter.dtype ); - switch( parameter.dtype ) + size_t size = from.size(); + for (size_t i = 0; i < size; ++i) { - case Datatype::CFLOAT: - case Datatype::CDOUBLE: - case Datatype::CLONG_DOUBLE: + if (!from[i].is_null()) { - auto complexExtent = parameter.extent; - complexExtent.push_back( 2 ); - dset["data"] = initializeNDArray( complexExtent ); - break; + mergeInto(into[i], from[i]); } - default: - dset["data"] = initializeNDArray( parameter.extent ); - break; } - writable->written = true; - m_dirty.emplace( file ); } } +} // namespace - namespace +void JSONIOHandlerImpl::extendDataset( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot extend a dataset in read-only mode.") + setAndGetFilePosition(writable); + refreshFileFromParent(writable); + auto &j = obtainJsonContents(writable); + + try { - void - mergeInto( nlohmann::json & into, nlohmann::json & from ); - void - mergeInto( nlohmann::json & into, nlohmann::json & from ) + auto datasetExtent = getExtent(j); + VERIFY_ALWAYS( + datasetExtent.size() == parameters.extent.size(), + "[JSON] Cannot change dimensionality of a dataset") + for (size_t currentdim = 0; currentdim < parameters.extent.size(); + currentdim++) { - if( !from.is_array() ) - { - into = from; // copy - } - else - { - size_t size = from.size(); - for( size_t i = 0; i < size; ++i ) - { - if( !from[ i ].is_null() ) - { - mergeInto( into[ i ], from[ i ] ); - } - } - } + VERIFY_ALWAYS( + datasetExtent[currentdim] <= parameters.extent[currentdim], + "[JSON] Cannot shrink the extent of a dataset") } - } // namespace - - void - JSONIOHandlerImpl::extendDataset( - Writable * writable, - Parameter< Operation::EXTEND_DATASET > const & parameters ) + } + catch (json::basic_json::type_error &) { - VERIFY_ALWAYS( - m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot extend a dataset in read-only mode." 
) - setAndGetFilePosition( writable ); - refreshFileFromParent( writable ); - auto & j = obtainJsonContents( writable ); + throw std::runtime_error( + "[JSON] The specified location contains no valid dataset"); + } + switch (stringToDatatype(j["datatype"].get())) + { + case Datatype::CFLOAT: + case Datatype::CDOUBLE: + case Datatype::CLONG_DOUBLE: { + // @todo test complex resizing + auto complexExtent = parameters.extent; + complexExtent.push_back(2); + nlohmann::json newData = initializeNDArray(complexExtent); + nlohmann::json &oldData = j["data"]; + mergeInto(newData, oldData); + j["data"] = newData; + break; + } + default: + nlohmann::json newData = initializeNDArray(parameters.extent); + nlohmann::json &oldData = j["data"]; + mergeInto(newData, oldData); + j["data"] = newData; + break; + } + writable->written = true; +} - try - { - auto datasetExtent = getExtent( j ); - VERIFY_ALWAYS( datasetExtent.size( ) == - parameters.extent - .size( ), - "[JSON] Cannot change dimensionality of a dataset" ) - for( size_t currentdim = 0; - currentdim < - parameters.extent - .size( ); - currentdim++ ) - { - VERIFY_ALWAYS( datasetExtent[currentdim] <= - parameters.extent[currentdim], - "[JSON] Cannot shrink the extent of a dataset" ) - } - } catch( json::basic_json::type_error & ) +namespace +{ + // pre-declare since this one is recursive + ChunkTable chunksInJSON(nlohmann::json const &); + ChunkTable chunksInJSON(nlohmann::json const &j) + { + /* + * Idea: + * Iterate (n-1)-dimensional hyperslabs line by line and query + * their chunks recursively. + * If two or more successive (n-1)-dimensional slabs return the + * same chunktable, they can be merged as one chunk. + * + * Notice that this approach is simple, relatively easily + * implemented, but not ideal, since chunks that overlap in some + * dimensions may be ripped apart: + * + * 0123 + * 0 ____ + * 1 ____ + * 2 **__ + * 3 **__ + * 4 **__ + * 5 **__ + * 6 **__ + * 7 **_* + * 8 ___* + * 9 ___* + * + * Since both of the drawn chunks overlap on line 7, this approach + * will return 4 chunks: + * offset - extent + * (2,0) - (4,2) + * (7,0) - (1,2) + * (7,3) - (1,1) + * (8,3) - (2,1) + * + * Hence, in a second phase, the mergeChunks function below will + * merge things back up. + */ + if (!j.is_array()) { - throw std::runtime_error( - "[JSON] The specified location contains no valid dataset" ); + return ChunkTable{WrittenChunkInfo(Offset{}, Extent{})}; } - switch( stringToDatatype( j[ "datatype" ].get< std::string >() ) ) + ChunkTable res; + size_t it = 0; + size_t end = j.size(); + while (it < end) { - case Datatype::CFLOAT: - case Datatype::CDOUBLE: - case Datatype::CLONG_DOUBLE: + // skip empty slots + while (it < end && j[it].is_null()) { - // @todo test complex resizing - auto complexExtent = parameters.extent; - complexExtent.push_back( 2 ); - nlohmann::json newData = initializeNDArray( complexExtent ); - nlohmann::json & oldData = j[ "data" ]; - mergeInto( newData, oldData ); - j[ "data" ] = newData; - break; + ++it; } - default: - nlohmann::json newData = initializeNDArray( parameters.extent ); - nlohmann::json & oldData = j[ "data" ]; - mergeInto( newData, oldData ); - j[ "data" ] = newData; - break; - } - writable->written = true; - } - - namespace - { - // pre-declare since this one is recursive - ChunkTable - chunksInJSON( nlohmann::json const & ); - ChunkTable - chunksInJSON( nlohmann::json const & j ) - { - /* - * Idea: - * Iterate (n-1)-dimensional hyperslabs line by line and query - * their chunks recursively. 
- * If two or more successive (n-1)-dimensional slabs return the - * same chunktable, they can be merged as one chunk. - * - * Notice that this approach is simple, relatively easily - * implemented, but not ideal, since chunks that overlap in some - * dimensions may be ripped apart: - * - * 0123 - * 0 ____ - * 1 ____ - * 2 **__ - * 3 **__ - * 4 **__ - * 5 **__ - * 6 **__ - * 7 **_* - * 8 ___* - * 9 ___* - * - * Since both of the drawn chunks overlap on line 7, this approach - * will return 4 chunks: - * offset - extent - * (2,0) - (4,2) - * (7,0) - (1,2) - * (7,3) - (1,1) - * (8,3) - (2,1) - * - * Hence, in a second phase, the mergeChunks function below will - * merge things back up. - */ - if( !j.is_array() ) + if (it == end) { - return ChunkTable{ WrittenChunkInfo( Offset{}, Extent{} ) }; + break; } - ChunkTable res; - size_t it = 0; - size_t end = j.size(); - while( it < end ) + // get chunking at current position + // and additionally, number of successive rows with the same + // recursive results + size_t const offset = it; + ChunkTable referenceTable = chunksInJSON(j[it]); + ++it; + for (; it < end; ++it) { - // skip empty slots - while( it < end && j[ it ].is_null() ) + if (j[it].is_null()) { - ++it; + break; } - if( it == end ) + ChunkTable currentTable = chunksInJSON(j[it]); + if (currentTable != referenceTable) { break; } - // get chunking at current position - // and additionally, number of successive rows with the same - // recursive results - size_t const offset = it; - ChunkTable referenceTable = chunksInJSON( j[ it ] ); - ++it; - for( ; it < end; ++it ) + } + size_t const extent = it - offset; // sic! no -1 + // now we know the number of successive rows with same rec. + // results, let's extend these results to include dimension 0 + for (auto const &chunk : referenceTable) + { + Offset o = {offset}; + Extent e = {extent}; + for (auto entry : chunk.offset) { - if( j[ it ].is_null() ) - { - break; - } - ChunkTable currentTable = chunksInJSON( j[ it ] ); - if( currentTable != referenceTable ) - { - break; - } + o.push_back(entry); } - size_t const extent = it - offset; // sic! no -1 - // now we know the number of successive rows with same rec. - // results, let's extend these results to include dimension 0 - for( auto const & chunk : referenceTable ) + for (auto entry : chunk.extent) { - Offset o = { offset }; - Extent e = { extent }; - for( auto entry : chunk.offset ) - { - o.push_back( entry ); - } - for( auto entry : chunk.extent ) - { - e.push_back( entry ); - } - res.emplace_back( - std::move( o ), std::move( e ), chunk.sourceID ); + e.push_back(entry); } + res.emplace_back(std::move(o), std::move(e), chunk.sourceID); } - return res; } + return res; + } + /* + * Check whether two chunks can be merged to form a large one + * and optionally return that larger chunk + */ + std::optional + mergeChunks(WrittenChunkInfo const &chunk1, WrittenChunkInfo const &chunk2) + { /* - * Check whether two chunks can be merged to form a large one - * and optionally return that larger chunk + * Idea: + * If two chunks can be merged into one, they agree on offsets and + * extents in all but exactly one dimension dim. + * At dimension dim, the offset of chunk 2 is equal to the offset + * of chunk 1 plus its extent -- or vice versa. */ - std::optional< WrittenChunkInfo > - mergeChunks( - WrittenChunkInfo const & chunk1, - WrittenChunkInfo const & chunk2 ) - { - /* - * Idea: - * If two chunks can be merged into one, they agree on offsets and - * extents in all but exactly one dimension dim. 
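A small standalone worked example (values made up) of what the chunk detection above computes on a dataset's nested "data" array:

    //   col:      0     1     2     3
    //   row 0: [ null, null, 1.0,  2.0 ]
    //   row 1: [ null, null, 3.0,  4.0 ]
    //   row 2: [ 5.0,  6.0,  null, null ]
    //
    // Rows 0 and 1 return the same 1-D chunk table { offset (2), extent (2) },
    // so they are fused along dimension 0; row 2 returns a different table and
    // opens a new chunk.  The resulting chunk table is
    //
    //   offset (0, 2), extent (2, 2)   // the 1.0 .. 4.0 block
    //   offset (2, 0), extent (1, 2)   // the 5.0, 6.0 block
    //
    // and the subsequent merge pass leaves it unchanged, because the two
    // chunks differ in both dimensions.
    #include <nlohmann/json.hpp>

    int main()
    {
        nlohmann::json data = {
            {nullptr, nullptr, 1.0, 2.0},
            {nullptr, nullptr, 3.0, 4.0},
            {5.0, 6.0, nullptr, nullptr}};
        (void)data;
    }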
- * At dimension dim, the offset of chunk 2 is equal to the offset - * of chunk 1 plus its extent -- or vice versa. - */ - unsigned dimensionality = chunk1.extent.size(); - for( unsigned dim = 0; dim < dimensionality; ++dim ) + unsigned dimensionality = chunk1.extent.size(); + for (unsigned dim = 0; dim < dimensionality; ++dim) + { + WrittenChunkInfo const *c1(&chunk1), *c2(&chunk2); + // check if one chunk is the extension of the other at + // dimension dim + // first, let's put things in order + if (c1->offset[dim] > c2->offset[dim]) { - WrittenChunkInfo const *c1( &chunk1 ), *c2( &chunk2 ); - // check if one chunk is the extension of the other at - // dimension dim - // first, let's put things in order - if( c1->offset[ dim ] > c2->offset[ dim ] ) - { - std::swap( c1, c2 ); - } - // now, c1 begins at the lower of both offsets - // next check, that both chunks border one another exactly - if( c2->offset[ dim ] != c1->offset[ dim ] + c1->extent[ dim ] ) + std::swap(c1, c2); + } + // now, c1 begins at the lower of both offsets + // next check, that both chunks border one another exactly + if (c2->offset[dim] != c1->offset[dim] + c1->extent[dim]) + { + continue; + } + // we've got a candidate + // verify that all other dimensions have equal values + auto equalValues = [dimensionality, dim, c1, c2]() { + for (unsigned j = 0; j < dimensionality; ++j) { - continue; - } - // we've got a candidate - // verify that all other dimensions have equal values - auto equalValues = [ dimensionality, dim, c1, c2 ]() { - for( unsigned j = 0; j < dimensionality; ++j ) + if (j == dim) { - if( j == dim ) - { - continue; - } - if( c1->offset[ j ] != c2->offset[ j ] || - c1->extent[ j ] != c2->extent[ j ] ) - { - return false; - } + continue; + } + if (c1->offset[j] != c2->offset[j] || + c1->extent[j] != c2->extent[j]) + { + return false; } - return true; - }; - if( !equalValues() ) - { - continue; } - // we can merge the chunks - Offset offset( c1->offset ); - Extent extent( c1->extent ); - extent[ dim ] += c2->extent[ dim ]; - return std::make_optional( - WrittenChunkInfo( offset, extent ) ); + return true; + }; + if (!equalValues()) + { + continue; } - return std::optional< WrittenChunkInfo >(); + // we can merge the chunks + Offset offset(c1->offset); + Extent extent(c1->extent); + extent[dim] += c2->extent[dim]; + return std::make_optional(WrittenChunkInfo(offset, extent)); } + return std::optional(); + } - /* - * Merge chunks in the chunktable until no chunks are left that can be - * merged. - */ - void - mergeChunks( ChunkTable & table ) + /* + * Merge chunks in the chunktable until no chunks are left that can be + * merged. + */ + void mergeChunks(ChunkTable &table) + { + bool stillChanging; + do { - bool stillChanging; - do - { - stillChanging = false; - auto innerLoops = [ &table ]() { - /* - * Iterate over pairs of chunks in the table. - * When a pair that can be merged is found, merge it, - * delete the original two chunks from the table, - * put the new one in and return. - */ - for( auto i = table.begin(); i < table.end(); ++i ) + stillChanging = false; + auto innerLoops = [&table]() { + /* + * Iterate over pairs of chunks in the table. + * When a pair that can be merged is found, merge it, + * delete the original two chunks from the table, + * put the new one in and return. 
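A concrete instance of the pairwise merge rule (standalone sketch, extents made up): two chunks fuse exactly when they agree in every dimension but one and abut in that remaining dimension.

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main()
    {
        using Index = std::vector<std::uint64_t>;

        // Two 2-D chunks, e.g. written by two different producers:
        Index offsetA{0, 0}, extentA{2, 4}; // rows 0..1, cols 0..3
        Index offsetB{2, 0}, extentB{3, 4}; // rows 2..4, cols 0..3

        // They agree in dimension 1, and B starts exactly where A ends in
        // dimension 0 (0 + 2 == 2), so the rule fuses them into
        // offset (0, 0), extent (5, 4).
        assert(offsetA[1] == offsetB[1] && extentA[1] == extentB[1]);
        assert(offsetA[0] + extentA[0] == offsetB[0]);

        // A chunk with offset (2, 1), extent (3, 4) would *not* merge with A:
        // its offset differs from A's in both dimensions, so there is no
        // single dimension in which the two merely abut.  The enclosing
        // do/while loop rescans the table until no pair merges any more, so
        // chains of abutting chunks collapse step by step.
    }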
+ */ + for (auto i = table.begin(); i < table.end(); ++i) + { + for (auto j = i + 1; j < table.end(); ++j) { - for( auto j = i + 1; j < table.end(); ++j ) + std::optional merged = + mergeChunks(*i, *j); + if (merged) { - std::optional< WrittenChunkInfo > merged = - mergeChunks( *i, *j ); - if( merged ) - { - // erase order is important due to iterator - // invalidation - table.erase( j ); - table.erase( i ); - table.emplace_back( - std::move( merged.value() ) ); - return true; - } + // erase order is important due to iterator + // invalidation + table.erase(j); + table.erase(i); + table.emplace_back(std::move(merged.value())); + return true; } } - return false; - }; - stillChanging = innerLoops(); - } while( stillChanging ); - } - } // namespace + } + return false; + }; + stillChanging = innerLoops(); + } while (stillChanging); + } +} // namespace - void - JSONIOHandlerImpl::availableChunks( - Writable * writable, - Parameter< Operation::AVAILABLE_CHUNKS > & parameters ) +void JSONIOHandlerImpl::availableChunks( + Writable *writable, Parameter ¶meters) +{ + refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable)["data"]; + *parameters.chunks = chunksInJSON(j); + mergeChunks(*parameters.chunks); +} + +void JSONIOHandlerImpl::openFile( + Writable *writable, Parameter const ¶meter) +{ + if (!auxiliary::directory_exists(m_handler->directory)) { - refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable )[ "data" ]; - *parameters.chunks = chunksInJSON( j ); - mergeChunks( *parameters.chunks ); + throw no_such_file_error( + "[JSON] Supplied directory is not valid: " + m_handler->directory); } - void JSONIOHandlerImpl::openFile( - Writable * writable, - Parameter< Operation::OPEN_FILE > const & parameter - ) + std::string name = parameter.name; + if (!auxiliary::ends_with(name, ".json")) { - if( !auxiliary::directory_exists( m_handler->directory ) ) - { - throw no_such_file_error( - "[JSON] Supplied directory is not valid: " + m_handler->directory - ); - } + name += ".json"; + } - std::string name = parameter.name; - if( !auxiliary::ends_with( - name, - ".json" - ) ) - { - name += ".json"; - } + auto file = std::get<0>(getPossiblyExisting(name)); - auto file = std::get< 0 >( getPossiblyExisting( name ) ); + associateWithFile(writable, file); - associateWithFile( - writable, - file - ); + writable->written = true; + writable->abstractFilePosition = std::make_shared(); +} - writable->written = true; - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( ); +void JSONIOHandlerImpl::closeFile( + Writable *writable, Parameter const &) +{ + auto fileIterator = m_files.find(writable); + if (fileIterator != m_files.end()) + { + putJsonContents(fileIterator->second); + // do not invalidate the file + // it still exists, it is just not open + m_files.erase(fileIterator); } +} +void JSONIOHandlerImpl::openPath( + Writable *writable, Parameter const ¶meters) +{ + auto file = refreshFileFromParent(writable); + + nlohmann::json *j = &obtainJsonContents(writable->parent); + auto path = removeSlashes(parameters.path); + path = path.empty() ? 
filepositionOf(writable->parent) + : filepositionOf(writable->parent) + "/" + path; - void JSONIOHandlerImpl::closeFile( - Writable * writable, - Parameter< Operation::CLOSE_FILE > const & - ) + if (writable->abstractFilePosition) { - auto fileIterator = m_files.find( writable ); - if ( fileIterator != m_files.end( ) ) - { - putJsonContents( fileIterator->second ); - // do not invalidate the file - // it still exists, it is just not open - m_files.erase( fileIterator ); - } + *setAndGetFilePosition(writable, false) = + JSONFilePosition(json::json_pointer(path)); } - - - void JSONIOHandlerImpl::openPath( - Writable * writable, - Parameter< Operation::OPEN_PATH > const & parameters - ) + else { - auto file = refreshFileFromParent( writable ); - - nlohmann::json * j = &obtainJsonContents( writable->parent ); - auto path = removeSlashes( parameters.path ); - path = - path.empty( ) - ? filepositionOf( writable->parent ) - : filepositionOf( writable->parent ) + "/" + path; + writable->abstractFilePosition = + std::make_shared(json::json_pointer(path)); + } - if( writable->abstractFilePosition ) - { - *setAndGetFilePosition( - writable, - false - ) = JSONFilePosition( json::json_pointer( path ) ); - } - else - { - writable->abstractFilePosition = - std::make_shared< JSONFilePosition >( json::json_pointer( path ) ); - } + ensurePath(j, removeSlashes(parameters.path)); - ensurePath( - j, - removeSlashes( parameters.path ) - ); + writable->written = true; +} - writable->written = true; +void JSONIOHandlerImpl::openDataset( + Writable *writable, Parameter ¶meters) +{ + refreshFileFromParent(writable); + auto name = removeSlashes(parameters.name); + auto &datasetJson = obtainJsonContents(writable->parent)[name]; + /* + * If the dataset has been opened previously, the path needs not be + * set again. + */ + if (!writable->abstractFilePosition) + { + setAndGetFilePosition(writable, name); } + *parameters.dtype = + Datatype(stringToDatatype(datasetJson["datatype"].get())); + *parameters.extent = getExtent(datasetJson); + writable->written = true; +} - void JSONIOHandlerImpl::openDataset( - Writable * writable, - Parameter< Operation::OPEN_DATASET > & parameters - ) - { - refreshFileFromParent( writable ); - auto name = removeSlashes( parameters.name ); - auto & datasetJson = obtainJsonContents( writable->parent )[name]; - /* - * If the dataset has been opened previously, the path needs not be - * set again. - */ - if(! writable->abstractFilePosition ) - { - setAndGetFilePosition( - writable, - name - ); - } +void JSONIOHandlerImpl::deleteFile( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete files in read-only mode") - *parameters.dtype = - Datatype( stringToDatatype( datasetJson["datatype"].get< std::string >( ) ) ); - *parameters.extent = getExtent( datasetJson ); - writable->written = true; + if (!writable->written) + { + return; } + auto filename = auxiliary::ends_with(parameters.name, ".json") + ? 
parameters.name + : parameters.name + ".json"; - void JSONIOHandlerImpl::deleteFile( - Writable * writable, - Parameter< Operation::DELETE_FILE > const & parameters - ) + auto tuple = getPossiblyExisting(filename); + if (!std::get<2>(tuple)) { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete files in read-only mode" ) - - if( !writable->written ) - { - return; - } + // file is already in the system + auto file = std::get<0>(tuple); + m_dirty.erase(file); + m_jsonVals.erase(file); + file.invalidate(); + } - auto filename = auxiliary::ends_with( - parameters.name, - ".json" - ) ? parameters.name : parameters.name + ".json"; + std::remove(fullPath(filename).c_str()); - auto tuple = getPossiblyExisting( filename ); - if( !std::get< 2 >( tuple ) ) - { - // file is already in the system - auto file = std::get< 0 >( tuple ); - m_dirty.erase( file ); - m_jsonVals.erase( file ); - file.invalidate( ); - } + writable->written = false; +} - std::remove( fullPath( filename ).c_str( ) ); +void JSONIOHandlerImpl::deletePath( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete paths in read-only mode") - writable->written = false; + if (!writable->written) + { + return; } - - void JSONIOHandlerImpl::deletePath( - Writable * writable, - Parameter< Operation::DELETE_PATH > const & parameters - ) + VERIFY_ALWAYS( + !auxiliary::starts_with(parameters.path, '/'), + "[JSON] Paths passed for deletion should be relative, the given path " + "is absolute (starts with '/')") + auto file = refreshFileFromParent(writable); + auto filepos = setAndGetFilePosition(writable, false); + auto path = removeSlashes(parameters.path); + VERIFY(!path.empty(), "[JSON] No path passed for deletion.") + nlohmann::json *j; + if (path == ".") { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete paths in read-only mode" ) - - if( !writable->written ) + auto s = filepos->id.to_string(); + if (s == "/") { - return; + throw std::runtime_error("[JSON] Cannot delete the root group"); } - VERIFY_ALWAYS( !auxiliary::starts_with( - parameters.path, - '/' - ), - "[JSON] Paths passed for deletion should be relative, the given path is absolute (starts with '/')" ) - auto file = refreshFileFromParent( writable ); - auto filepos = setAndGetFilePosition( - writable, - false - ); - auto path = removeSlashes( parameters.path ); - VERIFY( !path.empty( ), - "[JSON] No path passed for deletion." ) - nlohmann::json * j; - if( path == "." 
) - { - auto - s = - filepos->id - .to_string( ); - if( s == "/" ) - { - throw std::runtime_error( "[JSON] Cannot delete the root group" ); - } + auto i = s.rfind('/'); + path = s; + path.replace(0, i + 1, ""); + // path should now be equal to the name of the current group + // go up one group - auto i = s.rfind( '/' ); - path = s; - path.replace( - 0, - i + 1, - "" - ); - // path should now be equal to the name of the current group - // go up one group - - // go to parent directory - // parent exists since we have verified that the current - // directory is != root - parentDir( s ); - j = - &( *obtainJsonContents( file ) )[nlohmann::json::json_pointer( s )]; - } - else - { - if( auxiliary::starts_with( - path, - "./" - ) ) - { - path = auxiliary::replace_first( - path, - "./", - "" - ); - } - j = &obtainJsonContents( writable ); - } - nlohmann::json * lastPointer = j; - bool needToDelete = true; - auto splitPath = auxiliary::split( - path, - "/" - ); - // be careful not to create the group by accident - // the loop will execute at least once - for( auto const & folder: splitPath ) - { - auto it = j->find( folder ); - if( it == j->end( ) ) - { - needToDelete = false; - break; - } - else - { - lastPointer = j; - j = &it.value( ); - } - } - if( needToDelete ) - { - lastPointer->erase( - splitPath[splitPath.size( ) - 1] - ); - } - - putJsonContents( file ); - writable->abstractFilePosition - .reset( ); - writable->written = false; + // go to parent directory + // parent exists since we have verified that the current + // directory is != root + parentDir(s); + j = &(*obtainJsonContents(file))[nlohmann::json::json_pointer(s)]; } - - - void JSONIOHandlerImpl::deleteDataset( - Writable * writable, - Parameter< Operation::DELETE_DATASET > const & parameters - ) + else { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete datasets in read-only mode" ) - - if( !writable->written ) + if (auxiliary::starts_with(path, "./")) { - return; + path = auxiliary::replace_first(path, "./", ""); } - - auto filepos = setAndGetFilePosition( - writable, - false - ); - - auto file = refreshFileFromParent( writable ); - auto dataset = removeSlashes( parameters.name ); - nlohmann::json * parent; - if( dataset == "." ) + j = &obtainJsonContents(writable); + } + nlohmann::json *lastPointer = j; + bool needToDelete = true; + auto splitPath = auxiliary::split(path, "/"); + // be careful not to create the group by accident + // the loop will execute at least once + for (auto const &folder : splitPath) + { + auto it = j->find(folder); + if (it == j->end()) { - auto - s = - filepos->id - .to_string( ); - if( s.empty( ) ) - { - throw std::runtime_error( "[JSON] Invalid position for a dataset in the JSON file." 
); - } - dataset = s; - auto i = dataset.rfind( '/' ); - dataset.replace( - 0, - i + 1, - "" - ); - - parentDir( s ); - parent = - &( *obtainJsonContents( file ) )[nlohmann::json::json_pointer( s )]; + needToDelete = false; + break; } else { - parent = &obtainJsonContents( writable ); + lastPointer = j; + j = &it.value(); } - parent->erase( dataset ); - putJsonContents( file ); - writable->written = false; - writable->abstractFilePosition - .reset( ); } - - - void JSONIOHandlerImpl::deleteAttribute( - Writable * writable, - Parameter< Operation::DELETE_ATT > const & parameters - ) + if (needToDelete) { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot delete attributes in read-only mode" ) - if( !writable->written ) - { - return; - } - setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable ); - auto & j = obtainJsonContents( writable ); - j.erase( parameters.name ); - putJsonContents( file ); + lastPointer->erase(splitPath[splitPath.size() - 1]); } + putJsonContents(file); + writable->abstractFilePosition.reset(); + writable->written = false; +} - void JSONIOHandlerImpl::writeDataset( - Writable * writable, - Parameter< Operation::WRITE_DATASET > const & parameters - ) - { - VERIFY_ALWAYS(m_handler->m_backendAccess != Access::READ_ONLY, - "[JSON] Cannot write data in read-only mode." ); - - auto pos = setAndGetFilePosition( writable ); - auto file = refreshFileFromParent( writable ); - auto & j = obtainJsonContents( writable ); - - verifyDataset( - parameters, - j - ); - - switchType< DatasetWriter >( parameters.dtype, j, parameters ); +void JSONIOHandlerImpl::deleteDataset( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete datasets in read-only mode") - writable->written = true; - putJsonContents( file ); + if (!writable->written) + { + return; } + auto filepos = setAndGetFilePosition(writable, false); - void JSONIOHandlerImpl::writeAttribute( - Writable * writable, - Parameter< Operation::WRITE_ATT > const & parameter - ) + auto file = refreshFileFromParent(writable); + auto dataset = removeSlashes(parameters.name); + nlohmann::json *parent; + if (dataset == ".") { - if(m_handler->m_backendAccess == Access::READ_ONLY ) + auto s = filepos->id.to_string(); + if (s.empty()) { - throw std::runtime_error( "[JSON] Creating a dataset in a file opened as read only is not possible." 
); + throw std::runtime_error( + "[JSON] Invalid position for a dataset in the JSON file."); } + dataset = s; + auto i = dataset.rfind('/'); + dataset.replace(0, i + 1, ""); - /* Sanitize name */ - std::string name = removeSlashes( parameter.name ); - - auto file = refreshFileFromParent( writable ); - auto jsonVal = obtainJsonContents( file ); - auto filePosition = setAndGetFilePosition( writable ); - if( ( *jsonVal )[filePosition->id]["attributes"].empty( ) ) - { - ( *jsonVal )[filePosition->id]["attributes"] = - nlohmann::json::object( ); - } - nlohmann::json value; - switchType< AttributeWriter >( - parameter.dtype, value, parameter.resource ); - ( *jsonVal )[filePosition->id]["attributes"][parameter.name] = { - { - "datatype", - datatypeToString( parameter.dtype ) - }, - { - "value", - value - } - }; - writable->written = true; - m_dirty.emplace( file ); + parentDir(s); + parent = &(*obtainJsonContents(file))[nlohmann::json::json_pointer(s)]; } - - - void JSONIOHandlerImpl::readDataset( - Writable * writable, - Parameter< Operation::READ_DATASET > & parameters - ) + else { - refreshFileFromParent( writable ); - setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable ); - verifyDataset( - parameters, - j - ); - - try - { - switchType< DatasetReader >( - parameters.dtype, j[ "data" ], parameters ); - } catch( json::basic_json::type_error & ) - { - throw std::runtime_error( "[JSON] The given path does not contain a valid dataset." ); - } + parent = &obtainJsonContents(writable); + } + parent->erase(dataset); + putJsonContents(file); + writable->written = false; + writable->abstractFilePosition.reset(); +} + +void JSONIOHandlerImpl::deleteAttribute( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot delete attributes in read-only mode") + if (!writable->written) + { + return; } + setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable); + auto &j = obtainJsonContents(writable); + j.erase(parameters.name); + putJsonContents(file); +} + +void JSONIOHandlerImpl::writeDataset( + Writable *writable, Parameter const ¶meters) +{ + VERIFY_ALWAYS( + m_handler->m_backendAccess != Access::READ_ONLY, + "[JSON] Cannot write data in read-only mode."); + auto pos = setAndGetFilePosition(writable); + auto file = refreshFileFromParent(writable); + auto &j = obtainJsonContents(writable); - void JSONIOHandlerImpl::readAttribute( - Writable * writable, - Parameter< Operation::READ_ATT > & parameters - ) - { - VERIFY_ALWAYS( writable->written, - "[JSON] Attributes have to be written before reading." 
) - refreshFileFromParent( writable ); - auto name = removeSlashes( parameters.name ); - auto & jsonLoc = obtainJsonContents( writable )["attributes"]; - setAndGetFilePosition( writable ); - std::string error_msg("[JSON] No such attribute '"); - error_msg.append(name) - .append("' in the given location '") - .append(jsonLoc.dump()) - .append("'."); - VERIFY_ALWAYS( hasKey( - jsonLoc, - name - ), error_msg ) - auto & j = jsonLoc[name]; - try - { - *parameters.dtype = - Datatype( stringToDatatype( j["datatype"].get< std::string >( ) ) ); - switchType< AttributeReader >( - *parameters.dtype, j[ "value" ], parameters ); - } catch( json::type_error & ) - { - throw std::runtime_error( "[JSON] The given location does not contain a properly formatted attribute" ); - } - } + verifyDataset(parameters, j); + switchType(parameters.dtype, j, parameters); - void JSONIOHandlerImpl::listPaths( - Writable * writable, - Parameter< Operation::LIST_PATHS > & parameters - ) + writable->written = true; + putJsonContents(file); +} + +void JSONIOHandlerImpl::writeAttribute( + Writable *writable, Parameter const ¶meter) +{ + if (m_handler->m_backendAccess == Access::READ_ONLY) { - VERIFY_ALWAYS( writable->written, - "[JSON] Values have to be written before reading a directory" ); - auto & j = obtainJsonContents( writable ); - setAndGetFilePosition( writable ); - refreshFileFromParent( writable ); - parameters.paths - ->clear( ); - for( auto it = j.begin( ); it != j.end( ); it++ ) - { - if( isGroup( it ) ) - { - parameters.paths - ->push_back( it.key( ) ); - } - } + throw std::runtime_error( + "[JSON] Creating a dataset in a file opened as read only is not " + "possible."); } + /* Sanitize name */ + std::string name = removeSlashes(parameter.name); - void JSONIOHandlerImpl::listDatasets( - Writable * writable, - Parameter< Operation::LIST_DATASETS > & parameters - ) + auto file = refreshFileFromParent(writable); + auto jsonVal = obtainJsonContents(file); + auto filePosition = setAndGetFilePosition(writable); + if ((*jsonVal)[filePosition->id]["attributes"].empty()) { - VERIFY_ALWAYS( writable->written, - "[JSON] Datasets have to be written before reading." ) - refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable ); - parameters.datasets - ->clear( ); - for( auto it = j.begin( ); it != j.end( ); it++ ) - { - if( isDataset( it.value() ) ) - { - parameters.datasets - ->push_back( it.key( ) ); - } - } + (*jsonVal)[filePosition->id]["attributes"] = nlohmann::json::object(); } + nlohmann::json value; + switchType(parameter.dtype, value, parameter.resource); + (*jsonVal)[filePosition->id]["attributes"][parameter.name] = { + {"datatype", datatypeToString(parameter.dtype)}, {"value", value}}; + writable->written = true; + m_dirty.emplace(file); +} + +void JSONIOHandlerImpl::readDataset( + Writable *writable, Parameter ¶meters) +{ + refreshFileFromParent(writable); + setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable); + verifyDataset(parameters, j); - - void JSONIOHandlerImpl::listAttributes( - Writable * writable, - Parameter< Operation::LIST_ATTS > & parameters - ) + try { - VERIFY_ALWAYS( writable->written, - "[JSON] Attributes have to be written before reading." 
) - refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( writable ); - auto & j = obtainJsonContents( writable )["attributes"]; - for( auto it = j.begin( ); it != j.end( ); it++ ) - { - parameters.attributes - ->push_back( it.key( ) ); - } + switchType(parameters.dtype, j["data"], parameters); } - - - std::shared_ptr< JSONIOHandlerImpl::FILEHANDLE > - JSONIOHandlerImpl::getFilehandle( - File fileName, - Access access - ) + catch (json::basic_json::type_error &) { - VERIFY_ALWAYS( fileName.valid( ), - "[JSON] Tried opening a file that has been overwritten or deleted." ) - auto path = fullPath( std::move( fileName ) ); - auto fs = std::make_shared< std::fstream >( ); - switch( access ) - { - case Access::CREATE: - case Access::READ_WRITE: - fs->open( - path, - std::ios_base::out | std::ios_base::trunc - ); - break; - case Access::READ_ONLY: - fs->open( - path, - std::ios_base::in - ); - break; - } - VERIFY( fs->good( ), - "[JSON] Failed opening a file" ); - return fs; + throw std::runtime_error( + "[JSON] The given path does not contain a valid dataset."); } +} - - std::string JSONIOHandlerImpl::fullPath( File fileName ) +void JSONIOHandlerImpl::readAttribute( + Writable *writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, + "[JSON] Attributes have to be written before reading.") + refreshFileFromParent(writable); + auto name = removeSlashes(parameters.name); + auto &jsonLoc = obtainJsonContents(writable)["attributes"]; + setAndGetFilePosition(writable); + std::string error_msg("[JSON] No such attribute '"); + error_msg.append(name) + .append("' in the given location '") + .append(jsonLoc.dump()) + .append("'."); + VERIFY_ALWAYS(hasKey(jsonLoc, name), error_msg) + auto &j = jsonLoc[name]; + try { - return fullPath( *fileName ); + *parameters.dtype = + Datatype(stringToDatatype(j["datatype"].get())); + switchType(*parameters.dtype, j["value"], parameters); } + catch (json::type_error &) + { + throw std::runtime_error( + "[JSON] The given location does not contain a properly formatted " + "attribute"); + } +} - - std::string JSONIOHandlerImpl::fullPath( std::string const & fileName ) +void JSONIOHandlerImpl::listPaths( + Writable *writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, + "[JSON] Values have to be written before reading a directory"); + auto &j = obtainJsonContents(writable); + setAndGetFilePosition(writable); + refreshFileFromParent(writable); + parameters.paths->clear(); + for (auto it = j.begin(); it != j.end(); it++) { - if( auxiliary::ends_with( - m_handler->directory, - "/" - ) ) - { - return m_handler->directory + fileName; - } - else + if (isGroup(it)) { - return m_handler->directory + "/" + fileName; + parameters.paths->push_back(it.key()); } } +} - - void JSONIOHandlerImpl::parentDir( std::string & s ) +void JSONIOHandlerImpl::listDatasets( + Writable *writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, "[JSON] Datasets have to be written before reading.") + refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable); + parameters.datasets->clear(); + for (auto it = j.begin(); it != j.end(); it++) { - auto i = s.rfind( '/' ); - if( i != std::string::npos ) + if (isDataset(it.value())) { - s.replace( - i, - s.size( ) - i, - "" - ); - s.shrink_to_fit( ); + parameters.datasets->push_back(it.key()); } } +} - - std::string JSONIOHandlerImpl::filepositionOf( Writable * writable ) +void JSONIOHandlerImpl::listAttributes( + Writable 
*writable, Parameter ¶meters) +{ + VERIFY_ALWAYS( + writable->written, + "[JSON] Attributes have to be written before reading.") + refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable); + auto &j = obtainJsonContents(writable)["attributes"]; + for (auto it = j.begin(); it != j.end(); it++) { - return std::dynamic_pointer_cast< JSONFilePosition >( writable->abstractFilePosition )->id - .to_string( ); + parameters.attributes->push_back(it.key()); } +} - - template< - typename T, - typename Visitor - > - void JSONIOHandlerImpl::syncMultidimensionalJson( - nlohmann::json & j, - Offset const & offset, - Extent const & extent, - Extent const & multiplicator, - Visitor visitor, - T * data, - size_t currentdim - ) +std::shared_ptr +JSONIOHandlerImpl::getFilehandle(File fileName, Access access) +{ + VERIFY_ALWAYS( + fileName.valid(), + "[JSON] Tried opening a file that has been overwritten or deleted.") + auto path = fullPath(std::move(fileName)); + auto fs = std::make_shared(); + switch (access) { - // Offset only relevant for JSON, the array data is contiguous - auto off = offset[currentdim]; - // maybe rewrite iteratively, using a stack that stores for each level the - // current iteration value i - - if( currentdim == offset.size( ) - 1 ) - { - for( std::size_t i = 0; i < extent[currentdim]; ++i ) - { - visitor( - j[i + off], - data[i] - ); - } - } - else - { - for( std::size_t i = 0; i < extent[currentdim]; ++i ) - { - syncMultidimensionalJson< - T, - Visitor - >( - j[i + off], - offset, - extent, - multiplicator, - visitor, - data + i * multiplicator[currentdim], - currentdim + 1 - ); - } - } + case Access::CREATE: + case Access::READ_WRITE: + fs->open(path, std::ios_base::out | std::ios_base::trunc); + break; + case Access::READ_ONLY: + fs->open(path, std::ios_base::in); + break; } + VERIFY(fs->good(), "[JSON] Failed opening a file"); + return fs; +} +std::string JSONIOHandlerImpl::fullPath(File fileName) +{ + return fullPath(*fileName); +} - // multiplicators: an array [m_0,...,m_n] s.t. 
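The "multiplicators" introduced in the comment that follows are ordinary row-major strides; a minimal standalone check of the arithmetic (the extent values are made up):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main()
    {
        // For an extent {4, 3, 2} the strides are {6, 2, 1}: walking from the
        // innermost dimension outwards, each stride is the product of all
        // extents to its right.
        std::vector<std::uint64_t> extent{4, 3, 2};
        std::vector<std::uint64_t> stride(extent.size());
        std::uint64_t n = 1;
        for (std::size_t i = extent.size(); i-- > 0;)
        {
            stride[i] = n;
            n *= extent[i];
        }
        assert((stride == std::vector<std::uint64_t>{6, 2, 1}));

        // Element (i, j, k) of the contiguous buffer then lives at flat index
        // 6*i + 2*j + 1*k, which is how syncMultidimensionalJson above
        // advances its data pointer per dimension.
    }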
- // data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n] - // (m_n = 1) - Extent JSONIOHandlerImpl::getMultiplicators( Extent const & extent ) +std::string JSONIOHandlerImpl::fullPath(std::string const &fileName) +{ + if (auxiliary::ends_with(m_handler->directory, "/")) { - Extent res( extent ); - Extent::value_type n = 1; - size_t i = extent.size( ); - do - { - --i; - res[i] = n; - n *= extent[i]; - } - while( i > 0 ); - return res; + return m_handler->directory + fileName; } + else + { + return m_handler->directory + "/" + fileName; + } +} - - nlohmann::json JSONIOHandlerImpl::initializeNDArray( Extent const & extent ) +void JSONIOHandlerImpl::parentDir(std::string &s) +{ + auto i = s.rfind('/'); + if (i != std::string::npos) { - // idea: begin from the innermost shale and copy the result into the - // outer shales - nlohmann::json accum; - nlohmann::json old; - auto * accum_ptr = & accum; - auto * old_ptr = & old; - for( auto it = extent.rbegin( ); it != extent.rend( ); it++ ) - { - std::swap(old_ptr, accum_ptr); - *accum_ptr = nlohmann::json {}; - for( Extent::value_type i = 0; i < *it; i++ ) - { - (*accum_ptr)[i] = *old_ptr; // copy boi - } - } - return *accum_ptr; + s.replace(i, s.size() - i, ""); + s.shrink_to_fit(); } +} +std::string JSONIOHandlerImpl::filepositionOf(Writable *writable) +{ + return std::dynamic_pointer_cast( + writable->abstractFilePosition) + ->id.to_string(); +} + +template +void JSONIOHandlerImpl::syncMultidimensionalJson( + nlohmann::json &j, + Offset const &offset, + Extent const &extent, + Extent const &multiplicator, + Visitor visitor, + T *data, + size_t currentdim) +{ + // Offset only relevant for JSON, the array data is contiguous + auto off = offset[currentdim]; + // maybe rewrite iteratively, using a stack that stores for each level the + // current iteration value i - Extent JSONIOHandlerImpl::getExtent( nlohmann::json & j ) + if (currentdim == offset.size() - 1) { - Extent res; - nlohmann::json * ptr = &j["data"]; - while( ptr->is_array( ) ) + for (std::size_t i = 0; i < extent[currentdim]; ++i) { - res.push_back( ptr->size( ) ); - ptr = &( *ptr )[0]; + visitor(j[i + off], data[i]); } - switch( stringToDatatype( j["datatype"].get() ) ) + } + else + { + for (std::size_t i = 0; i < extent[currentdim]; ++i) { - case Datatype::CFLOAT: - case Datatype::CDOUBLE: - case Datatype::CLONG_DOUBLE: - // the last "dimension" is only the two entries for the complex - // number, so remove that again - res.erase( res.end() - 1 ); - break; - default: - break; + syncMultidimensionalJson( + j[i + off], + offset, + extent, + multiplicator, + visitor, + data + i * multiplicator[currentdim], + currentdim + 1); } - return res; } +} - - std::string JSONIOHandlerImpl::removeSlashes( std::string s ) +// multiplicators: an array [m_0,...,m_n] s.t. 
+// data[i_0]...[i_n] = data[m_0*i_0+...+m_n*i_n] +// (m_n = 1) +Extent JSONIOHandlerImpl::getMultiplicators(Extent const &extent) +{ + Extent res(extent); + Extent::value_type n = 1; + size_t i = extent.size(); + do { - if( auxiliary::starts_with( - s, - '/' - ) ) - { - s = auxiliary::replace_first( - s, - "/", - "" - ); - } - if( auxiliary::ends_with( - s, - '/' - ) ) + --i; + res[i] = n; + n *= extent[i]; + } while (i > 0); + return res; +} + +nlohmann::json JSONIOHandlerImpl::initializeNDArray(Extent const &extent) +{ + // idea: begin from the innermost shale and copy the result into the + // outer shales + nlohmann::json accum; + nlohmann::json old; + auto *accum_ptr = &accum; + auto *old_ptr = &old; + for (auto it = extent.rbegin(); it != extent.rend(); it++) + { + std::swap(old_ptr, accum_ptr); + *accum_ptr = nlohmann::json{}; + for (Extent::value_type i = 0; i < *it; i++) { - s = auxiliary::replace_last( - s, - "/", - "" - ); + (*accum_ptr)[i] = *old_ptr; // copy boi } - return s; } + return *accum_ptr; +} - - template< typename KeyT > - bool JSONIOHandlerImpl::hasKey( - nlohmann::json & j, - KeyT && key - ) +Extent JSONIOHandlerImpl::getExtent(nlohmann::json &j) +{ + Extent res; + nlohmann::json *ptr = &j["data"]; + while (ptr->is_array()) { - return j.find( std::forward< KeyT >( key ) ) != j.end( ); + res.push_back(ptr->size()); + ptr = &(*ptr)[0]; } - - - void JSONIOHandlerImpl::ensurePath( - nlohmann::json * jsonp, - std::string path - ) + switch (stringToDatatype(j["datatype"].get())) { - auto groups = auxiliary::split( - path, - "/" - ); - for( std::string & group: groups ) - { - // Enforce a JSON object - // the library will automatically create a list if the first - // key added to it is parseable as an int - jsonp = &( *jsonp )[group]; - if (jsonp->is_null()) - { - *jsonp = nlohmann::json::object(); - } - } + case Datatype::CFLOAT: + case Datatype::CDOUBLE: + case Datatype::CLONG_DOUBLE: + // the last "dimension" is only the two entries for the complex + // number, so remove that again + res.erase(res.end() - 1); + break; + default: + break; } + return res; +} - - std::tuple< - File, - std::unordered_map< - Writable *, - File - >::iterator, - bool - > JSONIOHandlerImpl::getPossiblyExisting( std::string file ) +std::string JSONIOHandlerImpl::removeSlashes(std::string s) +{ + if (auxiliary::starts_with(s, '/')) { + s = auxiliary::replace_first(s, "/", ""); + } + if (auxiliary::ends_with(s, '/')) + { + s = auxiliary::replace_last(s, "/", ""); + } + return s; +} - auto it = std::find_if( - m_files.begin( ), - m_files.end( ), - [file]( - std::unordered_map< - Writable *, - File - >::value_type const & entry - ) - { - return *entry.second == file && - entry.second - .valid( ); - } - ); +template +bool JSONIOHandlerImpl::hasKey(nlohmann::json &j, KeyT &&key) +{ + return j.find(std::forward(key)) != j.end(); +} - bool newlyCreated; - File name; - if( it == m_files.end( ) ) - { - name = file; - newlyCreated = true; - } - else - { - name = it->second; - newlyCreated = false; - } - return std::tuple< - File, - std::unordered_map< - Writable *, - File - >::iterator, - bool - >( - std::move( name ), - it, - newlyCreated - ); - } - - - std::shared_ptr< nlohmann::json > - JSONIOHandlerImpl::obtainJsonContents( File file ) - { - VERIFY_ALWAYS( file.valid( ), - "[JSON] File has been overwritten or deleted before reading" ); - auto it = m_jsonVals.find( file ); - if( it != m_jsonVals.end( ) ) +void JSONIOHandlerImpl::ensurePath(nlohmann::json *jsonp, std::string path) +{ + auto groups = 
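// Illustrative sketch: what getMultiplicators above computes, spelled out for
// extent {2, 3, 4}. The last stride is 1 and each stride to its left is the
// product of all extents to the right, so (i0, i1, i2) maps to the flat index
// 12*i0 + 4*i1 + i2 -- the row-major layout the JSON backend assumes.
#include <cassert>
#include <cstddef>
#include <vector>

std::vector<std::size_t> strides(std::vector<std::size_t> const &extent)
{
    std::vector<std::size_t> res(extent.size());
    std::size_t n = 1;
    for (std::size_t i = extent.size(); i-- > 0;)
    {
        res[i] = n;
        n *= extent[i];
    }
    return res;
}

int main()
{
    auto s = strides({2, 3, 4});
    assert((s == std::vector<std::size_t>{12, 4, 1}));
    // element (1, 2, 3) of a 2x3x4 block sits at flat index 23
    assert(1 * s[0] + 2 * s[1] + 3 * s[2] == 23);
}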
auxiliary::split(path, "/"); + for (std::string &group : groups) + { + // Enforce a JSON object + // the library will automatically create a list if the first + // key added to it is parseable as an int + jsonp = &(*jsonp)[group]; + if (jsonp->is_null()) { - return it->second; + *jsonp = nlohmann::json::object(); } - // read from file - auto fh = getFilehandle( - file, - Access::READ_ONLY - ); - std::shared_ptr< nlohmann::json > - res = std::make_shared< nlohmann::json >( ); - *fh >> *res; - VERIFY( fh->good( ), - "[JSON] Failed reading from a file." ); - m_jsonVals.emplace( - file, - res - ); - return res; } +} +std::tuple::iterator, bool> +JSONIOHandlerImpl::getPossiblyExisting(std::string file) +{ - nlohmann::json & - JSONIOHandlerImpl::obtainJsonContents( Writable * writable ) + auto it = std::find_if( + m_files.begin(), + m_files.end(), + [file](std::unordered_map::value_type const &entry) { + return *entry.second == file && entry.second.valid(); + }); + + bool newlyCreated; + File name; + if (it == m_files.end()) { - auto file = refreshFileFromParent( writable ); - auto filePosition = setAndGetFilePosition( - writable, - false - ); - return ( *obtainJsonContents( file ) )[filePosition->id]; + name = file; + newlyCreated = true; } + else + { + name = it->second; + newlyCreated = false; + } + return std:: + tuple::iterator, bool>( + std::move(name), it, newlyCreated); +} - - void JSONIOHandlerImpl::putJsonContents( - File filename, - bool unsetDirty // = true - ) +std::shared_ptr JSONIOHandlerImpl::obtainJsonContents(File file) +{ + VERIFY_ALWAYS( + file.valid(), + "[JSON] File has been overwritten or deleted before reading"); + auto it = m_jsonVals.find(file); + if (it != m_jsonVals.end()) { - VERIFY_ALWAYS( filename.valid( ), - "[JSON] File has been overwritten/deleted before writing" ); - auto it = m_jsonVals.find( filename ); - if( it != m_jsonVals.end( ) ) + return it->second; + } + // read from file + auto fh = getFilehandle(file, Access::READ_ONLY); + std::shared_ptr res = std::make_shared(); + *fh >> *res; + VERIFY(fh->good(), "[JSON] Failed reading from a file."); + m_jsonVals.emplace(file, res); + return res; +} + +nlohmann::json &JSONIOHandlerImpl::obtainJsonContents(Writable *writable) +{ + auto file = refreshFileFromParent(writable); + auto filePosition = setAndGetFilePosition(writable, false); + return (*obtainJsonContents(file))[filePosition->id]; +} + +void JSONIOHandlerImpl::putJsonContents( + File filename, + bool unsetDirty // = true +) +{ + VERIFY_ALWAYS( + filename.valid(), + "[JSON] File has been overwritten/deleted before writing"); + auto it = m_jsonVals.find(filename); + if (it != m_jsonVals.end()) + { + auto fh = getFilehandle(filename, Access::CREATE); + (*it->second)["platform_byte_widths"] = platformSpecifics(); + *fh << *it->second << std::endl; + VERIFY(fh->good(), "[JSON] Failed writing data to disk.") + m_jsonVals.erase(it); + if (unsetDirty) { - auto fh = getFilehandle( - filename, - Access::CREATE - ); - ( *it->second )["platform_byte_widths"] = platformSpecifics( ); - *fh << *it->second << std::endl; - VERIFY( fh->good( ), - "[JSON] Failed writing data to disk." 
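// Illustrative sketch: the caching discipline behind obtainJsonContents /
// putJsonContents above, reduced to its core. A file is parsed at most once;
// the in-memory document stays authoritative until it is written back (and
// evicted) in one piece. Names below are illustration-only.
#include <fstream>
#include <memory>
#include <string>
#include <unordered_map>
#include <nlohmann/json.hpp>

struct JsonCache
{
    std::unordered_map<std::string, std::shared_ptr<nlohmann::json>> cache;

    // read-through: parse from disk only on a cache miss
    std::shared_ptr<nlohmann::json> obtain(std::string const &path)
    {
        auto it = cache.find(path);
        if (it != cache.end())
            return it->second;
        auto res = std::make_shared<nlohmann::json>();
        std::ifstream in(path);
        in >> *res;
        cache.emplace(path, res);
        return res;
    }

    // write-back: serialize the whole document and drop it from the cache
    void put(std::string const &path)
    {
        auto it = cache.find(path);
        if (it == cache.end())
            return; // nothing buffered for this file
        std::ofstream out(path, std::ios_base::trunc);
        out << *it->second << std::endl;
        cache.erase(it);
    }
};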
) - m_jsonVals.erase( it ); - if( unsetDirty ) - { - m_dirty.erase( filename ); - } + m_dirty.erase(filename); } - } +} - - std::shared_ptr< JSONFilePosition > - JSONIOHandlerImpl::setAndGetFilePosition( - Writable * writable, - std::string extend - ) +std::shared_ptr +JSONIOHandlerImpl::setAndGetFilePosition(Writable *writable, std::string extend) +{ + std::string path; + if (writable->abstractFilePosition) { - std::string path; - if( writable->abstractFilePosition ) - { - // do NOT reuse the old pointer, we want to change the file position - // only for the writable! - path = filepositionOf( writable ) + "/" + extend; - } - else if( writable->parent ) + // do NOT reuse the old pointer, we want to change the file position + // only for the writable! + path = filepositionOf(writable) + "/" + extend; + } + else if (writable->parent) + { + path = filepositionOf(writable->parent) + "/" + extend; + } + else + { // we are root + path = extend; + if (!auxiliary::starts_with(path, "/")) { - path = filepositionOf( writable->parent ) + "/" + extend; + path = "/" + path; } - else - { // we are root - path = extend; - if( !auxiliary::starts_with( - path, - "/" - ) ) - { - path = "/" + path; - } - } - auto - res = - std::make_shared< JSONFilePosition >( json::json_pointer( path ) ); - - writable->abstractFilePosition = res; - - return res; } + auto res = std::make_shared(json::json_pointer(path)); + writable->abstractFilePosition = res; - std::shared_ptr< JSONFilePosition > - JSONIOHandlerImpl::setAndGetFilePosition( - Writable * writable, - bool write - ) - { - std::shared_ptr< AbstractFilePosition > res; + return res; +} +std::shared_ptr +JSONIOHandlerImpl::setAndGetFilePosition(Writable *writable, bool write) +{ + std::shared_ptr res; - if( writable->abstractFilePosition ) - { - res = writable->abstractFilePosition; - } - else if( writable->parent ) - { - res = - writable->parent - ->abstractFilePosition; - } - else - { // we are root - res = std::make_shared< JSONFilePosition >( ); - } - if( write ) - { - writable->abstractFilePosition = res; - } - return std::dynamic_pointer_cast< JSONFilePosition >( res ); + if (writable->abstractFilePosition) + { + res = writable->abstractFilePosition; } + else if (writable->parent) + { + res = writable->parent->abstractFilePosition; + } + else + { // we are root + res = std::make_shared(); + } + if (write) + { + writable->abstractFilePosition = res; + } + return std::dynamic_pointer_cast(res); +} - - File JSONIOHandlerImpl::refreshFileFromParent( Writable * writable ) +File JSONIOHandlerImpl::refreshFileFromParent(Writable *writable) +{ + if (writable->parent) { - if( writable->parent ) - { - auto - file = - m_files.find( writable->parent ) - ->second; - associateWithFile( - writable, - file - ); - return file; - } - else - { - return m_files.find( writable ) - ->second; - } + auto file = m_files.find(writable->parent)->second; + associateWithFile(writable, file); + return file; + } + else + { + return m_files.find(writable)->second; } +} +void JSONIOHandlerImpl::associateWithFile(Writable *writable, File file) +{ + // make sure to overwrite + m_files[writable] = std::move(file); +} - void JSONIOHandlerImpl::associateWithFile( - Writable * writable, - File file - ) +bool JSONIOHandlerImpl::isDataset(nlohmann::json const &j) +{ + if (!j.is_object()) { - // make sure to overwrite - m_files[writable] = std::move( file ); + return false; } + auto i = j.find("data"); + return i != j.end() && i.value().is_array(); +} - - bool JSONIOHandlerImpl::isDataset( 
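// Illustrative sketch: the JSONFilePosition handled by setAndGetFilePosition
// above stores a nlohmann::json::json_pointer, i.e. an absolute "/a/b/c" path
// into the document. Extending a writable's position appends one more path
// segment, and the pointer can be used directly for lookups.
#include <cassert>
#include <nlohmann/json.hpp>

int main()
{
    using json = nlohmann::json;
    json doc = {{"data", {{"meshes", {{"E", {{"x", 42}}}}}}}};

    json::json_pointer base("/data/meshes");
    json::json_pointer extended(base.to_string() + "/E/x");

    assert(doc[extended] == 42);
    assert(extended.to_string() == "/data/meshes/E/x");
}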
nlohmann::json const & j ) +bool JSONIOHandlerImpl::isGroup(nlohmann::json::const_iterator it) +{ + auto &j = it.value(); + if (it.key() == "attributes" || it.key() == "platform_byte_widths" || + !j.is_object()) { - if( !j.is_object( ) ) - { - return false; - } - auto i = j.find( "data" ); - return i != j.end( ) && i.value( ).is_array(); + return false; } + auto i = j.find("data"); + return i == j.end() || !i.value().is_array(); +} +template +void JSONIOHandlerImpl::verifyDataset( + Param const ¶meters, nlohmann::json &j) +{ + VERIFY_ALWAYS( + isDataset(j), + "[JSON] Specified dataset does not exist or is not a dataset."); - bool JSONIOHandlerImpl::isGroup( nlohmann::json::const_iterator it ) + try { - auto & j = it.value(); - if( it.key() == "attributes" || it.key() == "platform_byte_widths" || !j.is_object( ) ) + auto datasetExtent = getExtent(j); + VERIFY_ALWAYS( + datasetExtent.size() == parameters.extent.size(), + "[JSON] Read/Write request does not fit the dataset's dimension"); + for (unsigned int dimension = 0; dimension < parameters.extent.size(); + dimension++) { - return false; + VERIFY_ALWAYS( + parameters.offset[dimension] + parameters.extent[dimension] <= + datasetExtent[dimension], + "[JSON] Read/Write request exceeds the dataset's size"); } - auto i = j.find( "data" ); - return i == j.end( ) || !i.value( ).is_array(); + Datatype dt = stringToDatatype(j["datatype"].get()); + VERIFY_ALWAYS( + dt == parameters.dtype, + "[JSON] Read/Write request does not fit the dataset's type"); } - - - template< typename Param > - void JSONIOHandlerImpl::verifyDataset( - Param const & parameters, - nlohmann::json & j - ) + catch (json::basic_json::type_error &) { - VERIFY_ALWAYS( isDataset(j), - "[JSON] Specified dataset does not exist or is not a dataset." ); + throw std::runtime_error( + "[JSON] The given path does not contain a valid dataset."); + } +} - try - { - auto datasetExtent = getExtent( j ); - VERIFY_ALWAYS( datasetExtent.size( ) == - parameters.extent - .size( ), - "[JSON] Read/Write request does not fit the dataset's dimension" ); - for( unsigned int dimension = 0; - dimension < - parameters.extent - .size( ); - dimension++ ) - { - VERIFY_ALWAYS( parameters.offset[dimension] + - parameters.extent[dimension] <= - datasetExtent[dimension], - "[JSON] Read/Write request exceeds the dataset's size" ); - } - Datatype - dt = stringToDatatype( j["datatype"].get< std::string >( ) ); - VERIFY_ALWAYS( dt == parameters.dtype, - "[JSON] Read/Write request does not fit the dataset's type" ); - } catch( json::basic_json::type_error & ) - { - throw std::runtime_error( "[JSON] The given path does not contain a valid dataset." 
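// Illustrative sketch: the hyperslab check performed by verifyDataset above.
// A read/write request (offset, extent) fits into a dataset when both match
// the dataset's dimensionality and offset[d] + extent[d] stays within the
// dataset extent in every dimension d.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Ext = std::vector<std::uint64_t>;

bool fits(Ext const &offset, Ext const &extent, Ext const &dataset)
{
    if (offset.size() != dataset.size() || extent.size() != dataset.size())
        return false;
    for (std::size_t d = 0; d < dataset.size(); ++d)
        if (offset[d] + extent[d] > dataset[d])
            return false;
    return true;
}

int main()
{
    Ext dataset{10, 20};
    assert(fits({2, 5}, {8, 15}, dataset)); // 2+8 <= 10, 5+15 <= 20
    assert(!fits({3, 5}, {8, 15}, dataset)); // 3+8 > 10
}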
); - } +nlohmann::json JSONIOHandlerImpl::platformSpecifics() +{ + nlohmann::json res; + static Datatype datatypes[] = { + Datatype::CHAR, + Datatype::UCHAR, + Datatype::SHORT, + Datatype::INT, + Datatype::LONG, + Datatype::LONGLONG, + Datatype::USHORT, + Datatype::UINT, + Datatype::ULONG, + Datatype::ULONGLONG, + Datatype::FLOAT, + Datatype::DOUBLE, + Datatype::LONG_DOUBLE, + Datatype::CFLOAT, + Datatype::CDOUBLE, + Datatype::CLONG_DOUBLE, + Datatype::BOOL}; + for (auto it = std::begin(datatypes); it != std::end(datatypes); it++) + { + res[datatypeToString(*it)] = toBytes(*it); } + return res; +} +template +void JSONIOHandlerImpl::DatasetWriter::call( + nlohmann::json &json, const Parameter ¶meters) +{ + CppToJSON ctj; + syncMultidimensionalJson( + json["data"], + parameters.offset, + parameters.extent, + getMultiplicators(parameters.extent), + [&ctj](nlohmann::json &j, T const &data) { j = ctj(data); }, + static_cast(parameters.data.get())); +} + +template +void JSONIOHandlerImpl::DatasetReader::call( + nlohmann::json &json, Parameter ¶meters) +{ + JsonToCpp jtc; + syncMultidimensionalJson( + json, + parameters.offset, + parameters.extent, + getMultiplicators(parameters.extent), + [&jtc](nlohmann::json &j, T &data) { data = jtc(j); }, + static_cast(parameters.data.get())); +} + +template +void JSONIOHandlerImpl::AttributeWriter::call( + nlohmann::json &value, Attribute::resource const &resource) +{ + CppToJSON ctj; + value = ctj(std::get(resource)); +} - nlohmann::json JSONIOHandlerImpl::platformSpecifics( ) - { - nlohmann::json res; - static Datatype datatypes[] = { - Datatype::CHAR, - Datatype::UCHAR, - Datatype::SHORT, - Datatype::INT, - Datatype::LONG, - Datatype::LONGLONG, - Datatype::USHORT, - Datatype::UINT, - Datatype::ULONG, - Datatype::ULONGLONG, - Datatype::FLOAT, - Datatype::DOUBLE, - Datatype::LONG_DOUBLE, - Datatype::CFLOAT, - Datatype::CDOUBLE, - Datatype::CLONG_DOUBLE, - Datatype::BOOL - }; - for( auto it = std::begin( datatypes ); - it != std::end( datatypes ); - it++ ) - { - res[datatypeToString( *it )] = toBytes( *it ); - } - return res; - } +template +void JSONIOHandlerImpl::AttributeReader::call( + nlohmann::json &json, Parameter ¶meters) +{ + JsonToCpp jtc; + *parameters.resource = jtc(json); +} +template +nlohmann::json JSONIOHandlerImpl::CppToJSON::operator()(const T &val) +{ + return nlohmann::json(val); +} - template< typename T > - void JSONIOHandlerImpl::DatasetWriter::call( - nlohmann::json & json, - const Parameter< Operation::WRITE_DATASET > & parameters - ) +template +nlohmann::json JSONIOHandlerImpl::CppToJSON>::operator()( + const std::vector &v) +{ + nlohmann::json j; + CppToJSON ctj; + for (auto const &a : v) { - CppToJSON< T > ctj; - syncMultidimensionalJson( - json["data"], - parameters.offset, - parameters.extent, - getMultiplicators( parameters.extent ), - [&ctj]( - nlohmann::json & j, - T const & data - ) - { - j = ctj( data ); - }, - static_cast(parameters.data - .get( )) - ); - } - - - template< typename T > - void JSONIOHandlerImpl::DatasetReader::call( - nlohmann::json & json, - Parameter< Operation::READ_DATASET > & parameters - ) - { - JsonToCpp< - T - > jtc; - syncMultidimensionalJson( - json, - parameters.offset, - parameters.extent, - getMultiplicators( parameters.extent ), - [&jtc]( - nlohmann::json & j, - T & data - ) - { - data = jtc( j ); - }, - static_cast(parameters.data - .get( )) - ); + j.emplace_back(ctj(a)); } + return j; +} - - template< typename T > - void JSONIOHandlerImpl::AttributeWriter::call( - nlohmann::json & value, - 
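// Illustrative sketch: roughly what the "platform_byte_widths" block written
// by platformSpecifics above looks like -- a map from type name to sizeof on
// the writing machine, presumably so a reader can notice width mismatches
// (e.g. LONG being 4 vs 8 bytes). Key names here are illustration-only; the
// real ones come from datatypeToString.
#include <iostream>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json widths;
    widths["INT"] = sizeof(int);
    widths["LONG"] = sizeof(long);
    widths["DOUBLE"] = sizeof(double);
    widths["LONG_DOUBLE"] = sizeof(long double);
    widths["BOOL"] = sizeof(bool);
    std::cout << widths.dump(4) << std::endl;
}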
Attribute::resource const & resource - ) +template +nlohmann::json JSONIOHandlerImpl::CppToJSON>::operator()( + const std::array &v) +{ + nlohmann::json j; + CppToJSON ctj; + for (auto const &a : v) { - CppToJSON< T > ctj; - value = ctj( std::get< T >( resource ) ); + j.emplace_back(ctj(a)); } + return j; +} +template +T JSONIOHandlerImpl::JsonToCpp::operator()(nlohmann::json const &json) +{ + return json.get(); +} - template< typename T > - void JSONIOHandlerImpl::AttributeReader::call( - nlohmann::json & json, - Parameter< Operation::READ_ATT > & parameters - ) +template +std::vector JSONIOHandlerImpl::JsonToCpp>::operator()( + nlohmann::json const &json) +{ + std::vector v; + JsonToCpp jtp; + for (auto const &j : json) { - JsonToCpp< - T - > jtc; - *parameters.resource = jtc( - json - ); + v.emplace_back(jtp(j)); } + return v; +} - - template< typename T > - nlohmann::json - JSONIOHandlerImpl::CppToJSON< T >::operator()( const T & val ) +template +std::array JSONIOHandlerImpl::JsonToCpp>::operator()( + nlohmann::json const &json) +{ + std::array a; + JsonToCpp jtp; + size_t i = 0; + for (auto const &j : json) { - return nlohmann::json( val ); + a[i] = jtp(j); + i++; } - - - template< typename T > - nlohmann::json - JSONIOHandlerImpl::CppToJSON< std::vector< T > >::operator()( const std::vector< T > & v ) + return a; +} + +template +T JSONIOHandlerImpl::JsonToCpp< + T, + typename std::enable_if::value>::type>:: +operator()(nlohmann::json const &j) +{ + try { - nlohmann::json j; - CppToJSON< T > ctj; - for( auto const & a: v ) - { - j.emplace_back( ctj( a ) ); - } - return j; - } - - - template< typename T, int n > - nlohmann::json JSONIOHandlerImpl::CppToJSON< - std::array< - T, - n - > - >::operator()( - const std::array< - T, - n - > & v - ) - { - nlohmann::json j; - CppToJSON< T > ctj; - for( auto const & a: v ) - { - j.emplace_back( ctj( a ) ); - } - return j; + return j.get(); } - - - template< - typename T, - typename Dummy - > - T JSONIOHandlerImpl::JsonToCpp< - T, - Dummy - >::operator()( nlohmann::json const & json ) - { return json.get< T >( ); } - - - template< typename T > - std::vector< T > - JSONIOHandlerImpl::JsonToCpp< std::vector< T > >::operator()( nlohmann::json const & json ) + catch (...) { - std::vector< T > v; - JsonToCpp< T > jtp; - for( auto const & j: json ) - { - v.emplace_back( jtp( j ) ); - } - return v; - } - - - template< typename T, int n > - std::array< - T, - n - > JSONIOHandlerImpl::JsonToCpp< - std::array< - T, - n - > - >::operator()( nlohmann::json const & json ) - { - std::array< - T, - n - > a; - JsonToCpp< T > jtp; - size_t i = 0; - for( auto const & j: json ) - { - a[i] = jtp( j ); - i++; - } - return a; - } - - - template< - typename T - > - T JSONIOHandlerImpl::JsonToCpp< - T, - typename std::enable_if< - std::is_floating_point< - T - >::value - >::type - >::operator()( nlohmann::json const & j ) { - try { - return j.get(); - } catch (...) 
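// Illustrative sketch: Attribute::resource used by AttributeWriter above is a
// variant over the supported attribute types; writing an attribute means
// picking the active alternative with std::get<T> and handing it to the
// type-specific JSON converter. A reduced, illustration-only version:
#include <string>
#include <variant>
#include <vector>
#include <nlohmann/json.hpp>

using Resource = std::variant<double, std::string, std::vector<double>>;

template <typename T>
nlohmann::json attributeToJson(Resource const &resource)
{
    // std::get throws std::bad_variant_access if T is not the active type
    return nlohmann::json(std::get<T>(resource));
}

int main()
{
    Resource r = std::vector<double>{0.0, 0.5, 1.0};
    nlohmann::json j = attributeToJson<std::vector<double>>(r);
    // j is now the JSON array [0.0, 0.5, 1.0]
}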
{ - return std::numeric_limits::quiet_NaN(); - } + return std::numeric_limits::quiet_NaN(); } +} } // namespace openPMD diff --git a/src/Iteration.cpp b/src/Iteration.cpp index 698e4b10e0..eb5e43d189 100644 --- a/src/Iteration.cpp +++ b/src/Iteration.cpp @@ -30,164 +30,160 @@ #include #include - namespace openPMD { using internal::CloseStatus; using internal::DeferredParseAccess; -Iteration::Iteration() : Attributable{ nullptr } +Iteration::Iteration() : Attributable{nullptr} { - Attributable::setData( m_iterationData ); - setTime(static_cast< double >(0)); - setDt(static_cast< double >(1)); + Attributable::setData(m_iterationData); + setTime(static_cast(0)); + setDt(static_cast(1)); setTimeUnitSI(1); - meshes.writable().ownKeyWithinParent = { "meshes" }; - particles.writable().ownKeyWithinParent = { "particles" }; + meshes.writable().ownKeyWithinParent = {"meshes"}; + particles.writable().ownKeyWithinParent = {"particles"}; } -template< typename T > -Iteration& -Iteration::setTime(T newTime) +template +Iteration &Iteration::setTime(T newTime) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("time", newTime); return *this; } -template< typename T > -Iteration& -Iteration::setDt(T newDt) +template +Iteration &Iteration::setDt(T newDt) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("dt", newDt); return *this; } -double -Iteration::timeUnitSI() const +double Iteration::timeUnitSI() const { - return getAttribute("timeUnitSI").get< double >(); + return getAttribute("timeUnitSI").get(); } -Iteration& -Iteration::setTimeUnitSI(double newTimeUnitSI) +Iteration &Iteration::setTimeUnitSI(double newTimeUnitSI) { setAttribute("timeUnitSI", newTimeUnitSI); return *this; } -using iterator_t = Container< Iteration, uint64_t >::iterator; +using iterator_t = Container::iterator; -Iteration & -Iteration::close( bool _flush ) +Iteration &Iteration::close(bool _flush) { - auto & it = get(); + auto &it = get(); StepStatus flag = getStepStatus(); // update close status - switch( it.m_closed ) - { - case CloseStatus::Open: - case CloseStatus::ClosedInFrontend: + switch (it.m_closed) + { + case CloseStatus::Open: + case CloseStatus::ClosedInFrontend: + it.m_closed = CloseStatus::ClosedInFrontend; + break; + case CloseStatus::ClosedTemporarily: + // should we bother to reopen? + if (dirtyRecursive()) + { + // let's reopen it.m_closed = CloseStatus::ClosedInFrontend; - break; - case CloseStatus::ClosedTemporarily: - // should we bother to reopen? 
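// Illustrative sketch: why the floating-point JsonToCpp specialization in the
// JSON handler above needs its fallback. nlohmann::json serializes NaN and
// infinity as null, so a value that was NaN on write comes back as null on
// read; get<double>() then throws and the reader substitutes a quiet NaN.
#include <cmath>
#include <iostream>
#include <limits>
#include <string>
#include <nlohmann/json.hpp>

int main()
{
    nlohmann::json written = std::nan("");
    std::string onDisk = written.dump(); // NaN serializes as: null
    nlohmann::json read = nlohmann::json::parse(onDisk);

    double restored;
    try
    {
        restored = read.get<double>(); // type_error: null is not a number
    }
    catch (nlohmann::json::exception const &)
    {
        restored = std::numeric_limits<double>::quiet_NaN();
    }
    std::cout << std::isnan(restored) << std::endl; // prints: 1
}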
- if( dirtyRecursive() ) - { - // let's reopen - it.m_closed = CloseStatus::ClosedInFrontend; - } - else - { - // don't reopen - it.m_closed = CloseStatus::ClosedInBackend; - } - break; - case CloseStatus::ParseAccessDeferred: - case CloseStatus::ClosedInBackend: - // just keep it like it is - // (this means that closing an iteration that has not been parsed - // yet keeps it re-openable) - break; + } + else + { + // don't reopen + it.m_closed = CloseStatus::ClosedInBackend; + } + break; + case CloseStatus::ParseAccessDeferred: + case CloseStatus::ClosedInBackend: + // just keep it like it is + // (this means that closing an iteration that has not been parsed + // yet keeps it re-openable) + break; } - if( _flush ) + if (_flush) { - if( flag == StepStatus::DuringStep ) + if (flag == StepStatus::DuringStep) { endStep(); - setStepStatus( StepStatus::NoStep ); + setStepStatus(StepStatus::NoStep); } else { // flush things manually Series s = retrieveSeries(); // figure out my iteration number - auto begin = s.indexOf( *this ); + auto begin = s.indexOf(*this); auto end = begin; ++end; - s.flush_impl( begin, end, FlushLevel::UserFlush ); + s.flush_impl(begin, end, FlushLevel::UserFlush); } } else { - if( flag == StepStatus::DuringStep ) + if (flag == StepStatus::DuringStep) { - throw std::runtime_error( "Using deferred Iteration::close " - "unimplemented in auto-stepping mode." ); + throw std::runtime_error( + "Using deferred Iteration::close " + "unimplemented in auto-stepping mode."); } } return *this; } -Iteration & -Iteration::open() +Iteration &Iteration::open() { - auto & it = get(); - if( it.m_closed == CloseStatus::ParseAccessDeferred ) + auto &it = get(); + if (it.m_closed == CloseStatus::ParseAccessDeferred) { it.m_closed = CloseStatus::Open; } runDeferredParseAccess(); Series s = retrieveSeries(); // figure out my iteration number - auto begin = s.indexOf( *this ); - s.openIteration( begin->first, *this ); + auto begin = s.indexOf(*this); + s.openIteration(begin->first, *this); IOHandler()->flush(); return *this; } -bool -Iteration::closed() const +bool Iteration::closed() const { - switch( get().m_closed ) - { - case CloseStatus::ParseAccessDeferred: - case CloseStatus::Open: - /* - * Temporarily closing a file is something that the openPMD API - * does for optimization purposes. - * Logically to the user, it is still open. - */ - case CloseStatus::ClosedTemporarily: - return false; - case CloseStatus::ClosedInFrontend: - case CloseStatus::ClosedInBackend: - return true; + switch (get().m_closed) + { + case CloseStatus::ParseAccessDeferred: + case CloseStatus::Open: + /* + * Temporarily closing a file is something that the openPMD API + * does for optimization purposes. + * Logically to the user, it is still open. + */ + case CloseStatus::ClosedTemporarily: + return false; + case CloseStatus::ClosedInFrontend: + case CloseStatus::ClosedInBackend: + return true; } - throw std::runtime_error( "Unreachable!" ); + throw std::runtime_error("Unreachable!"); } -bool -Iteration::closedByWriter() const +bool Iteration::closedByWriter() const { using bool_type = unsigned char; - if( containsAttribute( "closed" ) ) + if (containsAttribute("closed")) { - return getAttribute( "closed" ).get< bool_type >() == 0u ? false : true; + return getAttribute("closed").get() == 0u ? 
false : true; } else { @@ -195,35 +191,36 @@ Iteration::closedByWriter() const } } -void -Iteration::flushFileBased(std::string const& filename, uint64_t i) +void Iteration::flushFileBased(std::string const &filename, uint64_t i) { /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ Series s = retrieveSeries(); - if( !written() ) + if (!written()) { /* create file */ - Parameter< Operation::CREATE_FILE > fCreate; + Parameter fCreate; fCreate.name = filename; IOHandler()->enqueue(IOTask(&s.writable(), fCreate)); /* create basePath */ - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = auxiliary::replace_first(s.basePath(), "%T/", ""); IOHandler()->enqueue(IOTask(&s.iterations, pCreate)); /* create iteration path */ pCreate.path = std::to_string(i); IOHandler()->enqueue(IOTask(this, pCreate)); - } else + } + else { // operations for create mode - if((IOHandler()->m_frontendAccess == Access::CREATE ) && - ( (IOHandler()->backendName() == "MPI_ADIOS1") || (IOHandler()->backendName() == "ADIOS1") ) ) + if ((IOHandler()->m_frontendAccess == Access::CREATE) && + ((IOHandler()->backendName() == "MPI_ADIOS1") || + (IOHandler()->backendName() == "ADIOS1"))) { - Parameter< Operation::OPEN_FILE > fOpen; + Parameter fOpen; fOpen.name = filename; fOpen.encoding = IterationEncoding::fileBased; IOHandler()->enqueue(IOTask(&s.writable(), fOpen)); @@ -234,19 +231,18 @@ Iteration::flushFileBased(std::string const& filename, uint64_t i) // operations for read/read-write mode /* open file */ - s.openIteration( i, *this ); + s.openIteration(i, *this); } flush(); } -void -Iteration::flushGroupBased(uint64_t i) +void Iteration::flushGroupBased(uint64_t i) { - if( !written() ) + if (!written()) { /* create iteration path */ - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = std::to_string(i); IOHandler()->enqueue(IOTask(this, pCreate)); } @@ -254,45 +250,44 @@ Iteration::flushGroupBased(uint64_t i) flush(); } -void -Iteration::flushVariableBased( uint64_t i ) +void Iteration::flushVariableBased(uint64_t i) { - if( !written() ) + if (!written()) { /* create iteration path */ - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; pOpen.path = ""; - IOHandler()->enqueue( IOTask( this, pOpen ) ); - this->setAttribute( "snapshot", i ); + IOHandler()->enqueue(IOTask(this, pOpen)); + this->setAttribute("snapshot", i); } flush(); } -void -Iteration::flush() +void Iteration::flush() { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& m : meshes ) + for (auto &m : meshes) m.second.flush(m.first); - for( auto& species : particles ) + for (auto &species : particles) species.second.flush(species.first); - } else + } + else { /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ Series s = retrieveSeries(); - if( !meshes.empty() || s.containsAttribute("meshesPath") ) + if (!meshes.empty() || s.containsAttribute("meshesPath")) { - if( !s.containsAttribute("meshesPath") ) + if (!s.containsAttribute("meshesPath")) { s.setMeshesPath("meshes/"); s.flushMeshesPath(); } meshes.flush(s.meshesPath()); - for( auto& m : meshes ) + for (auto &m : meshes) m.second.flush(m.first); } else @@ -300,15 +295,15 @@ Iteration::flush() meshes.dirty() = false; } - if( !particles.empty() || s.containsAttribute("particlesPath") ) + if (!particles.empty() || s.containsAttribute("particlesPath")) { - if( 
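// Illustrative sketch: what the basePath manipulation in flushFileBased above
// amounts to. The openPMD base path is "/data/%T/", where %T stands for the
// iteration number; group/variable-based files keep one group per iteration
// under it, while file-based encoding strips the "%T/" placeholder because
// each file holds a single iteration. The helper below is illustration-only.
#include <cassert>
#include <string>

std::string replace_first(
    std::string s, std::string const &from, std::string const &to)
{
    auto pos = s.find(from);
    if (pos != std::string::npos)
        s.replace(pos, from.size(), to);
    return s;
}

int main()
{
    std::string basePath = "/data/%T/";
    // group/variable based: one path per iteration inside the same file
    assert(replace_first(basePath, "%T", "100") == "/data/100/");
    // file based: the iteration lives directly under /data/ in its own file
    assert(replace_first(basePath, "%T/", "") == "/data/");
}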
!s.containsAttribute("particlesPath") ) + if (!s.containsAttribute("particlesPath")) { s.setParticlesPath("particles/"); s.flushParticlesPath(); } particles.flush(s.particlesPath()); - for( auto& species : particles ) + for (auto &species : particles) species.second.flush(species.first); } else @@ -320,146 +315,147 @@ Iteration::flush() } } -void Iteration::deferParseAccess( DeferredParseAccess dr ) +void Iteration::deferParseAccess(DeferredParseAccess dr) { get().m_deferredParseAccess = - std::make_optional< DeferredParseAccess >( std::move( dr ) ); + std::make_optional(std::move(dr)); } void Iteration::read() { - auto & it = get(); - if( !it.m_deferredParseAccess.has_value() ) + auto &it = get(); + if (!it.m_deferredParseAccess.has_value()) { return; } - auto const & deferred = it.m_deferredParseAccess.value(); - if( deferred.fileBased ) + auto const &deferred = it.m_deferredParseAccess.value(); + if (deferred.fileBased) { - readFileBased( deferred.filename, deferred.path ); + readFileBased(deferred.filename, deferred.path); } else { - readGorVBased( deferred.path ); + readGorVBased(deferred.path); } // reset this thing - it.m_deferredParseAccess = std::optional< DeferredParseAccess >(); + it.m_deferredParseAccess = std::optional(); } -void Iteration::reread( std::string const & path ) +void Iteration::reread(std::string const &path) { - if( get().m_deferredParseAccess.has_value() ) + if (get().m_deferredParseAccess.has_value()) { throw std::runtime_error( "[Iteration] Internal control flow error: Trying to reread an " - "iteration that has not yet been read for its first time." ); + "iteration that has not yet been read for its first time."); } - read_impl( path ); + read_impl(path); } void Iteration::readFileBased( - std::string filePath, std::string const & groupPath ) + std::string filePath, std::string const &groupPath) { auto series = retrieveSeries(); - series.readOneIterationFileBased( filePath ); + series.readOneIterationFileBased(filePath); - read_impl( groupPath ); + read_impl(groupPath); } -void Iteration::readGorVBased( std::string const & groupPath ) +void Iteration::readGorVBased(std::string const &groupPath) { - read_impl(groupPath ); + read_impl(groupPath); } -void Iteration::read_impl( std::string const & groupPath ) +void Iteration::read_impl(std::string const &groupPath) { - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; pOpen.path = groupPath; - IOHandler()->enqueue( IOTask( this, pOpen ) ); + IOHandler()->enqueue(IOTask(this, pOpen)); using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "dt"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::FLOAT ) - setDt(Attribute(*aRead.resource).get< float >()); - else if( *aRead.dtype == DT::DOUBLE ) - setDt(Attribute(*aRead.resource).get< double >()); - else if( *aRead.dtype == DT::LONG_DOUBLE ) - setDt(Attribute(*aRead.resource).get< long double >()); + if (*aRead.dtype == DT::FLOAT) + setDt(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::DOUBLE) + setDt(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::LONG_DOUBLE) + setDt(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'dt'"); aRead.name = "time"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::FLOAT ) - setTime(Attribute(*aRead.resource).get< float >()); - else if( *aRead.dtype == DT::DOUBLE ) - setTime(Attribute(*aRead.resource).get< double >()); - 
else if( *aRead.dtype == DT::LONG_DOUBLE ) - setTime(Attribute(*aRead.resource).get< long double >()); + if (*aRead.dtype == DT::FLOAT) + setTime(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::DOUBLE) + setTime(Attribute(*aRead.resource).get()); + else if (*aRead.dtype == DT::LONG_DOUBLE) + setTime(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'time'"); aRead.name = "timeUnitSI"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::DOUBLE ) - setTimeUnitSI(Attribute(*aRead.resource).get< double >()); + if (*aRead.dtype == DT::DOUBLE) + setTimeUnitSI(Attribute(*aRead.resource).get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'timeUnitSI'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'timeUnitSI'"); /* Find the root point [Series] of this file, * meshesPath and particlesPath are stored there */ Series s = retrieveSeries(); - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; std::string version = s.openPMD(); bool hasMeshes = false; bool hasParticles = false; - if( version == "1.0.0" || version == "1.0.1" ) + if (version == "1.0.0" || version == "1.0.1") { IOHandler()->enqueue(IOTask(this, pList)); IOHandler()->flush(); hasMeshes = std::count( - pList.paths->begin(), - pList.paths->end(), - auxiliary::replace_last(s.meshesPath(), "/", "") - ) == 1; - hasParticles = std::count( - pList.paths->begin(), - pList.paths->end(), - auxiliary::replace_last(s.particlesPath(), "/", "") - ) == 1; + pList.paths->begin(), + pList.paths->end(), + auxiliary::replace_last(s.meshesPath(), "/", "")) == 1; + hasParticles = + std::count( + pList.paths->begin(), + pList.paths->end(), + auxiliary::replace_last(s.particlesPath(), "/", "")) == 1; pList.paths->clear(); - } else + } + else { hasMeshes = s.containsAttribute("meshesPath"); hasParticles = s.containsAttribute("particlesPath"); } - if( hasMeshes ) + if (hasMeshes) { pOpen.path = s.meshesPath(); IOHandler()->enqueue(IOTask(&meshes, pOpen)); - meshes.readAttributes( ReadMode::FullyReread ); + meshes.readAttributes(ReadMode::FullyReread); - internal::EraseStaleEntries< decltype( meshes ) > map{ meshes }; + internal::EraseStaleEntries map{meshes}; /* obtain all non-scalar meshes */ IOHandler()->enqueue(IOTask(&meshes, pList)); IOHandler()->flush(); - Parameter< Operation::LIST_ATTS > aList; - for( auto const& mesh_name : *pList.paths ) + Parameter aList; + for (auto const &mesh_name : *pList.paths) { - Mesh& m = map[mesh_name]; + Mesh &m = map[mesh_name]; pOpen.path = mesh_name; aList.attributes->clear(); IOHandler()->enqueue(IOTask(&m, pOpen)); @@ -470,9 +466,9 @@ void Iteration::read_impl( std::string const & groupPath ) auto att_end = aList.attributes->end(); auto value = std::find(att_begin, att_end, "value"); auto shape = std::find(att_begin, att_end, "shape"); - if( value != att_end && shape != att_end ) + if (value != att_end && shape != att_end) { - MeshRecordComponent& mrc = m[MeshRecordComponent::SCALAR]; + MeshRecordComponent &mrc = m[MeshRecordComponent::SCALAR]; mrc.parent() = m.parent(); IOHandler()->enqueue(IOTask(&mrc, pOpen)); IOHandler()->flush(); @@ -482,18 +478,18 @@ void Iteration::read_impl( std::string const & groupPath ) } /* obtain all scalar meshes */ - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(&meshes, dList)); IOHandler()->flush(); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& mesh_name : 
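// Illustrative sketch: the pattern behind the dt/time/timeUnitSI reads above.
// The file may store the attribute as float, double or long double, so the
// reader branches on the reported datatype and converts; anything else is
// rejected rather than guessed. Names below are illustration-only.
#include <stdexcept>
#include <variant>

enum class DT { FLOAT, DOUBLE, LONG_DOUBLE, STRING };
using Resource = std::variant<float, double, long double>;

double readFloatingPoint(DT dtype, Resource const &resource)
{
    switch (dtype)
    {
    case DT::FLOAT:
        return static_cast<double>(std::get<float>(resource));
    case DT::DOUBLE:
        return std::get<double>(resource);
    case DT::LONG_DOUBLE:
        return static_cast<double>(std::get<long double>(resource));
    default:
        throw std::runtime_error("Unexpected Attribute datatype");
    }
}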
*dList.datasets ) + Parameter dOpen; + for (auto const &mesh_name : *dList.datasets) { - Mesh& m = map[mesh_name]; + Mesh &m = map[mesh_name]; dOpen.name = mesh_name; IOHandler()->enqueue(IOTask(&m, dOpen)); IOHandler()->flush(); - MeshRecordComponent& mrc = m[MeshRecordComponent::SCALAR]; + MeshRecordComponent &mrc = m[MeshRecordComponent::SCALAR]; mrc.parent() = m.parent(); IOHandler()->enqueue(IOTask(&mrc, dOpen)); IOHandler()->flush(); @@ -508,22 +504,22 @@ void Iteration::read_impl( std::string const & groupPath ) meshes.dirty() = false; } - if( hasParticles ) + if (hasParticles) { pOpen.path = s.particlesPath(); IOHandler()->enqueue(IOTask(&particles, pOpen)); - particles.readAttributes( ReadMode::FullyReread ); + particles.readAttributes(ReadMode::FullyReread); /* obtain all particle species */ pList.paths->clear(); IOHandler()->enqueue(IOTask(&particles, pList)); IOHandler()->flush(); - internal::EraseStaleEntries< decltype( particles ) > map{ particles }; - for( auto const& species_name : *pList.paths ) + internal::EraseStaleEntries map{particles}; + for (auto const &species_name : *pList.paths) { - ParticleSpecies& p = map[species_name]; + ParticleSpecies &p = map[species_name]; pOpen.path = species_name; IOHandler()->enqueue(IOTask(&p, pOpen)); IOHandler()->flush(); @@ -535,47 +531,46 @@ void Iteration::read_impl( std::string const & groupPath ) particles.dirty() = false; } - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } -AdvanceStatus -Iteration::beginStep() +AdvanceStatus Iteration::beginStep() { using IE = IterationEncoding; auto series = retrieveSeries(); // Initialize file with this to quiet warnings // The following switch is comprehensive - internal::AttributableData * file = nullptr; - switch( series.iterationEncoding() ) + internal::AttributableData *file = nullptr; + switch (series.iterationEncoding()) { - case IE::fileBased: - file = &Attributable::get(); - break; - case IE::groupBased: - case IE::variableBased: - file = &series.get(); - break; + case IE::fileBased: + file = &Attributable::get(); + break; + case IE::groupBased: + case IE::variableBased: + file = &series.get(); + break; } AdvanceStatus status = series.advance( - AdvanceMode::BEGINSTEP, *file, series.indexOf( *this ), *this ); - if( status != AdvanceStatus::OK ) + AdvanceMode::BEGINSTEP, *file, series.indexOf(*this), *this); + if (status != AdvanceStatus::OK) { return status; } // re-read -> new datasets might be available - if( ( series.iterationEncoding() == IE::groupBased || - series.iterationEncoding() == IE::variableBased ) && - ( this->IOHandler()->m_frontendAccess == Access::READ_ONLY || - this->IOHandler()->m_frontendAccess == Access::READ_WRITE ) ) + if ((series.iterationEncoding() == IE::groupBased || + series.iterationEncoding() == IE::variableBased) && + (this->IOHandler()->m_frontendAccess == Access::READ_ONLY || + this->IOHandler()->m_frontendAccess == Access::READ_WRITE)) { bool previous = series.iterations.written(); series.iterations.written() = false; auto oldType = this->IOHandler()->m_frontendAccess; auto newType = - const_cast< Access * >( &this->IOHandler()->m_frontendAccess ); + const_cast(&this->IOHandler()->m_frontendAccess); *newType = Access::READ_WRITE; - series.readGorVBased( false ); + series.readGorVBased(false); *newType = oldType; series.iterations.written() = previous; } @@ -583,86 +578,81 @@ Iteration::beginStep() return status; } -void -Iteration::endStep() +void Iteration::endStep() { using IE = IterationEncoding; auto series = 
retrieveSeries(); // Initialize file with this to quiet warnings // The following switch is comprehensive - internal::AttributableData * file = nullptr; - switch( series.iterationEncoding() ) + internal::AttributableData *file = nullptr; + switch (series.iterationEncoding()) { - case IE::fileBased: - file = &Attributable::get(); - break; - case IE::groupBased: - case IE::variableBased: - file = &series.get(); - break; + case IE::fileBased: + file = &Attributable::get(); + break; + case IE::groupBased: + case IE::variableBased: + file = &series.get(); + break; } // @todo filebased check - series.advance( - AdvanceMode::ENDSTEP, *file, series.indexOf( *this ), *this ); + series.advance(AdvanceMode::ENDSTEP, *file, series.indexOf(*this), *this); } -StepStatus -Iteration::getStepStatus() +StepStatus Iteration::getStepStatus() { Series s = retrieveSeries(); - switch( s.iterationEncoding() ) + switch (s.iterationEncoding()) { using IE = IterationEncoding; - case IE::fileBased: - return get().m_stepStatus; - case IE::groupBased: - case IE::variableBased: - return s.get().m_stepStatus; - default: - throw std::runtime_error( "[Iteration] unreachable" ); + case IE::fileBased: + return get().m_stepStatus; + case IE::groupBased: + case IE::variableBased: + return s.get().m_stepStatus; + default: + throw std::runtime_error("[Iteration] unreachable"); } } -void -Iteration::setStepStatus( StepStatus status ) +void Iteration::setStepStatus(StepStatus status) { Series s = retrieveSeries(); - switch( s.iterationEncoding() ) + switch (s.iterationEncoding()) { using IE = IterationEncoding; - case IE::fileBased: - get().m_stepStatus = status; - break; - case IE::groupBased: - case IE::variableBased: - s.get().m_stepStatus = status; - break; - default: - throw std::runtime_error( "[Iteration] unreachable" ); + case IE::fileBased: + get().m_stepStatus = status; + break; + case IE::groupBased: + case IE::variableBased: + s.get().m_stepStatus = status; + break; + default: + throw std::runtime_error("[Iteration] unreachable"); } } -bool -Iteration::dirtyRecursive() const +bool Iteration::dirtyRecursive() const { - if( dirty() ) + if (dirty()) { return true; } - if( particles.dirty() || meshes.dirty() ) + if (particles.dirty() || meshes.dirty()) { return true; } - for( auto const & pair : particles ) + for (auto const &pair : particles) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } } - for( auto const & pair : meshes ) + for (auto const &pair : meshes) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } @@ -670,8 +660,7 @@ Iteration::dirtyRecursive() const return false; } -void -Iteration::linkHierarchy(Writable& w) +void Iteration::linkHierarchy(Writable &w) { Attributable::linkHierarchy(w); meshes.linkHierarchy(this->writable()); @@ -680,19 +669,18 @@ Iteration::linkHierarchy(Writable& w) void Iteration::runDeferredParseAccess() { - if( IOHandler()->m_frontendAccess == Access::CREATE ) + if (IOHandler()->m_frontendAccess == Access::CREATE) { return; } auto oldAccess = IOHandler()->m_frontendAccess; - auto newAccess = - const_cast< Access * >( &IOHandler()->m_frontendAccess ); + auto newAccess = const_cast(&IOHandler()->m_frontendAccess); *newAccess = Access::READ_WRITE; try { read(); } - catch( ... ) + catch (...) 
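// Illustrative sketch: runDeferredParseAccess above temporarily switches the
// frontend access mode and restores it by hand in both the success and the
// exception path. The same guarantee can be expressed as a small RAII guard
// (illustration-only; the handler code keeps its explicit try/catch).
#include <utility>

template <typename T>
class ScopedOverride
{
public:
    ScopedOverride(T &target, T temporary)
        : m_target(target), m_old(std::move(target))
    {
        m_target = std::move(temporary);
    }
    ~ScopedOverride()
    {
        m_target = std::move(m_old); // restored even if an exception unwinds
    }
    ScopedOverride(ScopedOverride const &) = delete;
    ScopedOverride &operator=(ScopedOverride const &) = delete;

private:
    T &m_target;
    T m_old;
};
// usage idea: { ScopedOverride<Access> guard(access, Access::READ_WRITE);
//               read(); } // old access restored at end of scope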
{ *newAccess = oldAccess; throw; @@ -700,31 +688,19 @@ void Iteration::runDeferredParseAccess() *newAccess = oldAccess; } -template float -Iteration::time< float >() const; -template double -Iteration::time< double >() const; -template long double -Iteration::time< long double >() const; - -template float -Iteration::dt< float >() const; -template double -Iteration::dt< double >() const; -template long double -Iteration::dt< long double >() const; - -template -Iteration& Iteration::setTime< float >(float time); -template -Iteration& Iteration::setTime< double >(double time); -template -Iteration& Iteration::setTime< long double >(long double time); - -template -Iteration& Iteration::setDt< float >(float dt); -template -Iteration& Iteration::setDt< double >(double dt); -template -Iteration& Iteration::setDt< long double >(long double dt); -} // openPMD +template float Iteration::time() const; +template double Iteration::time() const; +template long double Iteration::time() const; + +template float Iteration::dt() const; +template double Iteration::dt() const; +template long double Iteration::dt() const; + +template Iteration &Iteration::setTime(float time); +template Iteration &Iteration::setTime(double time); +template Iteration &Iteration::setTime(long double time); + +template Iteration &Iteration::setDt(float dt); +template Iteration &Iteration::setDt(double dt); +template Iteration &Iteration::setDt(long double dt); +} // namespace openPMD diff --git a/src/IterationEncoding.cpp b/src/IterationEncoding.cpp index e463233b42..b86bb1c672 100644 --- a/src/IterationEncoding.cpp +++ b/src/IterationEncoding.cpp @@ -22,21 +22,20 @@ #include - -std::ostream& -openPMD::operator<<(std::ostream& os, openPMD::IterationEncoding const& ie) +std::ostream & +openPMD::operator<<(std::ostream &os, openPMD::IterationEncoding const &ie) { - switch( ie ) + switch (ie) { - case openPMD::IterationEncoding::fileBased: - os << "fileBased"; - break; - case openPMD::IterationEncoding::groupBased: - os << "groupBased"; - break; - case openPMD::IterationEncoding::variableBased: - os << "variableBased"; - break; + case openPMD::IterationEncoding::fileBased: + os << "fileBased"; + break; + case openPMD::IterationEncoding::groupBased: + os << "groupBased"; + break; + case openPMD::IterationEncoding::variableBased: + os << "variableBased"; + break; } return os; } diff --git a/src/Mesh.cpp b/src/Mesh.cpp index 6b0dc1b2d2..bc0664d2fd 100644 --- a/src/Mesh.cpp +++ b/src/Mesh.cpp @@ -36,225 +36,217 @@ Mesh::Mesh() setGeometry(Geometry::cartesian); setDataOrder(DataOrder::C); - setAxisLabels({"x"}); //empty strings are not allowed in HDF5 - setGridSpacing(std::vector< double >{1}); + setAxisLabels({"x"}); // empty strings are not allowed in HDF5 + setGridSpacing(std::vector{1}); setGridGlobalOffset({0}); setGridUnitSI(1); } -Mesh::Geometry -Mesh::geometry() const +Mesh::Geometry Mesh::geometry() const { std::string ret = geometryString(); - if( "cartesian" == ret ) { return Geometry::cartesian; } - else if( "thetaMode" == ret ) { return Geometry::thetaMode; } - else if( "cylindrical" == ret ) { return Geometry::cylindrical; } - else if( "spherical" == ret ) { return Geometry::spherical; } - else { return Geometry::other; } + if ("cartesian" == ret) + { + return Geometry::cartesian; + } + else if ("thetaMode" == ret) + { + return Geometry::thetaMode; + } + else if ("cylindrical" == ret) + { + return Geometry::cylindrical; + } + else if ("spherical" == ret) + { + return Geometry::spherical; + } + else + { + return 
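// Illustrative sketch: the block of "template float Iteration::time<float>()
// const;" lines above is explicit template instantiation. The template body
// stays in the .cpp file and only the listed specializations are compiled and
// exported, which keeps the header light while still supporting float, double
// and long double. A minimal standalone version of the same idiom:
//
// value.hpp (assumed header):
//   template <typename T> T scaled(T value);
//
// value.cpp (assumed source):
#include <type_traits>

template <typename T>
T scaled(T value)
{
    static_assert(std::is_floating_point<T>::value, "floating point only");
    return value * T(2);
}

// the only instantiations that end up in the object file:
template float scaled<float>(float);
template double scaled<double>(double);
template long double scaled<long double>(long double);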
Geometry::other; + } } std::string Mesh::geometryString() const { - return getAttribute( "geometry" ).get< std::string >(); + return getAttribute("geometry").get(); } -Mesh& -Mesh::setGeometry(Mesh::Geometry g) +Mesh &Mesh::setGeometry(Mesh::Geometry g) { - switch( g ) + switch (g) { - case Geometry::cartesian: - setAttribute("geometry", std::string("cartesian")); - break; - case Geometry::thetaMode: - setAttribute("geometry", std::string("thetaMode")); - break; - case Geometry::cylindrical: - setAttribute("geometry", std::string("cylindrical")); - break; - case Geometry::spherical: - setAttribute("geometry", std::string("spherical")); - break; - case Geometry::other: - // use the std::string overload to be more specific - setAttribute("geometry", std::string("other")); - break; + case Geometry::cartesian: + setAttribute("geometry", std::string("cartesian")); + break; + case Geometry::thetaMode: + setAttribute("geometry", std::string("thetaMode")); + break; + case Geometry::cylindrical: + setAttribute("geometry", std::string("cylindrical")); + break; + case Geometry::spherical: + setAttribute("geometry", std::string("spherical")); + break; + case Geometry::other: + // use the std::string overload to be more specific + setAttribute("geometry", std::string("other")); + break; } return *this; } -Mesh & Mesh::setGeometry( std::string geometry ) +Mesh &Mesh::setGeometry(std::string geometry) { std::string knownGeometries[] = { - "cartesian", "thetaMode", "cylindrical", "spherical", "other" }; - if( // 1. condition: geometry is not one of the known geometries + "cartesian", "thetaMode", "cylindrical", "spherical", "other"}; + if ( // 1. condition: geometry is not one of the known geometries std::find( - std::begin( knownGeometries ), - std::end( knownGeometries ), - geometry ) == std::end( knownGeometries ) + std::begin(knownGeometries), std::end(knownGeometries), geometry) == + std::end(knownGeometries) // 2. 
condition: prefix is not already there - && !auxiliary::starts_with( geometry, std::string( "other:" ) ) ) + && !auxiliary::starts_with(geometry, std::string("other:"))) { geometry = "other:" + geometry; } - setAttribute( "geometry", std::move( geometry ) ); + setAttribute("geometry", std::move(geometry)); return *this; } -std::string -Mesh::geometryParameters() const +std::string Mesh::geometryParameters() const { - return getAttribute("geometryParameters").get< std::string >(); + return getAttribute("geometryParameters").get(); } -Mesh& -Mesh::setGeometryParameters(std::string const& gp) +Mesh &Mesh::setGeometryParameters(std::string const &gp) { setAttribute("geometryParameters", gp); return *this; } -Mesh::DataOrder -Mesh::dataOrder() const +Mesh::DataOrder Mesh::dataOrder() const { - return Mesh::DataOrder(getAttribute("dataOrder").get< std::string >().c_str()[0]); + return Mesh::DataOrder( + getAttribute("dataOrder").get().c_str()[0]); } -Mesh& -Mesh::setDataOrder(Mesh::DataOrder dor) +Mesh &Mesh::setDataOrder(Mesh::DataOrder dor) { - setAttribute( - "dataOrder", - std::string(1u, static_cast(dor))); + setAttribute("dataOrder", std::string(1u, static_cast(dor))); return *this; } -std::vector< std::string > -Mesh::axisLabels() const +std::vector Mesh::axisLabels() const { - return getAttribute("axisLabels").get< std::vector< std::string > >(); + return getAttribute("axisLabels").get>(); } -Mesh& -Mesh::setAxisLabels(std::vector< std::string > const & als) +Mesh &Mesh::setAxisLabels(std::vector const &als) { setAttribute("axisLabels", als); return *this; } -template< typename T, typename > -Mesh& -Mesh::setGridSpacing(std::vector< T > const & gs) +template +Mesh &Mesh::setGridSpacing(std::vector const &gs) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("gridSpacing", gs); return *this; } -template -Mesh& -Mesh::setGridSpacing(std::vector< float > const & gs); -template -Mesh& -Mesh::setGridSpacing(std::vector< double > const & gs); -template -Mesh& -Mesh::setGridSpacing(std::vector< long double > const & gs); - -std::vector< double > -Mesh::gridGlobalOffset() const +template Mesh &Mesh::setGridSpacing(std::vector const &gs); +template Mesh &Mesh::setGridSpacing(std::vector const &gs); +template Mesh &Mesh::setGridSpacing(std::vector const &gs); + +std::vector Mesh::gridGlobalOffset() const { - return getAttribute("gridGlobalOffset").get< std::vector< double> >(); + return getAttribute("gridGlobalOffset").get>(); } -Mesh& -Mesh::setGridGlobalOffset(std::vector< double > const & ggo) +Mesh &Mesh::setGridGlobalOffset(std::vector const &ggo) { setAttribute("gridGlobalOffset", ggo); return *this; } -double -Mesh::gridUnitSI() const +double Mesh::gridUnitSI() const { - return getAttribute("gridUnitSI").get< double >(); + return getAttribute("gridUnitSI").get(); } -Mesh& -Mesh::setGridUnitSI(double gusi) +Mesh &Mesh::setGridUnitSI(double gusi) { setAttribute("gridUnitSI", gusi); return *this; } -Mesh& -Mesh::setUnitDimension(std::map< UnitDimension, double > const& udim) +Mesh &Mesh::setUnitDimension(std::map const &udim) { - if( !udim.empty() ) + if (!udim.empty()) { - std::array< double, 7 > tmpUnitDimension = this->unitDimension(); - for( auto const& entry : udim ) + std::array tmpUnitDimension = this->unitDimension(); + for (auto const &entry : udim) tmpUnitDimension[static_cast(entry.first)] = entry.second; 
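// Illustrative sketch: the std::array<double, 7> handled by setUnitDimension
// above holds the powers of the seven SI base dimensions (length, mass, time,
// electric current, thermodynamic temperature, amount of substance, luminous
// intensity), as laid out in the openPMD standard. An electric field
// (V/m = kg * m * s^-3 * A^-1), for instance, would be encoded like this;
// the local enum is illustration-only, the real one lives in openPMD.
#include <array>
#include <cstddef>

enum class UnitDimension : std::size_t
{
    L = 0, // length
    M, // mass
    T, // time
    I, // electric current
    theta, // thermodynamic temperature
    N, // amount of substance
    J // luminous intensity
};

int main()
{
    std::array<double, 7> unitDimension{}; // all powers default to 0
    unitDimension[static_cast<std::size_t>(UnitDimension::L)] = 1.0;
    unitDimension[static_cast<std::size_t>(UnitDimension::M)] = 1.0;
    unitDimension[static_cast<std::size_t>(UnitDimension::T)] = -3.0;
    unitDimension[static_cast<std::size_t>(UnitDimension::I)] = -1.0;
    // unitDimension == {1, 1, -3, -1, 0, 0, 0}
}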
setAttribute("unitDimension", tmpUnitDimension); } return *this; } -template< typename T, typename > -Mesh& -Mesh::setTimeOffset(T to) +template +Mesh &Mesh::setTimeOffset(T to) { - static_assert(std::is_floating_point< T >::value, "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("timeOffset", to); return *this; } -template -Mesh& -Mesh::setTimeOffset( long double ); +template Mesh &Mesh::setTimeOffset(long double); -template -Mesh& -Mesh::setTimeOffset( double ); +template Mesh &Mesh::setTimeOffset(double); -template -Mesh& -Mesh::setTimeOffset( float ); +template Mesh &Mesh::setTimeOffset(float); -void -Mesh::flush_impl(std::string const& name) +void Mesh::flush_impl(std::string const &name) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& comp : *this ) + for (auto &comp : *this) comp.second.flush(comp.first); - } else + } + else { - if( !written() ) + if (!written()) { - if( scalar() ) + if (scalar()) { - MeshRecordComponent& mrc = at(RecordComponent::SCALAR); + MeshRecordComponent &mrc = at(RecordComponent::SCALAR); mrc.parent() = parent(); mrc.flush(name); IOHandler()->flush(); - writable().abstractFilePosition = mrc.writable().abstractFilePosition; + writable().abstractFilePosition = + mrc.writable().abstractFilePosition; written() = true; - } else + } + else { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = name; IOHandler()->enqueue(IOTask(this, pCreate)); - for( auto& comp : *this ) + for (auto &comp : *this) comp.second.parent() = &this->writable(); } } - if( scalar() ) + if (scalar()) { - for( auto& comp : *this ) + for (auto &comp : *this) { comp.second.flush(name); writable().abstractFilePosition = @@ -263,7 +255,7 @@ Mesh::flush_impl(std::string const& name) } else { - for( auto& comp : *this ) + for (auto &comp : *this) comp.second.flush(comp.first); } @@ -271,115 +263,127 @@ Mesh::flush_impl(std::string const& name) } } -void -Mesh::read() +void Mesh::read() { - internal::EraseStaleEntries< Mesh & > map{ *this }; + internal::EraseStaleEntries map{*this}; using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "geometry"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::STRING) { - std::string tmpGeometry = Attribute(*aRead.resource).get< std::string >(); - if( "cartesian" == tmpGeometry ) + std::string tmpGeometry = Attribute(*aRead.resource).get(); + if ("cartesian" == tmpGeometry) setGeometry(Geometry::cartesian); - else if( "thetaMode" == tmpGeometry ) + else if ("thetaMode" == tmpGeometry) setGeometry(Geometry::thetaMode); - else if( "cylindrical" == tmpGeometry ) + else if ("cylindrical" == tmpGeometry) setGeometry(Geometry::cylindrical); - else if( "spherical" == tmpGeometry ) + else if ("spherical" == tmpGeometry) setGeometry(Geometry::spherical); else setGeometry(tmpGeometry); } else - throw std::runtime_error("Unexpected Attribute datatype for 'geometry'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'geometry'"); aRead.name = "dataOrder"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::CHAR ) - setDataOrder(static_cast(Attribute(*aRead.resource).get< char >())); - else if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::CHAR) + setDataOrder( + 
static_cast(Attribute(*aRead.resource).get())); + else if (*aRead.dtype == DT::STRING) { - std::string tmpDataOrder = Attribute(*aRead.resource).get< std::string >(); - if( tmpDataOrder.size() == 1 ) + std::string tmpDataOrder = + Attribute(*aRead.resource).get(); + if (tmpDataOrder.size() == 1) setDataOrder(static_cast(tmpDataOrder[0])); else - throw std::runtime_error("Unexpected Attribute value for 'dataOrder': " + tmpDataOrder); + throw std::runtime_error( + "Unexpected Attribute value for 'dataOrder': " + tmpDataOrder); } else - throw std::runtime_error("Unexpected Attribute datatype for 'dataOrder'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'dataOrder'"); aRead.name = "axisLabels"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::VEC_STRING || *aRead.dtype == DT::STRING) - setAxisLabels(Attribute(*aRead.resource).get< std::vector< std::string > >()); + if (*aRead.dtype == DT::VEC_STRING || *aRead.dtype == DT::STRING) + setAxisLabels( + Attribute(*aRead.resource).get>()); else - throw std::runtime_error("Unexpected Attribute datatype for 'axisLabels'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'axisLabels'"); aRead.name = "gridSpacing"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); Attribute a = Attribute(*aRead.resource); - if( *aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT ) - setGridSpacing(a.get< std::vector< float > >()); - else if( *aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE ) - setGridSpacing(a.get< std::vector< double > >()); - else if( *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE ) - setGridSpacing(a.get< std::vector< long double > >()); + if (*aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT) + setGridSpacing(a.get>()); + else if (*aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE) + setGridSpacing(a.get>()); + else if ( + *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE) + setGridSpacing(a.get>()); else - throw std::runtime_error("Unexpected Attribute datatype for 'gridSpacing'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'gridSpacing'"); aRead.name = "gridGlobalOffset"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE ) - setGridGlobalOffset(Attribute(*aRead.resource).get< std::vector< double > >()); + if (*aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE) + setGridGlobalOffset( + Attribute(*aRead.resource).get>()); else - throw std::runtime_error("Unexpected Attribute datatype for 'gridGlobalOffset'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'gridGlobalOffset'"); aRead.name = "gridUnitSI"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::DOUBLE ) - setGridUnitSI(Attribute(*aRead.resource).get< double >()); + if (*aRead.dtype == DT::DOUBLE) + setGridUnitSI(Attribute(*aRead.resource).get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'gridUnitSI'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'gridUnitSI'"); - if( scalar() ) + if (scalar()) { /* using operator[] will incorrectly update parent */ map.at(MeshRecordComponent::SCALAR).read(); - } else + } + else { - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); IOHandler()->flush(); - Parameter< Operation::OPEN_PATH > pOpen; - for( auto const& 
component : *pList.paths ) + Parameter pOpen; + for (auto const &component : *pList.paths) { - MeshRecordComponent& rc = map[ component ]; + MeshRecordComponent &rc = map[component]; pOpen.path = component; IOHandler()->enqueue(IOTask(&rc, pOpen)); rc.get().m_isConstant = true; rc.read(); } - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); IOHandler()->flush(); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component : *dList.datasets ) + Parameter dOpen; + for (auto const &component : *dList.datasets) { - MeshRecordComponent & rc = map[ component ]; + MeshRecordComponent &rc = map[component]; dOpen.name = component; IOHandler()->enqueue(IOTask(&rc, dOpen)); IOHandler()->flush(); @@ -392,45 +396,45 @@ Mesh::read() readBase(); - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } -} // openPMD +} // namespace openPMD -std::ostream& -openPMD::operator<<(std::ostream& os, openPMD::Mesh::Geometry const& go) +std::ostream & +openPMD::operator<<(std::ostream &os, openPMD::Mesh::Geometry const &go) { - switch( go ) + switch (go) { - case openPMD::Mesh::Geometry::cartesian: - os<<"cartesian"; - break; - case openPMD::Mesh::Geometry::thetaMode: - os<<"thetaMode"; - break; - case openPMD::Mesh::Geometry::cylindrical: - os<<"cylindrical"; - break; - case openPMD::Mesh::Geometry::spherical: - os<<"spherical"; - break; - case openPMD::Mesh::Geometry::other: - os<<"other"; - break; + case openPMD::Mesh::Geometry::cartesian: + os << "cartesian"; + break; + case openPMD::Mesh::Geometry::thetaMode: + os << "thetaMode"; + break; + case openPMD::Mesh::Geometry::cylindrical: + os << "cylindrical"; + break; + case openPMD::Mesh::Geometry::spherical: + os << "spherical"; + break; + case openPMD::Mesh::Geometry::other: + os << "other"; + break; } return os; } -std::ostream& -openPMD::operator<<(std::ostream& os, openPMD::Mesh::DataOrder const& dor) +std::ostream & +openPMD::operator<<(std::ostream &os, openPMD::Mesh::DataOrder const &dor) { - switch( dor ) + switch (dor) { - case openPMD::Mesh::DataOrder::C: - os<<'C'; - break; - case openPMD::Mesh::DataOrder::F: - os<<'F'; - break; + case openPMD::Mesh::DataOrder::C: + os << 'C'; + break; + case openPMD::Mesh::DataOrder::F: + os << 'F'; + break; } return os; } diff --git a/src/ParticlePatches.cpp b/src/ParticlePatches.cpp index 0319f408df..952aff37ac 100644 --- a/src/ParticlePatches.cpp +++ b/src/ParticlePatches.cpp @@ -20,55 +20,56 @@ */ #include "openPMD/ParticlePatches.hpp" - namespace openPMD { -size_t -ParticlePatches::numPatches() const +size_t ParticlePatches::numPatches() const { - if( this->empty() ) + if (this->empty()) return 0; return this->at("numParticles").at(RecordComponent::SCALAR).getExtent()[0]; } -void -ParticlePatches::read() +void ParticlePatches::read() { - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); IOHandler()->flush(); - Parameter< Operation::OPEN_PATH > pOpen; - for( auto const& record_name : *pList.paths ) + Parameter pOpen; + for (auto const &record_name : *pList.paths) { - PatchRecord& pr = (*this)[record_name]; + PatchRecord &pr = (*this)[record_name]; pOpen.path = record_name; IOHandler()->enqueue(IOTask(&pr, pOpen)); pr.read(); } - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); IOHandler()->flush(); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component_name : *dList.datasets ) + 
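The stream operators reformatted above can be used directly from user code; a tiny sketch (assuming the usual <openPMD/openPMD.hpp> umbrella header):

#include <openPMD/openPMD.hpp>
#include <iostream>

int main()
{
    using namespace openPMD;
    // uses the operator<< overloads for Mesh::Geometry and Mesh::DataOrder above
    std::cout << Mesh::Geometry::thetaMode << ", " << Mesh::DataOrder::C
              << std::endl; // prints: thetaMode, C
}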
Parameter dOpen; + for (auto const &component_name : *dList.datasets) { - if( !("numParticles" == component_name || "numParticlesOffset" == component_name) ) - throw std::runtime_error("Unexpected record component" + component_name + "in particlePatch"); + if (!("numParticles" == component_name || + "numParticlesOffset" == component_name)) + throw std::runtime_error( + "Unexpected record component" + component_name + + "in particlePatch"); - PatchRecord& pr = Container< PatchRecord >::operator[](component_name); - PatchRecordComponent& prc = pr[RecordComponent::SCALAR]; + PatchRecord &pr = Container::operator[](component_name); + PatchRecordComponent &prc = pr[RecordComponent::SCALAR]; prc.parent() = pr.parent(); dOpen.name = component_name; IOHandler()->enqueue(IOTask(&pr, dOpen)); IOHandler()->enqueue(IOTask(&prc, dOpen)); IOHandler()->flush(); - if( determineDatatype< uint64_t >() != *dOpen.dtype ) - throw std::runtime_error("Unexpected datatype for " + component_name); + if (determineDatatype() != *dOpen.dtype) + throw std::runtime_error( + "Unexpected datatype for " + component_name); /* allow all attributes to be set */ prc.written() = false; @@ -79,4 +80,4 @@ ParticlePatches::read() prc.read(); } } -} // openPMD +} // namespace openPMD diff --git a/src/ParticleSpecies.cpp b/src/ParticleSpecies.cpp index 0e71845891..8b00413e80 100644 --- a/src/ParticleSpecies.cpp +++ b/src/ParticleSpecies.cpp @@ -26,38 +26,37 @@ #include #include - namespace openPMD { ParticleSpecies::ParticleSpecies() { - particlePatches.writable().ownKeyWithinParent = { "particlePatches" }; + particlePatches.writable().ownKeyWithinParent = {"particlePatches"}; } -void -ParticleSpecies::read() +void ParticleSpecies::read() { /* obtain all non-scalar records */ - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); IOHandler()->flush(); - internal::EraseStaleEntries< ParticleSpecies & > map{ *this }; + internal::EraseStaleEntries map{*this}; - Parameter< Operation::OPEN_PATH > pOpen; - Parameter< Operation::LIST_ATTS > aList; + Parameter pOpen; + Parameter aList; bool hasParticlePatches = false; - for( auto const& record_name : *pList.paths ) + for (auto const &record_name : *pList.paths) { - if( record_name == "particlePatches" ) + if (record_name == "particlePatches") { hasParticlePatches = true; pOpen.path = "particlePatches"; IOHandler()->enqueue(IOTask(&particlePatches, pOpen)); particlePatches.read(); - } else + } + else { - Record& r = map[record_name]; + Record &r = map[record_name]; pOpen.path = record_name; aList.attributes->clear(); IOHandler()->enqueue(IOTask(&r, pOpen)); @@ -68,10 +67,10 @@ ParticleSpecies::read() auto att_end = aList.attributes->end(); auto value = std::find(att_begin, att_end, "value"); auto shape = std::find(att_begin, att_end, "shape"); - if( value != att_end && shape != att_end ) + if (value != att_end && shape != att_end) { - internal::EraseStaleEntries< Record & > scalarMap( r ); - RecordComponent& rc = scalarMap[RecordComponent::SCALAR]; + internal::EraseStaleEntries scalarMap(r); + RecordComponent &rc = scalarMap[RecordComponent::SCALAR]; rc.parent() = r.parent(); IOHandler()->enqueue(IOTask(&rc, pOpen)); IOHandler()->flush(); @@ -81,28 +80,29 @@ ParticleSpecies::read() } } - if( !hasParticlePatches ) + if (!hasParticlePatches) { - auto & container = particlePatches.container(); - container.erase( "numParticles" ); - container.erase( "numParticlesOffset" ); + auto &container = particlePatches.container(); + 
container.erase("numParticles"); + container.erase("numParticlesOffset"); } /* obtain all scalar records */ - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); IOHandler()->flush(); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& record_name : *dList.datasets ) + Parameter dOpen; + for (auto const &record_name : *dList.datasets) { - try { - Record& r = map[record_name]; + try + { + Record &r = map[record_name]; dOpen.name = record_name; IOHandler()->enqueue(IOTask(&r, dOpen)); IOHandler()->flush(); - internal::EraseStaleEntries< Record & > scalarMap( r ); - RecordComponent& rc = scalarMap[RecordComponent::SCALAR]; + internal::EraseStaleEntries scalarMap(r); + RecordComponent &rc = scalarMap[RecordComponent::SCALAR]; rc.parent() = r.parent(); IOHandler()->enqueue(IOTask(&rc, dOpen)); IOHandler()->flush(); @@ -110,84 +110,84 @@ ParticleSpecies::read() rc.resetDataset(Dataset(*dOpen.dtype, *dOpen.extent)); rc.written() = true; r.read(); - } catch( std::runtime_error const & ) + } + catch (std::runtime_error const &) { std::cerr << "WARNING: Skipping invalid openPMD record '" - << record_name << "'" - << std::endl; - while( ! IOHandler()->m_work.empty() ) + << record_name << "'" << std::endl; + while (!IOHandler()->m_work.empty()) IOHandler()->m_work.pop(); - map.forget( record_name ); + map.forget(record_name); //(*this)[record_name].erase(RecordComponent::SCALAR); - //this->erase(record_name); + // this->erase(record_name); } } - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } namespace { - bool flushParticlePatches( ParticlePatches const & particlePatches ) + bool flushParticlePatches(ParticlePatches const &particlePatches) { - return particlePatches.find("numParticles") != particlePatches.end() - && particlePatches.find("numParticlesOffset") != particlePatches.end() - && particlePatches.size() >= 3; + return particlePatches.find("numParticles") != particlePatches.end() && + particlePatches.find("numParticlesOffset") != + particlePatches.end() && + particlePatches.size() >= 3; } -} +} // namespace -void -ParticleSpecies::flush(std::string const& path) +void ParticleSpecies::flush(std::string const &path) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& record : *this ) + for (auto &record : *this) record.second.flush(record.first); - for( auto& patch : particlePatches ) + for (auto &patch : particlePatches) patch.second.flush(patch.first); - } else + } + else { auto it = find("position"); - if ( it != end() ) + if (it != end()) it->second.setUnitDimension({{UnitDimension::L, 1}}); it = find("positionOffset"); - if ( it != end() ) + if (it != end()) it->second.setUnitDimension({{UnitDimension::L, 1}}); - Container< Record >::flush(path); + Container::flush(path); - for( auto& record : *this ) + for (auto &record : *this) record.second.flush(record.first); - if( flushParticlePatches( particlePatches ) ) + if (flushParticlePatches(particlePatches)) { particlePatches.flush("particlePatches"); - for( auto& patch : particlePatches ) + for (auto &patch : particlePatches) patch.second.flush(patch.first); } } } -bool -ParticleSpecies::dirtyRecursive() const +bool ParticleSpecies::dirtyRecursive() const { - if( dirty() ) + if (dirty()) { return true; } - for( auto const & pair : *this ) + for (auto const &pair : *this) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } 
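flushParticlePatches() above gates patch flushing on the two bookkeeping records plus at least one user-defined patch record (size() >= 3). A self-contained mirror of that rule, with a plain std::set standing in for the ParticlePatches container:

#include <set>
#include <string>

bool wouldFlushPatches(std::set<std::string> const &patchRecords)
{
    // same condition as flushParticlePatches(): both counters present
    // and at least one further patch record
    return patchRecords.count("numParticles") == 1 &&
        patchRecords.count("numParticlesOffset") == 1 &&
        patchRecords.size() >= 3;
}

// wouldFlushPatches({"numParticles", "numParticlesOffset"})           -> false
// wouldFlushPatches({"numParticles", "numParticlesOffset", "extent"}) -> true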
} - if( flushParticlePatches( particlePatches ) ) + if (flushParticlePatches(particlePatches)) { - for( auto const & pair : particlePatches ) + for (auto const &pair : particlePatches) { - if( pair.second.dirtyRecursive() ) + if (pair.second.dirtyRecursive()) { return true; } diff --git a/src/ReadIterations.cpp b/src/ReadIterations.cpp index 92d520a92b..bd70a91366 100644 --- a/src/ReadIterations.cpp +++ b/src/ReadIterations.cpp @@ -27,36 +27,33 @@ namespace openPMD { SeriesIterator::SeriesIterator() : m_series() -{ -} +{} -SeriesIterator::SeriesIterator( Series series ) - : m_series( std::move( series ) ) +SeriesIterator::SeriesIterator(Series series) : m_series(std::move(series)) { auto it = series.get().iterations.begin(); - if( it == series.get().iterations.end() ) + if (it == series.get().iterations.end()) { *this = end(); return; } else { - auto openIteration = [ &it ]() - { + auto openIteration = [&it]() { /* * @todo * Is that really clean? * Use case: See Python ApiTest testListSeries: * Call listSeries twice. */ - if( it->second.get().m_closed != - internal::CloseStatus::ClosedInBackend ) + if (it->second.get().m_closed != + internal::CloseStatus::ClosedInBackend) { it->second.open(); } }; AdvanceStatus status{}; - switch( series.iterationEncoding() ) + switch (series.iterationEncoding()) { case IterationEncoding::fileBased: /* @@ -79,31 +76,31 @@ SeriesIterator::SeriesIterator( Series series ) openIteration(); break; } - if( status == AdvanceStatus::OVER ) + if (status == AdvanceStatus::OVER) { *this = end(); return; } - it->second.setStepStatus( StepStatus::DuringStep ); + it->second.setStepStatus(StepStatus::DuringStep); } m_currentIteration = it->first; } -SeriesIterator & SeriesIterator::operator++() +SeriesIterator &SeriesIterator::operator++() { - if( !m_series.has_value() ) + if (!m_series.has_value()) { *this = end(); return *this; } - Series & series = m_series.value(); - auto & iterations = series.iterations; - auto & currentIteration = iterations[ m_currentIteration ]; - if( !currentIteration.closed() ) + Series &series = m_series.value(); + auto &iterations = series.iterations; + auto ¤tIteration = iterations[m_currentIteration]; + if (!currentIteration.closed()) { currentIteration.close(); } - switch( series.iterationEncoding() ) + switch (series.iterationEncoding()) { using IE = IterationEncoding; case IE::groupBased: @@ -111,47 +108,47 @@ SeriesIterator & SeriesIterator::operator++() // since we are in group-based iteration layout, it does not // matter which iteration we begin a step upon AdvanceStatus status = currentIteration.beginStep(); - if( status == AdvanceStatus::OVER ) + if (status == AdvanceStatus::OVER) { *this = end(); return *this; } - currentIteration.setStepStatus( StepStatus::DuringStep ); + currentIteration.setStepStatus(StepStatus::DuringStep); break; } default: break; } - auto it = iterations.find( m_currentIteration ); + auto it = iterations.find(m_currentIteration); auto itEnd = iterations.end(); - if( it == itEnd ) + if (it == itEnd) { *this = end(); return *this; } ++it; - if( it == itEnd ) + if (it == itEnd) { *this = end(); return *this; } m_currentIteration = it->first; - if( it->second.get().m_closed != internal::CloseStatus::ClosedInBackend ) + if (it->second.get().m_closed != internal::CloseStatus::ClosedInBackend) { it->second.open(); } - switch( series.iterationEncoding() ) + switch (series.iterationEncoding()) { using IE = IterationEncoding; case IE::fileBased: { - auto & iteration = series.iterations[ m_currentIteration ]; + auto 
&iteration = series.iterations[m_currentIteration]; AdvanceStatus status = iteration.beginStep(); - if( status == AdvanceStatus::OVER ) + if (status == AdvanceStatus::OVER) { *this = end(); return *this; } - iteration.setStepStatus( StepStatus::DuringStep ); + iteration.setStepStatus(StepStatus::DuringStep); break; } default: @@ -163,18 +160,18 @@ SeriesIterator & SeriesIterator::operator++() IndexedIteration SeriesIterator::operator*() { return IndexedIteration( - m_series.value().iterations[ m_currentIteration ], m_currentIteration ); + m_series.value().iterations[m_currentIteration], m_currentIteration); } -bool SeriesIterator::operator==( SeriesIterator const & other ) const +bool SeriesIterator::operator==(SeriesIterator const &other) const { return this->m_currentIteration == other.m_currentIteration && this->m_series.has_value() == other.m_series.has_value(); } -bool SeriesIterator::operator!=( SeriesIterator const & other ) const +bool SeriesIterator::operator!=(SeriesIterator const &other) const { - return !operator==( other ); + return !operator==(other); } SeriesIterator SeriesIterator::end() @@ -182,14 +179,12 @@ SeriesIterator SeriesIterator::end() return SeriesIterator{}; } -ReadIterations::ReadIterations( Series series ) - : m_series( std::move( series ) ) -{ -} +ReadIterations::ReadIterations(Series series) : m_series(std::move(series)) +{} ReadIterations::iterator_t ReadIterations::begin() { - return iterator_t{ m_series }; + return iterator_t{m_series}; } ReadIterations::iterator_t ReadIterations::end() diff --git a/src/Record.cpp b/src/Record.cpp index abe3013ba1..ce18bdccc5 100644 --- a/src/Record.cpp +++ b/src/Record.cpp @@ -24,7 +24,6 @@ #include - namespace openPMD { Record::Record() @@ -32,51 +31,52 @@ Record::Record() setTimeOffset(0.f); } -Record& -Record::setUnitDimension(std::map< UnitDimension, double > const& udim) +Record &Record::setUnitDimension(std::map const &udim) { - if( !udim.empty() ) + if (!udim.empty()) { - std::array< double, 7 > tmpUnitDimension = this->unitDimension(); - for( auto const& entry : udim ) + std::array tmpUnitDimension = this->unitDimension(); + for (auto const &entry : udim) tmpUnitDimension[static_cast(entry.first)] = entry.second; setAttribute("unitDimension", tmpUnitDimension); } return *this; } -void -Record::flush_impl(std::string const& name) +void Record::flush_impl(std::string const &name) { - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - for( auto& comp : *this ) + for (auto &comp : *this) comp.second.flush(comp.first); - } else + } + else { - if( !written() ) + if (!written()) { - if( scalar() ) + if (scalar()) { - RecordComponent& rc = at(RecordComponent::SCALAR); + RecordComponent &rc = at(RecordComponent::SCALAR); rc.parent() = parent(); rc.flush(name); IOHandler()->flush(); - writable().abstractFilePosition = rc.writable().abstractFilePosition; + writable().abstractFilePosition = + rc.writable().abstractFilePosition; written() = true; - } else + } + else { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = name; IOHandler()->enqueue(IOTask(this, pCreate)); - for( auto& comp : *this ) + for (auto &comp : *this) comp.second.parent() = getWritable(this); } } - if( scalar() ) + if (scalar()) { - for( auto& comp : *this ) + for (auto &comp : *this) { comp.second.flush(name); writable().abstractFilePosition = @@ -85,7 +85,7 @@ Record::flush_impl(std::string const& name) } else { - for( auto& comp : *this ) + for (auto 
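The begin()/end()/operator++/operator* quartet reformatted above is what a range-based for over ReadIterations expands to. A hedged read-side sketch with explicit iterators; the file name is invented and must refer to an existing, readable series:

#include <openPMD/openPMD.hpp>
#include <iostream>

int main()
{
    using namespace openPMD;
    Series series("electrons.bp", Access::READ_ONLY);
    ReadIterations iterations = series.readIterations();
    for (auto it = iterations.begin(); it != iterations.end(); ++it)
    {
        IndexedIteration iteration = *it;
        std::cout << "Saw iteration " << iteration.iterationIndex << std::endl;
        iteration.close(); // operator++ would otherwise close it for us
    }
}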
&comp : *this) comp.second.flush(comp.first); } @@ -93,37 +93,37 @@ Record::flush_impl(std::string const& name) } } -void -Record::read() +void Record::read() { - if( scalar() ) + if (scalar()) { /* using operator[] will incorrectly update parent */ this->at(RecordComponent::SCALAR).read(); - } else + } + else { - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(this, pList)); IOHandler()->flush(); - Parameter< Operation::OPEN_PATH > pOpen; - for( auto const& component : *pList.paths ) + Parameter pOpen; + for (auto const &component : *pList.paths) { - RecordComponent& rc = (*this)[component]; + RecordComponent &rc = (*this)[component]; pOpen.path = component; IOHandler()->enqueue(IOTask(&rc, pOpen)); rc.get().m_isConstant = true; rc.read(); } - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); IOHandler()->flush(); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component : *dList.datasets ) + Parameter dOpen; + for (auto const &component : *dList.datasets) { - RecordComponent & rc = ( *this )[ component ]; + RecordComponent &rc = (*this)[component]; dOpen.name = component; IOHandler()->enqueue(IOTask(&rc, dOpen)); IOHandler()->flush(); @@ -136,10 +136,10 @@ Record::read() readBase(); - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } template <> -BaseRecord::mapped_type& -BaseRecord::operator[](std::string&& key); -} // openPMD +BaseRecord::mapped_type & +BaseRecord::operator[](std::string &&key); +} // namespace openPMD diff --git a/src/RecordComponent.cpp b/src/RecordComponent.cpp index f49c7587c5..1250559abe 100644 --- a/src/RecordComponent.cpp +++ b/src/RecordComponent.cpp @@ -18,21 +18,20 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
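Record::read() above distinguishes scalar records (a single component stored directly) from vector records (one component per path). On the user side that distinction is the RecordComponent::SCALAR key; a small write-side sketch with invented names:

#include <openPMD/openPMD.hpp>

int main()
{
    using namespace openPMD;
    Series series("species.json", Access::CREATE);
    ParticleSpecies electrons = series.iterations[0].particles["e"];

    // scalar record: one component, addressed via RecordComponent::SCALAR
    auto weighting = electrons["weighting"][RecordComponent::SCALAR];
    weighting.resetDataset(Dataset(Datatype::DOUBLE, {100}));
    weighting.makeConstant(1.0);

    // vector record: one component per axis
    auto position_x = electrons["position"]["x"];
    position_x.resetDataset(Dataset(Datatype::DOUBLE, {100}));
    position_x.makeConstant(0.0);

    series.flush();
}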
*/ -#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/Dataset.hpp" #include "openPMD/DatatypeHelpers.hpp" #include "openPMD/Error.hpp" -#include "openPMD/Series.hpp" #include "openPMD/IO/Format.hpp" +#include "openPMD/Series.hpp" +#include "openPMD/auxiliary/Memory.hpp" #include +#include +#include #include #include #include -#include -#include - namespace openPMD { @@ -40,203 +39,193 @@ namespace internal { RecordComponentData::RecordComponentData() { - RecordComponent impl{ std::shared_ptr< RecordComponentData >{ - this, []( auto const * ) {} } }; + RecordComponent impl{ + std::shared_ptr{this, [](auto const *) {}}}; impl.setUnitSI(1); impl.resetDataset(Dataset(Datatype::CHAR, {1})); } -} +} // namespace internal -RecordComponent::RecordComponent() : BaseRecordComponent{ nullptr } +RecordComponent::RecordComponent() : BaseRecordComponent{nullptr} { - BaseRecordComponent::setData( m_recordComponentData ); + BaseRecordComponent::setData(m_recordComponentData); } RecordComponent::RecordComponent( - std::shared_ptr< internal::RecordComponentData > data ) - : BaseRecordComponent{ data } - , m_recordComponentData{ std::move( data ) } -{ -} + std::shared_ptr data) + : BaseRecordComponent{data}, m_recordComponentData{std::move(data)} +{} // We need to instantiate this somewhere otherwise there might be linker issues // despite this thing actually being constepxr -constexpr char const * const RecordComponent::SCALAR; +constexpr char const *const RecordComponent::SCALAR; -RecordComponent& -RecordComponent::setUnitSI(double usi) +RecordComponent &RecordComponent::setUnitSI(double usi) { setAttribute("unitSI", usi); return *this; } -RecordComponent & -RecordComponent::resetDataset( Dataset d ) +RecordComponent &RecordComponent::resetDataset(Dataset d) { - auto & rc = get(); - if( written() ) + auto &rc = get(); + if (written()) { - if( d.dtype == Datatype::UNDEFINED ) + if (d.dtype == Datatype::UNDEFINED) { d.dtype = rc.m_dataset.dtype; } - else if( d.dtype != rc.m_dataset.dtype ) + else if (d.dtype != rc.m_dataset.dtype) { throw std::runtime_error( - "Cannot change the datatype of a dataset." ); + "Cannot change the datatype of a dataset."); } rc.m_hasBeenExtended = true; } - if( d.dtype == Datatype::UNDEFINED ) + if (d.dtype == Datatype::UNDEFINED) { throw error::WrongAPIUsage( - "[RecordComponent] Must set specific datatype." 
); + "[RecordComponent] Must set specific datatype."); } // if( d.extent.empty() ) // throw std::runtime_error("Dataset extent must be at least 1D."); - if( std::any_of( - d.extent.begin(), - d.extent.end(), - []( Extent::value_type const & i ) { return i == 0u; } ) ) - return makeEmpty( std::move( d ) ); + if (std::any_of( + d.extent.begin(), d.extent.end(), [](Extent::value_type const &i) { + return i == 0u; + })) + return makeEmpty(std::move(d)); rc.m_isEmpty = false; - if( written() ) + if (written()) { - rc.m_dataset.extend( std::move( d.extent ) ); + rc.m_dataset.extend(std::move(d.extent)); } else { - rc.m_dataset = std::move( d ); + rc.m_dataset = std::move(d); } dirty() = true; return *this; } -uint8_t -RecordComponent::getDimensionality() const +uint8_t RecordComponent::getDimensionality() const { return get().m_dataset.rank; } -Extent -RecordComponent::getExtent() const +Extent RecordComponent::getExtent() const { return get().m_dataset.extent; } namespace detail { -struct MakeEmpty -{ - template< typename T > - static RecordComponent& call( - RecordComponent & rc, uint8_t dimensions ) + struct MakeEmpty { - return rc.makeEmpty< T >( dimensions ); - } + template + static RecordComponent &call(RecordComponent &rc, uint8_t dimensions) + { + return rc.makeEmpty(dimensions); + } - template< unsigned int N > - static RecordComponent& call( RecordComponent &, uint8_t ) - { - throw std::runtime_error( - "RecordComponent::makeEmpty: Unknown datatype." ); - } -}; -} + template + static RecordComponent &call(RecordComponent &, uint8_t) + { + throw std::runtime_error( + "RecordComponent::makeEmpty: Unknown datatype."); + } + }; +} // namespace detail -RecordComponent& -RecordComponent::makeEmpty( Datatype dt, uint8_t dimensions ) +RecordComponent &RecordComponent::makeEmpty(Datatype dt, uint8_t dimensions) { - return switchType< detail::MakeEmpty >( dt, *this, dimensions ); + return switchType(dt, *this, dimensions); } -RecordComponent& -RecordComponent::makeEmpty( Dataset d ) +RecordComponent &RecordComponent::makeEmpty(Dataset d) { - auto & rc = get(); - if( written() ) + auto &rc = get(); + if (written()) { - if( !constant() ) + if (!constant()) { throw std::runtime_error( "An empty record component's extent can only be changed" " in case it has been initialized as an empty or constant" - " record component." ); + " record component."); } - if( d.dtype == Datatype::UNDEFINED ) + if (d.dtype == Datatype::UNDEFINED) { d.dtype = rc.m_dataset.dtype; } - else if( d.dtype != rc.m_dataset.dtype ) + else if (d.dtype != rc.m_dataset.dtype) { throw std::runtime_error( - "Cannot change the datatype of a dataset." ); + "Cannot change the datatype of a dataset."); } - rc.m_dataset.extend( std::move( d.extent ) ); + rc.m_dataset.extend(std::move(d.extent)); rc.m_hasBeenExtended = true; } else { - rc.m_dataset = std::move( d ); + rc.m_dataset = std::move(d); } - if( rc.m_dataset.extent.size() == 0 ) - throw std::runtime_error( "Dataset extent must be at least 1D." 
); + if (rc.m_dataset.extent.size() == 0) + throw std::runtime_error("Dataset extent must be at least 1D."); rc.m_isEmpty = true; dirty() = true; - if( !written() ) + if (!written()) { - switchType< detail::DefaultValue< RecordComponent > >( - rc.m_dataset.dtype, *this ); + switchType>( + rc.m_dataset.dtype, *this); } return *this; } -bool -RecordComponent::empty() const +bool RecordComponent::empty() const { return get().m_isEmpty; } -void -RecordComponent::flush(std::string const& name) +void RecordComponent::flush(std::string const &name) { - auto & rc = get(); - if( IOHandler()->m_flushLevel == FlushLevel::SkeletonOnly ) + auto &rc = get(); + if (IOHandler()->m_flushLevel == FlushLevel::SkeletonOnly) { rc.m_name = name; return; } - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - while( !rc.m_chunks.empty() ) + while (!rc.m_chunks.empty()) { IOHandler()->enqueue(rc.m_chunks.front()); rc.m_chunks.pop(); } - } else + } + else { /* * This catches when a user forgets to use resetDataset. */ - if( rc.m_dataset.dtype == Datatype::UNDEFINED ) + if (rc.m_dataset.dtype == Datatype::UNDEFINED) { throw error::WrongAPIUsage( "[RecordComponent] Must set specific datatype (Use " - "resetDataset call)." ); + "resetDataset call)."); } - if( !written() ) + if (!written()) { - if( constant() ) + if (constant()) { - Parameter< Operation::CREATE_PATH > pCreate; + Parameter pCreate; pCreate.path = name; IOHandler()->enqueue(IOTask(this, pCreate)); - Parameter< Operation::WRITE_ATT > aWrite; + Parameter aWrite; aWrite.name = "value"; aWrite.dtype = rc.m_constantValue.dtype; aWrite.resource = rc.m_constantValue.getResource(); @@ -246,9 +235,10 @@ RecordComponent::flush(std::string const& name) aWrite.dtype = a.dtype; aWrite.resource = a.getResource(); IOHandler()->enqueue(IOTask(this, aWrite)); - } else + } + else { - Parameter< Operation::CREATE_DATASET > dCreate; + Parameter dCreate; dCreate.name = name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); @@ -257,27 +247,27 @@ RecordComponent::flush(std::string const& name) } } - if( rc.m_hasBeenExtended ) + if (rc.m_hasBeenExtended) { - if( constant() ) + if (constant()) { - Parameter< Operation::WRITE_ATT > aWrite; + Parameter aWrite; aWrite.name = "shape"; - Attribute a( getExtent() ); + Attribute a(getExtent()); aWrite.dtype = a.dtype; aWrite.resource = a.getResource(); - IOHandler()->enqueue( IOTask( this, aWrite ) ); + IOHandler()->enqueue(IOTask(this, aWrite)); } else { - Parameter< Operation::EXTEND_DATASET > pExtend; + Parameter pExtend; pExtend.extent = rc.m_dataset.extent; - IOHandler()->enqueue( IOTask( this, std::move( pExtend ) ) ); + IOHandler()->enqueue(IOTask(this, std::move(pExtend))); rc.m_hasBeenExtended = false; } } - while( !rc.m_chunks.empty() ) + while (!rc.m_chunks.empty()) { IOHandler()->enqueue(rc.m_chunks.front()); rc.m_chunks.pop(); @@ -287,20 +277,18 @@ RecordComponent::flush(std::string const& name) } } -void -RecordComponent::read() +void RecordComponent::read() { readBase(); } -void -RecordComponent::readBase() +void RecordComponent::readBase() { using DT = Datatype; - //auto & rc = get(); - Parameter< Operation::READ_ATT > aRead; + // auto & rc = get(); + Parameter aRead; - if( constant() && !empty() ) + if (constant() && !empty()) { aRead.name = "value"; IOHandler()->enqueue(IOTask(this, aRead)); @@ -309,61 +297,61 @@ RecordComponent::readBase() Attribute a(*aRead.resource); DT dtype = *aRead.dtype; written() = false; - switch( dtype ) + switch 
(dtype) { - case DT::LONG_DOUBLE: - makeConstant(a.get< long double >()); - break; - case DT::DOUBLE: - makeConstant(a.get< double >()); - break; - case DT::FLOAT: - makeConstant(a.get< float >()); - break; - case DT::CLONG_DOUBLE: - makeConstant(a.get< std::complex< long double > >()); - break; - case DT::CDOUBLE: - makeConstant(a.get< std::complex< double > >()); - break; - case DT::CFLOAT: - makeConstant(a.get< std::complex< float > >()); - break; - case DT::SHORT: - makeConstant(a.get< short >()); - break; - case DT::INT: - makeConstant(a.get< int >()); - break; - case DT::LONG: - makeConstant(a.get< long >()); - break; - case DT::LONGLONG: - makeConstant(a.get< long long >()); - break; - case DT::USHORT: - makeConstant(a.get< unsigned short >()); - break; - case DT::UINT: - makeConstant(a.get< unsigned int >()); - break; - case DT::ULONG: - makeConstant(a.get< unsigned long >()); - break; - case DT::ULONGLONG: - makeConstant(a.get< unsigned long long >()); - break; - case DT::CHAR: - makeConstant(a.get< char >()); - break; - case DT::UCHAR: - makeConstant(a.get< unsigned char >()); - break; - case DT::BOOL: - makeConstant(a.get< bool >()); - break; - default: - throw std::runtime_error("Unexpected constant datatype"); + case DT::LONG_DOUBLE: + makeConstant(a.get()); + break; + case DT::DOUBLE: + makeConstant(a.get()); + break; + case DT::FLOAT: + makeConstant(a.get()); + break; + case DT::CLONG_DOUBLE: + makeConstant(a.get>()); + break; + case DT::CDOUBLE: + makeConstant(a.get>()); + break; + case DT::CFLOAT: + makeConstant(a.get>()); + break; + case DT::SHORT: + makeConstant(a.get()); + break; + case DT::INT: + makeConstant(a.get()); + break; + case DT::LONG: + makeConstant(a.get()); + break; + case DT::LONGLONG: + makeConstant(a.get()); + break; + case DT::USHORT: + makeConstant(a.get()); + break; + case DT::UINT: + makeConstant(a.get()); + break; + case DT::ULONG: + makeConstant(a.get()); + break; + case DT::ULONGLONG: + makeConstant(a.get()); + break; + case DT::CHAR: + makeConstant(a.get()); + break; + case DT::UCHAR: + makeConstant(a.get()); + break; + case DT::BOOL: + makeConstant(a.get()); + break; + default: + throw std::runtime_error("Unexpected constant datatype"); } written() = true; @@ -375,17 +363,15 @@ RecordComponent::readBase() // uint64_t check Datatype const attrDtype = *aRead.dtype; - if( isSame( attrDtype, determineDatatype< std::vector< uint64_t > >() ) - || isSame( attrDtype, determineDatatype< uint64_t >() ) ) - for( auto const& val : a.get< std::vector< uint64_t > >() ) - e.push_back( val ); + if (isSame(attrDtype, determineDatatype>()) || + isSame(attrDtype, determineDatatype())) + for (auto const &val : a.get>()) + e.push_back(val); else { std::ostringstream oss; - oss << "Unexpected datatype (" - << *aRead.dtype - << ") for attribute 'shape' (" - << determineDatatype< uint64_t >() + oss << "Unexpected datatype (" << *aRead.dtype + << ") for attribute 'shape' (" << determineDatatype() << " aka uint64_t)"; throw std::runtime_error(oss.str()); } @@ -398,18 +384,17 @@ RecordComponent::readBase() aRead.name = "unitSI"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::DOUBLE ) - setUnitSI(Attribute(*aRead.resource).get< double >()); + if (*aRead.dtype == DT::DOUBLE) + setUnitSI(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'unitSI'"); - readAttributes( ReadMode::FullyReread ); + readAttributes(ReadMode::FullyReread); } -bool -RecordComponent::dirtyRecursive() const 
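readBase() above reconstructs constant components from their value/shape attribute pair, one case per supported datatype. On the write side such a component is produced with makeConstant(); a hedged sketch with invented names:

#include <openPMD/openPMD.hpp>

int main()
{
    using namespace openPMD;
    Series series("constant_%T.json", Access::CREATE);
    auto rho = series.iterations[0].meshes["rho"][MeshRecordComponent::SCALAR];

    // a concrete datatype is required (cf. resetDataset() above); afterwards
    // makeConstant() stores only the "value" and "shape" attributes
    // instead of a full dataset
    rho.resetDataset(Dataset(Datatype::DOUBLE, {512, 512}));
    rho.makeConstant(42.0);

    series.flush();
}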
+bool RecordComponent::dirtyRecursive() const { - if( this->dirty() ) + if (this->dirty()) { return true; } diff --git a/src/Series.cpp b/src/Series.cpp index 8147792d78..2913eeb9b2 100644 --- a/src/Series.cpp +++ b/src/Series.cpp @@ -18,16 +18,16 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . */ -#include "openPMD/auxiliary/Date.hpp" -#include "openPMD/auxiliary/Filesystem.hpp" -#include "openPMD/auxiliary/JSON_internal.hpp" -#include "openPMD/auxiliary/StringManip.hpp" +#include "openPMD/Series.hpp" +#include "openPMD/Error.hpp" #include "openPMD/IO/AbstractIOHandler.hpp" #include "openPMD/IO/AbstractIOHandlerHelper.hpp" #include "openPMD/IO/Format.hpp" -#include "openPMD/Error.hpp" #include "openPMD/ReadIterations.hpp" -#include "openPMD/Series.hpp" +#include "openPMD/auxiliary/Date.hpp" +#include "openPMD/auxiliary/Filesystem.hpp" +#include "openPMD/auxiliary/JSON_internal.hpp" +#include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/version.hpp" #include @@ -40,14 +40,14 @@ #include #include - namespace openPMD { namespace { /** Remove the filename extension of a given storage format. * - * @param filename String containing the filename, possibly with filename extension. + * @param filename String containing the filename, possibly with + * filename extension. * @param f File format to remove filename extension for. * @return String containing the filename without filename extension. */ @@ -61,29 +61,34 @@ namespace uint64_t iteration{}; //! iteration found in regex pattern (default: 0) // support for std::tie - operator std::tuple< bool &, int &, uint64_t & >() + operator std::tuple() { - return std::tuple< bool &, int &, uint64_t & >{ - isContained, padding, iteration }; + return std::tuple{ + isContained, padding, iteration}; } }; - /** Create a functor to determine if a file can be of a format and matches an iterationEncoding, given the filename on disk. + /** Create a functor to determine if a file can be of a format and matches + * an iterationEncoding, given the filename on disk. * - * @param prefix String containing head (i.e. before %T) of desired filename without filename extension. - * @param padding Amount of padding allowed in iteration number %T. If zero, any amount of padding is matched. - * @param postfix String containing tail (i.e. after %T) of desired filename without filename extension. + * @param prefix String containing head (i.e. before %T) of desired + * filename without filename extension. + * @param padding Amount of padding allowed in iteration number %T. If + * zero, any amount of padding is matched. + * @param postfix String containing tail (i.e. after %T) of desired + * filename without filename extension. * @param f File format to check backend applicability for. * @return Functor returning tuple of bool and int. - * bool is True if file could be of type f and matches the iterationEncoding. False otherwise. - * int is the amount of padding present in the iteration number %T. Is 0 if bool is False. + * bool is True if file could be of type f and matches the + * iterationEncoding. False otherwise. int is the amount of padding present + * in the iteration number %T. Is 0 if bool is False. 
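The small helper struct above converts itself to a std::tuple of references so that call sites can unpack it with std::tie (as readFileBased() does further below). A stand-alone sketch of the same idiom, independent of openPMD:

#include <cstdint>
#include <iostream>
#include <tuple>

struct Match
{
    bool isContained = false;
    int padding = 0;
    std::uint64_t iteration = 0;

    operator std::tuple<bool &, int &, std::uint64_t &>()
    {
        return std::tuple<bool &, int &, std::uint64_t &>{
            isContained, padding, iteration};
    }
};

int main()
{
    bool isContained;
    int padding;
    std::uint64_t iteration;
    // the conversion operator lets std::tie unpack the struct's members
    std::tie(isContained, padding, iteration) = Match{true, 6, 100};
    std::cout << isContained << ' ' << padding << ' ' << iteration << '\n';
}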
*/ std::function matcher( std::string const &prefix, int padding, std::string const &postfix, Format f); -} // namespace [anonymous] +} // namespace struct Series::ParsedInput { @@ -94,66 +99,65 @@ struct Series::ParsedInput std::string filenamePrefix; std::string filenamePostfix; int filenamePadding = -1; -}; //ParsedInput +}; // ParsedInput -std::string -Series::openPMD() const +std::string Series::openPMD() const { - return getAttribute("openPMD").get< std::string >(); + return getAttribute("openPMD").get(); } -Series& -Series::setOpenPMD(std::string const& o) +Series &Series::setOpenPMD(std::string const &o) { setAttribute("openPMD", o); return *this; } -uint32_t -Series::openPMDextension() const +uint32_t Series::openPMDextension() const { - return getAttribute("openPMDextension").get< uint32_t >(); + return getAttribute("openPMDextension").get(); } -Series& -Series::setOpenPMDextension(uint32_t oe) +Series &Series::setOpenPMDextension(uint32_t oe) { setAttribute("openPMDextension", oe); return *this; } -std::string -Series::basePath() const +std::string Series::basePath() const { - return getAttribute("basePath").get< std::string >(); + return getAttribute("basePath").get(); } -Series& -Series::setBasePath(std::string const& bp) +Series &Series::setBasePath(std::string const &bp) { std::string version = openPMD(); - if( version == "1.0.0" || version == "1.0.1" || version == "1.1.0" ) - throw std::runtime_error("Custom basePath not allowed in openPMD <=1.1.0"); + if (version == "1.0.0" || version == "1.0.1" || version == "1.1.0") + throw std::runtime_error( + "Custom basePath not allowed in openPMD <=1.1.0"); setAttribute("basePath", bp); return *this; } -std::string -Series::meshesPath() const +std::string Series::meshesPath() const { - return getAttribute("meshesPath").get< std::string >(); + return getAttribute("meshesPath").get(); } -Series& -Series::setMeshesPath(std::string const& mp) +Series &Series::setMeshesPath(std::string const &mp) { - auto & series = get(); - if( std::any_of(series.iterations.begin(), series.iterations.end(), - [](Container< Iteration, uint64_t >::value_type const& i){ return i.second.meshes.written(); }) ) - throw std::runtime_error("A files meshesPath can not (yet) be changed after it has been written."); + auto &series = get(); + if (std::any_of( + series.iterations.begin(), + series.iterations.end(), + [](Container::value_type const &i) { + return i.second.meshes.written(); + })) + throw std::runtime_error( + "A files meshesPath can not (yet) be changed after it has been " + "written."); - if( auxiliary::ends_with(mp, '/') ) + if (auxiliary::ends_with(mp, '/')) setAttribute("meshesPath", mp); else setAttribute("meshesPath", mp + "/"); @@ -161,21 +165,25 @@ Series::setMeshesPath(std::string const& mp) return *this; } -std::string -Series::particlesPath() const +std::string Series::particlesPath() const { - return getAttribute("particlesPath").get< std::string >(); + return getAttribute("particlesPath").get(); } -Series& -Series::setParticlesPath(std::string const& pp) +Series &Series::setParticlesPath(std::string const &pp) { - auto & series = get(); - if( std::any_of(series.iterations.begin(), series.iterations.end(), - [](Container< Iteration, uint64_t >::value_type const& i){ return i.second.particles.written(); }) ) - throw std::runtime_error("A files particlesPath can not (yet) be changed after it has been written."); + auto &series = get(); + if (std::any_of( + series.iterations.begin(), + series.iterations.end(), + [](Container::value_type const 
&i) { + return i.second.particles.written(); + })) + throw std::runtime_error( + "A files particlesPath can not (yet) be changed after it has been " + "written."); - if( auxiliary::ends_with(pp, '/') ) + if (auxiliary::ends_with(pp, '/')) setAttribute("particlesPath", pp); else setAttribute("particlesPath", pp + "/"); @@ -183,179 +191,169 @@ Series::setParticlesPath(std::string const& pp) return *this; } -std::string -Series::author() const +std::string Series::author() const { - return getAttribute("author").get< std::string >(); + return getAttribute("author").get(); } -Series& -Series::setAuthor(std::string const& a) +Series &Series::setAuthor(std::string const &a) { setAttribute("author", a); return *this; } -std::string -Series::software() const +std::string Series::software() const { - return getAttribute("software").get< std::string >(); + return getAttribute("software").get(); } -Series& -Series::setSoftware( std::string const& newName, std::string const& newVersion ) +Series & +Series::setSoftware(std::string const &newName, std::string const &newVersion) { - setAttribute( "software", newName ); - setAttribute( "softwareVersion", newVersion ); + setAttribute("software", newName); + setAttribute("softwareVersion", newVersion); return *this; } -std::string -Series::softwareVersion() const +std::string Series::softwareVersion() const { - return getAttribute("softwareVersion").get< std::string >(); + return getAttribute("softwareVersion").get(); } -Series& -Series::setSoftwareVersion(std::string const& sv) +Series &Series::setSoftwareVersion(std::string const &sv) { setAttribute("softwareVersion", sv); return *this; } -std::string -Series::date() const +std::string Series::date() const { - return getAttribute("date").get< std::string >(); + return getAttribute("date").get(); } -Series& -Series::setDate(std::string const& d) +Series &Series::setDate(std::string const &d) { setAttribute("date", d); return *this; } -std::string -Series::softwareDependencies() const +std::string Series::softwareDependencies() const { - return getAttribute("softwareDependencies").get< std::string >(); + return getAttribute("softwareDependencies").get(); } -Series& +Series & Series::setSoftwareDependencies(std::string const &newSoftwareDependencies) { setAttribute("softwareDependencies", newSoftwareDependencies); return *this; } -std::string -Series::machine() const +std::string Series::machine() const { - return getAttribute("machine").get< std::string >(); + return getAttribute("machine").get(); } -Series& -Series::setMachine(std::string const &newMachine) +Series &Series::setMachine(std::string const &newMachine) { setAttribute("machine", newMachine); return *this; } -IterationEncoding -Series::iterationEncoding() const +IterationEncoding Series::iterationEncoding() const { return get().m_iterationEncoding; } -Series& -Series::setIterationEncoding(IterationEncoding ie) +Series &Series::setIterationEncoding(IterationEncoding ie) { - auto & series = get(); - if( written() ) - throw std::runtime_error("A files iterationEncoding can not (yet) be changed after it has been written."); + auto &series = get(); + if (written()) + throw std::runtime_error( + "A files iterationEncoding can not (yet) be changed after it has " + "been written."); series.m_iterationEncoding = ie; - switch( ie ) + switch (ie) { - case IterationEncoding::fileBased: - setIterationFormat( series.m_name ); - setAttribute( "iterationEncoding", std::string( "fileBased" ) ); - // This checks that the name contains the expansion pattern - // 
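The attribute setters above normalize and store Series-level metadata (note the trailing '/' appended by setMeshesPath/setParticlesPath). A short write-side sketch; all values are placeholders:

#include <openPMD/openPMD.hpp>

int main()
{
    using namespace openPMD;
    Series series("meta.json", Access::CREATE);
    series.setAuthor("Jane Doe <jane@example.com>");
    series.setSoftware("mySimulationCode", "1.2.0");
    series.setMeshesPath("fields");     // stored as "fields/"
    series.setParticlesPath("species"); // stored as "species/"
    series.flush();
}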
(e.g. %T) and parses it - if( series.m_filenamePadding < 0 ) + case IterationEncoding::fileBased: + setIterationFormat(series.m_name); + setAttribute("iterationEncoding", std::string("fileBased")); + // This checks that the name contains the expansion pattern + // (e.g. %T) and parses it + if (series.m_filenamePadding < 0) + { + if (!reparseExpansionPattern(series.m_name)) { - if( !reparseExpansionPattern( series.m_name ) ) - { - throw error::WrongAPIUsage( - "For fileBased formats the iteration expansion pattern " - "%T must " - "be included in the file name" ); - } + throw error::WrongAPIUsage( + "For fileBased formats the iteration expansion pattern " + "%T must " + "be included in the file name"); } - break; - case IterationEncoding::groupBased: - setIterationFormat(BASEPATH); - setAttribute("iterationEncoding", std::string("groupBased")); - break; - case IterationEncoding::variableBased: - setIterationFormat( - auxiliary::replace_first(basePath(), "/%T/", "")); - setAttribute("iterationEncoding", std::string("variableBased")); - break; + } + break; + case IterationEncoding::groupBased: + setIterationFormat(BASEPATH); + setAttribute("iterationEncoding", std::string("groupBased")); + break; + case IterationEncoding::variableBased: + setIterationFormat(auxiliary::replace_first(basePath(), "/%T/", "")); + setAttribute("iterationEncoding", std::string("variableBased")); + break; } return *this; } -std::string -Series::iterationFormat() const +std::string Series::iterationFormat() const { - return getAttribute("iterationFormat").get< std::string >(); + return getAttribute("iterationFormat").get(); } -Series& -Series::setIterationFormat(std::string const& i) +Series &Series::setIterationFormat(std::string const &i) { - if( written() ) - throw std::runtime_error("A files iterationFormat can not (yet) be changed after it has been written."); + if (written()) + throw std::runtime_error( + "A files iterationFormat can not (yet) be changed after it has " + "been written."); - if( iterationEncoding() == IterationEncoding::groupBased || - iterationEncoding() == IterationEncoding::variableBased ) - if( basePath() != i && (openPMD() == "1.0.1" || openPMD() == "1.0.0") ) - throw std::invalid_argument("iterationFormat must not differ from basePath " + basePath() + " for group- or variableBased data"); + if (iterationEncoding() == IterationEncoding::groupBased || + iterationEncoding() == IterationEncoding::variableBased) + if (basePath() != i && (openPMD() == "1.0.1" || openPMD() == "1.0.0")) + throw std::invalid_argument( + "iterationFormat must not differ from basePath " + basePath() + + " for group- or variableBased data"); setAttribute("iterationFormat", i); return *this; } -std::string -Series::name() const +std::string Series::name() const { return get().m_name; } -Series& -Series::setName(std::string const& n) +Series &Series::setName(std::string const &n) { - auto & series = get(); - if( written() ) - throw std::runtime_error("A files name can not (yet) be changed after it has been written."); + auto &series = get(); + if (written()) + throw std::runtime_error( + "A files name can not (yet) be changed after it has been written."); - if( series.m_iterationEncoding == IterationEncoding::fileBased ) + if (series.m_iterationEncoding == IterationEncoding::fileBased) { // If the filename specifies an expansion pattern, set it. // If not, check if one is already active. 
// Our filename parser expects an extension, so just add any and ignore // the result for that - if( hasExpansionPattern( n + ".json" ) ) + if (hasExpansionPattern(n + ".json")) { - reparseExpansionPattern( n + ".json" ); + reparseExpansionPattern(n + ".json"); } - else if( series.m_filenamePadding < 0 ) + else if (series.m_filenamePadding < 0) { throw error::WrongAPIUsage( "For fileBased formats the iteration expansion pattern %T must " - "be included in the file name" ); + "be included in the file name"); } else { @@ -368,44 +366,43 @@ Series::setName(std::string const& n) return *this; } -std::string -Series::backend() const +std::string Series::backend() const { return IOHandler()->backendName(); } -void -Series::flush() +void Series::flush() { - auto & series = get(); + auto &series = get(); flush_impl( series.iterations.begin(), series.iterations.end(), - FlushLevel::UserFlush ); + FlushLevel::UserFlush); } -std::unique_ptr< Series::ParsedInput > -Series::parseInput(std::string filepath) +std::unique_ptr Series::parseInput(std::string filepath) { - std::unique_ptr< Series::ParsedInput > input{new Series::ParsedInput}; + std::unique_ptr input{new Series::ParsedInput}; #ifdef _WIN32 - if( auxiliary::contains(filepath, '/') ) + if (auxiliary::contains(filepath, '/')) { - std::cerr << "Filepaths on WINDOWS platforms may not contain slashes '/'! " - << "Replacing with backslashes '\\' unconditionally!" << std::endl; + std::cerr + << "Filepaths on WINDOWS platforms may not contain slashes '/'! " + << "Replacing with backslashes '\\' unconditionally!" << std::endl; filepath = auxiliary::replace_all(filepath, "/", "\\"); } #else - if( auxiliary::contains(filepath, '\\') ) + if (auxiliary::contains(filepath, '\\')) { - std::cerr << "Filepaths on UNIX platforms may not include backslashes '\\'! " - << "Replacing with slashes '/' unconditionally!" << std::endl; + std::cerr + << "Filepaths on UNIX platforms may not include backslashes '\\'! " + << "Replacing with slashes '/' unconditionally!" 
<< std::endl; filepath = auxiliary::replace_all(filepath, "\\", "/"); } #endif auto const pos = filepath.find_last_of(auxiliary::directory_separator); - if( std::string::npos == pos ) + if (std::string::npos == pos) { input->path = "."; input->path.append(1, auxiliary::directory_separator); @@ -422,47 +419,50 @@ Series::parseInput(std::string filepath) std::regex pattern("(.*)%(0[[:digit:]]+)?T(.*)"); std::smatch regexMatch; std::regex_match(input->name, regexMatch, pattern); - if( regexMatch.empty() ) + if (regexMatch.empty()) input->iterationEncoding = IterationEncoding::groupBased; - else if( regexMatch.size() == 4 ) + else if (regexMatch.size() == 4) { input->iterationEncoding = IterationEncoding::fileBased; input->filenamePrefix = regexMatch[1].str(); - std::string const& pad = regexMatch[2]; - if( pad.empty() ) + std::string const &pad = regexMatch[2]; + if (pad.empty()) input->filenamePadding = 0; else { - if( pad.front() != '0' ) - throw std::runtime_error("Invalid iterationEncoding " + input->name); + if (pad.front() != '0') + throw std::runtime_error( + "Invalid iterationEncoding " + input->name); input->filenamePadding = std::stoi(pad); } input->filenamePostfix = regexMatch[3].str(); - } else - throw std::runtime_error("Can not determine iterationFormat from filename " + input->name); + } + else + throw std::runtime_error( + "Can not determine iterationFormat from filename " + input->name); - input->filenamePostfix = cleanFilename(input->filenamePostfix, input->format); + input->filenamePostfix = + cleanFilename(input->filenamePostfix, input->format); input->name = cleanFilename(input->name, input->format); return input; } -bool Series::hasExpansionPattern( std::string filenameWithExtension ) +bool Series::hasExpansionPattern(std::string filenameWithExtension) { - auto input = parseInput( std::move( filenameWithExtension ) ); + auto input = parseInput(std::move(filenameWithExtension)); return input->iterationEncoding == IterationEncoding::fileBased; } -bool Series::reparseExpansionPattern( - std::string filenameWithExtension ) +bool Series::reparseExpansionPattern(std::string filenameWithExtension) { - auto input = parseInput( std::move( filenameWithExtension ) ); - if( input->iterationEncoding != IterationEncoding::fileBased ) + auto input = parseInput(std::move(filenameWithExtension)); + if (input->iterationEncoding != IterationEncoding::fileBased) { return false; } - auto & series = get(); + auto &series = get(); series.m_filenamePrefix = input->filenamePrefix; series.m_filenamePostfix = input->filenamePostfix; series.m_filenamePadding = input->filenamePadding; @@ -470,13 +470,13 @@ bool Series::reparseExpansionPattern( } void Series::init( - std::shared_ptr< AbstractIOHandler > ioHandler, - std::unique_ptr< Series::ParsedInput > input ) + std::shared_ptr ioHandler, + std::unique_ptr input) { - auto & series = get(); + auto &series = get(); writable().IOHandler = ioHandler; series.iterations.linkHierarchy(writable()); - series.iterations.writable().ownKeyWithinParent = { "iterations" }; + series.iterations.writable().ownKeyWithinParent = {"iterations"}; series.m_name = input->name; @@ -486,10 +486,10 @@ void Series::init( series.m_filenamePostfix = input->filenamePostfix; series.m_filenamePadding = input->filenamePadding; - if( series.m_iterationEncoding == IterationEncoding::fileBased && + if (series.m_iterationEncoding == IterationEncoding::fileBased && !series.m_filenamePrefix.empty() && - std::isdigit( static_cast< unsigned char >( - *series.m_filenamePrefix.rbegin() ) ) 
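parseInput() above derives the iteration encoding from the file name via the regex "(.*)%(0[[:digit:]]+)?T(.*)". A stand-alone illustration of which names it treats as fileBased and how the padding group is captured; extension stripping is omitted here for brevity:

#include <iostream>
#include <regex>
#include <string>

int main()
{
    // same pattern as in Series::parseInput()
    std::regex pattern("(.*)%(0[[:digit:]]+)?T(.*)");
    for (std::string name : {"simData_%T", "simData_%06T", "simData"})
    {
        std::smatch m;
        if (std::regex_match(name, m, pattern))
            std::cout << name << " -> fileBased, prefix '" << m[1].str()
                      << "', padding '" << m[2].str() << "'" << std::endl;
        else
            std::cout << name << " -> groupBased" << std::endl;
    }
}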
) + std::isdigit( + static_cast(*series.m_filenamePrefix.rbegin()))) { std::cerr << R"END( [Warning] In file-based iteration encoding, it is strongly recommended to avoid @@ -501,50 +501,51 @@ Given file pattern: ')END" << series.m_name << "'" << std::endl; } - if(IOHandler()->m_frontendAccess == Access::READ_ONLY || IOHandler()->m_frontendAccess == Access::READ_WRITE ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY || + IOHandler()->m_frontendAccess == Access::READ_WRITE) { /* Allow creation of values in Containers and setting of Attributes * Would throw for Access::READ_ONLY */ auto oldType = IOHandler()->m_frontendAccess; - auto newType = const_cast< Access* >(&IOHandler()->m_frontendAccess); + auto newType = const_cast(&IOHandler()->m_frontendAccess); *newType = Access::READ_WRITE; - if( input->iterationEncoding == IterationEncoding::fileBased ) + if (input->iterationEncoding == IterationEncoding::fileBased) readFileBased(); else readGorVBased(); - if( series.iterations.empty() ) + if (series.iterations.empty()) { /* Access::READ_WRITE can be used to create a new Series * allow setting attributes in that case */ written() = false; - initDefaults( input->iterationEncoding ); + initDefaults(input->iterationEncoding); setIterationEncoding(input->iterationEncoding); written() = true; } *newType = oldType; - } else + } + else { - initDefaults( input->iterationEncoding ); + initDefaults(input->iterationEncoding); setIterationEncoding(input->iterationEncoding); } series.m_lastFlushSuccessful = true; } -void -Series::initDefaults( IterationEncoding ie ) +void Series::initDefaults(IterationEncoding ie) { - if( !containsAttribute("openPMD")) - setOpenPMD( getStandard() ); - if( !containsAttribute("openPMDextension")) + if (!containsAttribute("openPMD")) + setOpenPMD(getStandard()); + if (!containsAttribute("openPMDextension")) setOpenPMDextension(0); - if( !containsAttribute("basePath")) + if (!containsAttribute("basePath")) { - if( ie == IterationEncoding::variableBased ) + if (ie == IterationEncoding::variableBased) { setAttribute( "basePath", auxiliary::replace_first(BASEPATH, "/%T/", "")); @@ -554,36 +555,35 @@ Series::initDefaults( IterationEncoding ie ) setAttribute("basePath", std::string(BASEPATH)); } } - if( !containsAttribute("date")) - setDate( auxiliary::getDateString() ); - if( !containsAttribute("software")) - setSoftware( "openPMD-api", getVersion() ); + if (!containsAttribute("date")) + setDate(auxiliary::getDateString()); + if (!containsAttribute("software")) + setSoftware("openPMD-api", getVersion()); } -std::future< void > -Series::flush_impl( +std::future Series::flush_impl( iterations_iterator begin, iterations_iterator end, FlushLevel level, - bool flushIOHandler ) + bool flushIOHandler) { IOHandler()->m_flushLevel = level; - auto & series = get(); + auto &series = get(); series.m_lastFlushSuccessful = true; try { - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; - case IE::fileBased: - flushFileBased( begin, end ); - break; - case IE::groupBased: - case IE::variableBased: - flushGorVBased( begin, end ); - break; + case IE::fileBased: + flushFileBased(begin, end); + break; + case IE::groupBased: + case IE::variableBased: + flushGorVBased(begin, end); + break; } - if( flushIOHandler ) + if (flushIOHandler) { auto res = IOHandler()->flush(); IOHandler()->m_flushLevel = FlushLevel::InternalFlush; @@ -595,7 +595,7 @@ Series::flush_impl( return {}; } } - catch( ... ) + catch (...) 
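init() above warns when a file-based pattern's prefix ends in a digit, because the iteration number then fuses with the prefix in the on-disk name. A hedged sketch of the problematic case; the file name is invented and JSON is used only because it is always available:

#include <openPMD/openPMD.hpp>

int main()
{
    using namespace openPMD;
    // prefix "checkpoint2" ends in a digit: iteration 100 is written as
    // "checkpoint2100.json", so init() prints the warning above.
    // "checkpoint2_%T.json" would be unambiguous.
    Series series("checkpoint2%T.json", Access::CREATE);
    series.iterations[100];
    series.flush();
}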
{ IOHandler()->m_flushLevel = FlushLevel::InternalFlush; series.m_lastFlushSuccessful = false; @@ -603,19 +603,18 @@ Series::flush_impl( } } -void -Series::flushFileBased( iterations_iterator begin, iterations_iterator end ) +void Series::flushFileBased(iterations_iterator begin, iterations_iterator end) { - auto & series = get(); - if( end == begin ) + auto &series = get(); + if (end == begin) throw std::runtime_error( - "fileBased output can not be written with no iterations." ); + "fileBased output can not be written with no iterations."); - if( IOHandler()->m_frontendAccess == Access::READ_ONLY ) - for( auto it = begin; it != end; ++it ) + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: @@ -626,13 +625,13 @@ Series::flushFileBased( iterations_iterator begin, iterations_iterator end ) } // Phase 2 - if( it->second.get().m_closed == - internal::CloseStatus::ClosedInFrontend ) + if (it->second.get().m_closed == + internal::CloseStatus::ClosedInFrontend) { - Parameter< Operation::CLOSE_FILE > fClose; - IOHandler()->enqueue( - IOTask( &it->second, std::move( fClose ) ) ); - it->second.get().m_closed = internal::CloseStatus::ClosedInBackend; + Parameter fClose; + IOHandler()->enqueue(IOTask(&it->second, std::move(fClose))); + it->second.get().m_closed = + internal::CloseStatus::ClosedInBackend; } // Phase 3 @@ -641,10 +640,10 @@ Series::flushFileBased( iterations_iterator begin, iterations_iterator end ) else { bool allDirty = dirty(); - for( auto it = begin; it != end; ++it ) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: { @@ -656,11 +655,11 @@ Series::flushFileBased( iterations_iterator begin, iterations_iterator end ) series.iterations.written() = false; dirty() |= it->second.dirty(); - std::string filename = iterationFilename( it->first ); - it->second.flushFileBased( filename, it->first ); + std::string filename = iterationFilename(it->first); + it->second.flushFileBased(filename, it->first); series.iterations.flush( - auxiliary::replace_first( basePath(), "%T/", "" ) ); + auxiliary::replace_first(basePath(), "%T/", "")); flushAttributes(); break; @@ -670,35 +669,35 @@ Series::flushFileBased( iterations_iterator begin, iterations_iterator end ) } // Phase 2 - if( it->second.get().m_closed == - internal::CloseStatus::ClosedInFrontend ) + if (it->second.get().m_closed == + internal::CloseStatus::ClosedInFrontend) { - Parameter< Operation::CLOSE_FILE > fClose; - IOHandler()->enqueue( - IOTask( &it->second, std::move( fClose ) ) ); - it->second.get().m_closed = internal::CloseStatus::ClosedInBackend; + Parameter fClose; + IOHandler()->enqueue(IOTask(&it->second, std::move(fClose))); + it->second.get().m_closed = + internal::CloseStatus::ClosedInBackend; } // Phase 3 IOHandler()->flush(); /* reset the dirty bit for every iteration (i.e. 
file) - * otherwise only the first iteration will have updates attributes */ + * otherwise only the first iteration will have updates attributes + */ dirty() = allDirty; } dirty() = false; } } -void -Series::flushGorVBased( iterations_iterator begin, iterations_iterator end ) +void Series::flushGorVBased(iterations_iterator begin, iterations_iterator end) { - auto & series = get(); - if( IOHandler()->m_frontendAccess == Access::READ_ONLY ) - for( auto it = begin; it != end; ++it ) + auto &series = get(); + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: @@ -709,11 +708,12 @@ Series::flushGorVBased( iterations_iterator begin, iterations_iterator end ) } // Phase 2 - if( it->second.get().m_closed == - internal::CloseStatus::ClosedInFrontend ) + if (it->second.get().m_closed == + internal::CloseStatus::ClosedInFrontend) { // the iteration has no dedicated file in group-based mode - it->second.get().m_closed = internal::CloseStatus::ClosedInBackend; + it->second.get().m_closed = + internal::CloseStatus::ClosedInBackend; } // Phase 3 @@ -721,40 +721,40 @@ Series::flushGorVBased( iterations_iterator begin, iterations_iterator end ) } else { - if( !written() ) + if (!written()) { - Parameter< Operation::CREATE_FILE > fCreate; + Parameter fCreate; fCreate.name = series.m_name; fCreate.encoding = iterationEncoding(); IOHandler()->enqueue(IOTask(this, fCreate)); } series.iterations.flush( - auxiliary::replace_first( basePath(), "%T/", "" ) ); + auxiliary::replace_first(basePath(), "%T/", "")); - for( auto it = begin; it != end; ++it ) + for (auto it = begin; it != end; ++it) { // Phase 1 - switch( openIterationIfDirty( it->first, it->second ) ) + switch (openIterationIfDirty(it->first, it->second)) { using IO = IterationOpened; case IO::HasBeenOpened: - if( !it->second.written() ) + if (!it->second.written()) { - it->second.parent() = getWritable( &series.iterations ); + it->second.parent() = getWritable(&series.iterations); } - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; case IE::groupBased: - it->second.flushGroupBased( it->first ); + it->second.flushGroupBased(it->first); break; case IE::variableBased: - it->second.flushVariableBased( it->first ); + it->second.flushVariableBased(it->first); break; default: throw std::runtime_error( - "[Series] Internal control flow error" ); + "[Series] Internal control flow error"); } break; case IO::RemainsClosed: @@ -762,11 +762,12 @@ Series::flushGorVBased( iterations_iterator begin, iterations_iterator end ) } // Phase 2 - if( it->second.get().m_closed == - internal::CloseStatus::ClosedInFrontend ) + if (it->second.get().m_closed == + internal::CloseStatus::ClosedInFrontend) { // the iteration has no dedicated file in group-based mode - it->second.get().m_closed = internal::CloseStatus::ClosedInBackend; + it->second.get().m_closed = + internal::CloseStatus::ClosedInBackend; } } @@ -775,10 +776,9 @@ Series::flushGorVBased( iterations_iterator begin, iterations_iterator end ) } } -void -Series::flushMeshesPath() +void Series::flushMeshesPath() { - Parameter< Operation::WRITE_ATT > aWrite; + Parameter aWrite; aWrite.name = "meshesPath"; Attribute a = getAttribute("meshesPath"); aWrite.resource = a.getResource(); @@ -786,10 +786,9 @@ Series::flushMeshesPath() 
 IOHandler()->enqueue(IOTask(this, aWrite));
 }

-void
-Series::flushParticlesPath()
+void Series::flushParticlesPath()
 {
-    Parameter< Operation::WRITE_ATT > aWrite;
+    Parameter<Operation::WRITE_ATT> aWrite;
     aWrite.name = "particlesPath";
     Attribute a = getAttribute("particlesPath");
     aWrite.resource = a.getResource();
@@ -797,61 +796,62 @@ Series::flushParticlesPath()
     IOHandler()->enqueue(IOTask(this, aWrite));
 }

-void
-Series::readFileBased( )
+void Series::readFileBased()
 {
-    auto & series = get();
-    Parameter< Operation::OPEN_FILE > fOpen;
-    Parameter< Operation::READ_ATT > aRead;
+    auto &series = get();
+    Parameter<Operation::OPEN_FILE> fOpen;
+    Parameter<Operation::READ_ATT> aRead;
     fOpen.encoding = iterationEncoding();

-    if( !auxiliary::directory_exists(IOHandler()->directory) )
-        throw no_such_file_error("Supplied directory is not valid: " + IOHandler()->directory);
+    if (!auxiliary::directory_exists(IOHandler()->directory))
+        throw no_such_file_error(
+            "Supplied directory is not valid: " + IOHandler()->directory);

     auto isPartOfSeries = matcher(
-        series.m_filenamePrefix, series.m_filenamePadding,
-        series.m_filenamePostfix, series.m_format);
+        series.m_filenamePrefix,
+        series.m_filenamePadding,
+        series.m_filenamePostfix,
+        series.m_format);

     bool isContained;
     int padding;
     uint64_t iterationIndex;
-    std::set< int > paddings;
-    for( auto const& entry : auxiliary::list_directory(IOHandler()->directory) )
+    std::set<int> paddings;
+    for (auto const &entry : auxiliary::list_directory(IOHandler()->directory))
     {
         std::tie(isContained, padding, iterationIndex) = isPartOfSeries(entry);
-        if( isContained )
+        if (isContained)
         {
-            Iteration & i = series.iterations[ iterationIndex ];
-            i.deferParseAccess( {
-                std::to_string( iterationIndex ),
-                iterationIndex,
-                true,
-                entry } );
-            // TODO skip if the padding is exact the number of chars in an iteration?
+            Iteration &i = series.iterations[iterationIndex];
+            i.deferParseAccess(
+                {std::to_string(iterationIndex), iterationIndex, true, entry});
+            // TODO skip if the padding is exact the number of chars in an
+            // iteration?
             paddings.insert(padding);
         }
     }

-    if( series.iterations.empty() )
+    if (series.iterations.empty())
     {
-        /* Frontend access type might change during Series::read() to allow parameter modification.
-         * Backend access type stays unchanged for the lifetime of a Series. */
-        if(IOHandler()->m_backendAccess == Access::READ_ONLY )
+        /* Frontend access type might change during Series::read() to allow
+         * parameter modification. Backend access type stays unchanged for the
+         * lifetime of a Series.
*/ + if (IOHandler()->m_backendAccess == Access::READ_ONLY) throw no_such_file_error("No matching iterations found: " + name()); else - std::cerr << "No matching iterations found: " << name() << std::endl; + std::cerr << "No matching iterations found: " << name() + << std::endl; } - auto readIterationEagerly = []( Iteration & iteration ) - { + auto readIterationEagerly = [](Iteration &iteration) { iteration.runDeferredParseAccess(); - Parameter< Operation::CLOSE_FILE > fClose; - iteration.IOHandler()->enqueue( IOTask( &iteration, fClose ) ); + Parameter fClose; + iteration.IOHandler()->enqueue(IOTask(&iteration, fClose)); iteration.IOHandler()->flush(); iteration.get().m_closed = internal::CloseStatus::ClosedTemporarily; }; - if( series.m_parseLazily ) + if (series.m_parseLazily) { - for( auto & iteration : series.iterations ) + for (auto &iteration : series.iterations) { iteration.second.get().m_closed = internal::CloseStatus::ParseAccessDeferred; @@ -859,33 +859,36 @@ Series::readFileBased( ) // open the last iteration, just to parse Series attributes auto getLastIteration = series.iterations.end(); getLastIteration--; - auto & lastIteration = getLastIteration->second; - readIterationEagerly( lastIteration ); + auto &lastIteration = getLastIteration->second; + readIterationEagerly(lastIteration); } else { - for( auto & iteration : series.iterations ) + for (auto &iteration : series.iterations) { - readIterationEagerly( iteration.second ); + readIterationEagerly(iteration.second); } } - if( paddings.size() == 1u ) + if (paddings.size() == 1u) series.m_filenamePadding = *paddings.begin(); - /* Frontend access type might change during Series::read() to allow parameter modification. - * Backend access type stays unchanged for the lifetime of a Series. */ - if( paddings.size() > 1u && IOHandler()->m_backendAccess == Access::READ_WRITE ) - throw std::runtime_error("Cannot write to a series with inconsistent iteration padding. " - "Please specify '%0T' or open as read-only."); + /* Frontend access type might change during Series::read() to allow + * parameter modification. Backend access type stays unchanged for the + * lifetime of a Series. */ + if (paddings.size() > 1u && + IOHandler()->m_backendAccess == Access::READ_WRITE) + throw std::runtime_error( + "Cannot write to a series with inconsistent iteration padding. 
" + "Please specify '%0T' or open as read-only."); } -void Series::readOneIterationFileBased( std::string const & filePath ) +void Series::readOneIterationFileBased(std::string const &filePath) { - auto & series = get(); + auto &series = get(); - Parameter< Operation::OPEN_FILE > fOpen; - Parameter< Operation::READ_ATT > aRead; + Parameter fOpen; + Parameter aRead; fOpen.name = filePath; IOHandler()->enqueue(IOTask(this, fOpen)); @@ -896,23 +899,22 @@ void Series::readOneIterationFileBased( std::string const & filePath ) using DT = Datatype; aRead.name = "iterationEncoding"; - IOHandler()->enqueue( IOTask( this, aRead ) ); + IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::STRING) { - std::string encoding = - Attribute( *aRead.resource ).get< std::string >(); - if( encoding == "fileBased" ) + std::string encoding = Attribute(*aRead.resource).get(); + if (encoding == "fileBased") series.m_iterationEncoding = IterationEncoding::fileBased; - else if( encoding == "groupBased" ) + else if (encoding == "groupBased") { series.m_iterationEncoding = IterationEncoding::groupBased; std::cerr << "Series constructor called with iteration " - "regex '%T' suggests loading a " - << "time series with fileBased iteration " - "encoding. Loaded file is groupBased.\n"; + "regex '%T' suggests loading a " + << "time series with fileBased iteration " + "encoding. Loaded file is groupBased.\n"; } - else if( encoding == "variableBased" ) + else if (encoding == "variableBased") { /* * Unlike if the file were group-based, this one doesn't work @@ -922,145 +924,150 @@ void Series::readOneIterationFileBased( std::string const & filePath ) "Series constructor called with iteration " "regex '%T' suggests loading a " "time series with fileBased iteration " - "encoding. Loaded file is variableBased." ); + "encoding. 
Loaded file is variableBased."); } else - throw std::runtime_error( - "Unknown iterationEncoding: " + encoding ); - setAttribute( "iterationEncoding", encoding ); + throw std::runtime_error("Unknown iterationEncoding: " + encoding); + setAttribute("iterationEncoding", encoding); } else - throw std::runtime_error( "Unexpected Attribute datatype " - "for 'iterationEncoding'" ); + throw std::runtime_error( + "Unexpected Attribute datatype " + "for 'iterationEncoding'"); aRead.name = "iterationFormat"; - IOHandler()->enqueue( IOTask( this, aRead ) ); + IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::STRING) { written() = false; - setIterationFormat( - Attribute( *aRead.resource ).get< std::string >() ); + setIterationFormat(Attribute(*aRead.resource).get()); written() = true; } else throw std::runtime_error( - "Unexpected Attribute datatype for 'iterationFormat'" ); + "Unexpected Attribute datatype for 'iterationFormat'"); - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; std::string version = openPMD(); - if( version == "1.0.0" || version == "1.0.1" || version == "1.1.0" ) + if (version == "1.0.0" || version == "1.0.1" || version == "1.1.0") pOpen.path = auxiliary::replace_first(basePath(), "/%T/", ""); else throw std::runtime_error("Unknown openPMD version - " + version); IOHandler()->enqueue(IOTask(&series.iterations, pOpen)); - readAttributes( ReadMode::IgnoreExisting ); - series.iterations.readAttributes(ReadMode::OverrideExisting ); + readAttributes(ReadMode::IgnoreExisting); + series.iterations.readAttributes(ReadMode::OverrideExisting); } -void -Series::readGorVBased( bool do_init ) +void Series::readGorVBased(bool do_init) { - auto & series = get(); - Parameter< Operation::OPEN_FILE > fOpen; + auto &series = get(); + Parameter fOpen; fOpen.name = series.m_name; fOpen.encoding = iterationEncoding(); IOHandler()->enqueue(IOTask(this, fOpen)); IOHandler()->flush(); - if( do_init ) + if (do_init) { readBase(); using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "iterationEncoding"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::STRING) { - std::string encoding = Attribute(*aRead.resource).get< std::string >(); - if( encoding == "groupBased" ) + std::string encoding = + Attribute(*aRead.resource).get(); + if (encoding == "groupBased") series.m_iterationEncoding = IterationEncoding::groupBased; - else if( encoding == "variableBased" ) + else if (encoding == "variableBased") series.m_iterationEncoding = IterationEncoding::variableBased; - else if( encoding == "fileBased" ) + else if (encoding == "fileBased") { series.m_iterationEncoding = IterationEncoding::fileBased; - std::cerr << "Series constructor called with explicit iteration suggests loading a " - << "single file with groupBased iteration encoding. Loaded file is fileBased.\n"; + std::cerr << "Series constructor called with explicit " + "iteration suggests loading a " + << "single file with groupBased iteration encoding. " + "Loaded file is fileBased.\n"; /* * We'll want the openPMD API to continue series.m_name to open * the file instead of piecing the name together via * prefix-padding-postfix things. 
*/ series.m_overrideFilebasedFilename = series.m_name; - } else - throw std::runtime_error("Unknown iterationEncoding: " + encoding); + } + else + throw std::runtime_error( + "Unknown iterationEncoding: " + encoding); setAttribute("iterationEncoding", encoding); } else - throw std::runtime_error("Unexpected Attribute datatype for 'iterationEncoding'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'iterationEncoding'"); aRead.name = "iterationFormat"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::STRING) { written() = false; - setIterationFormat(Attribute(*aRead.resource).get< std::string >()); + setIterationFormat(Attribute(*aRead.resource).get()); written() = true; } else - throw std::runtime_error("Unexpected Attribute datatype for 'iterationFormat'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'iterationFormat'"); } - Parameter< Operation::OPEN_PATH > pOpen; + Parameter pOpen; std::string version = openPMD(); - if( version == "1.0.0" || version == "1.0.1" || version == "1.1.0" ) + if (version == "1.0.0" || version == "1.0.1" || version == "1.1.0") pOpen.path = auxiliary::replace_first(basePath(), "/%T/", ""); else throw std::runtime_error("Unknown openPMD version - " + version); IOHandler()->enqueue(IOTask(&series.iterations, pOpen)); - readAttributes( ReadMode::IgnoreExisting ); + readAttributes(ReadMode::IgnoreExisting); /* * 'snapshot' changes over steps, so reread that. */ - series.iterations.readAttributes( ReadMode::OverrideExisting ); + series.iterations.readAttributes(ReadMode::OverrideExisting); /* obtain all paths inside the basepath (i.e. all iterations) */ - Parameter< Operation::LIST_PATHS > pList; + Parameter pList; IOHandler()->enqueue(IOTask(&series.iterations, pList)); IOHandler()->flush(); - auto readSingleIteration = - [&series, &pOpen, this] - (uint64_t index, std::string path, bool guardAgainstRereading ) - { - if( series.iterations.contains( index ) ) + auto readSingleIteration = [&series, &pOpen, this]( + uint64_t index, + std::string path, + bool guardAgainstRereading) { + if (series.iterations.contains(index)) { // maybe re-read - auto & i = series.iterations.at( index ); + auto &i = series.iterations.at(index); // i.written(): the iteration has already been parsed // reparsing is not needed - if( guardAgainstRereading && i.written() ) + if (guardAgainstRereading && i.written()) { return; } - if( i.get().m_closed != internal::CloseStatus::ParseAccessDeferred ) + if (i.get().m_closed != internal::CloseStatus::ParseAccessDeferred) { pOpen.path = path; - IOHandler()->enqueue( IOTask( &i, pOpen ) ); - i.reread( path ); + IOHandler()->enqueue(IOTask(&i, pOpen)); + i.reread(path); } } else { // parse for the first time, resp. delay the parsing process - Iteration & i = series.iterations[ index ]; - i.deferParseAccess( { path, index, false, "" } ); - if( !series.m_parseLazily ) + Iteration &i = series.iterations[index]; + i.deferParseAccess({path, index, false, ""}); + if (!series.m_parseLazily) { i.runDeferredParseAccess(); i.get().m_closed = internal::CloseStatus::Open; @@ -1072,149 +1079,148 @@ Series::readGorVBased( bool do_init ) } }; - switch( iterationEncoding() ) + switch (iterationEncoding()) { case IterationEncoding::groupBased: /* * Sic! This happens when a file-based Series is opened in group-based mode. 
*/ case IterationEncoding::fileBased: - for( auto const & it : *pList.paths ) + for (auto const &it : *pList.paths) { - uint64_t index = std::stoull( it ); - readSingleIteration( index, it, true ); + uint64_t index = std::stoull(it); + readSingleIteration(index, it, true); } break; - case IterationEncoding::variableBased: - { + case IterationEncoding::variableBased: { uint64_t index = 0; - if( series.iterations.containsAttribute( "snapshot" ) ) + if (series.iterations.containsAttribute("snapshot")) { - index = series.iterations - .getAttribute( "snapshot" ) - .get< uint64_t >(); + index = series.iterations.getAttribute("snapshot").get(); } - readSingleIteration( index, "", false ); + readSingleIteration(index, "", false); break; } } } -void -Series::readBase() +void Series::readBase() { - auto & series = get(); + auto &series = get(); using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "openPMD"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) - setOpenPMD(Attribute(*aRead.resource).get< std::string >()); + if (*aRead.dtype == DT::STRING) + setOpenPMD(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'openPMD'"); aRead.name = "openPMDextension"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == determineDatatype< uint32_t >() ) - setOpenPMDextension(Attribute(*aRead.resource).get< uint32_t >()); + if (*aRead.dtype == determineDatatype()) + setOpenPMDextension(Attribute(*aRead.resource).get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'openPMDextension'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'openPMDextension'"); aRead.name = "basePath"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) - setAttribute("basePath", Attribute(*aRead.resource).get< std::string >()); + if (*aRead.dtype == DT::STRING) + setAttribute("basePath", Attribute(*aRead.resource).get()); else - throw std::runtime_error("Unexpected Attribute datatype for 'basePath'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'basePath'"); - Parameter< Operation::LIST_ATTS > aList; + Parameter aList; IOHandler()->enqueue(IOTask(this, aList)); IOHandler()->flush(); - if( std::count(aList.attributes->begin(), aList.attributes->end(), "meshesPath") == 1 ) + if (std::count( + aList.attributes->begin(), aList.attributes->end(), "meshesPath") == + 1) { aRead.name = "meshesPath"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::STRING) { /* allow setting the meshes path after completed IO */ - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.meshes.written() = false; - setMeshesPath(Attribute(*aRead.resource).get< std::string >()); + setMeshesPath(Attribute(*aRead.resource).get()); - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.meshes.written() = true; } else - throw std::runtime_error("Unexpected Attribute datatype for 'meshesPath'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'meshesPath'"); } - if( std::count(aList.attributes->begin(), aList.attributes->end(), "particlesPath") == 1 ) + if (std::count( + aList.attributes->begin(), + aList.attributes->end(), + "particlesPath") == 1) { aRead.name = "particlesPath"; IOHandler()->enqueue(IOTask(this, aRead)); 
IOHandler()->flush(); - if( *aRead.dtype == DT::STRING ) + if (*aRead.dtype == DT::STRING) { /* allow setting the meshes path after completed IO */ - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.particles.written() = false; - setParticlesPath(Attribute(*aRead.resource).get< std::string >()); + setParticlesPath(Attribute(*aRead.resource).get()); - - for( auto& it : series.iterations ) + for (auto &it : series.iterations) it.second.particles.written() = true; } else - throw std::runtime_error("Unexpected Attribute datatype for 'particlesPath'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'particlesPath'"); } } -std::string -Series::iterationFilename( uint64_t i ) +std::string Series::iterationFilename(uint64_t i) { - auto & series = get(); - if( series.m_overrideFilebasedFilename.has_value() ) + auto &series = get(); + if (series.m_overrideFilebasedFilename.has_value()) { return series.m_overrideFilebasedFilename.value(); } - std::stringstream iteration( "" ); - iteration << std::setw( series.m_filenamePadding ) - << std::setfill( '0' ) << i; - return series.m_filenamePrefix + iteration.str() - + series.m_filenamePostfix; + std::stringstream iteration(""); + iteration << std::setw(series.m_filenamePadding) << std::setfill('0') << i; + return series.m_filenamePrefix + iteration.str() + series.m_filenamePostfix; } -Series::iterations_iterator -Series::indexOf( Iteration const & iteration ) +Series::iterations_iterator Series::indexOf(Iteration const &iteration) { - auto & series = get(); - for( auto it = series.iterations.begin(); it != series.iterations.end(); - ++it ) + auto &series = get(); + for (auto it = series.iterations.begin(); it != series.iterations.end(); + ++it) { - if( &it->second.Attributable::get() == &iteration.Attributable::get() ) + if (&it->second.Attributable::get() == &iteration.Attributable::get()) { return it; } } throw std::runtime_error( - "[Iteration::close] Iteration not found in Series." ); + "[Iteration::close] Iteration not found in Series."); } -AdvanceStatus -Series::advance( +AdvanceStatus Series::advance( AdvanceMode mode, - internal::AttributableData & file, + internal::AttributableData &file, iterations_iterator begin, - Iteration & iteration ) + Iteration &iteration) { - auto & series = get(); + auto &series = get(); auto end = begin; ++end; /* @@ -1225,25 +1231,24 @@ Series::advance( * In order to avoid having those tasks automatically appended by * flush_impl(), set CloseStatus to Open for now. */ - auto & itData = iteration.get(); + auto &itData = iteration.get(); internal::CloseStatus oldCloseStatus = itData.m_closed; - if( oldCloseStatus == internal::CloseStatus::ClosedInFrontend ) + if (oldCloseStatus == internal::CloseStatus::ClosedInFrontend) { itData.m_closed = internal::CloseStatus::Open; } - flush_impl( - begin, end, FlushLevel::UserFlush, /* flushIOHandler = */ false ); + flush_impl(begin, end, FlushLevel::UserFlush, /* flushIOHandler = */ false); - if( oldCloseStatus == internal::CloseStatus::ClosedInFrontend ) + if (oldCloseStatus == internal::CloseStatus::ClosedInFrontend) { // Series::flush() would normally turn a `ClosedInFrontend` into // a `ClosedInBackend`. Do that manually. 
itData.m_closed = internal::CloseStatus::ClosedInBackend; } - else if( + else if ( oldCloseStatus == internal::CloseStatus::ClosedInBackend && - series.m_iterationEncoding == IterationEncoding::fileBased ) + series.m_iterationEncoding == IterationEncoding::fileBased) { /* * In file-based iteration encoding, we want to avoid accidentally @@ -1253,56 +1258,51 @@ Series::advance( return AdvanceStatus::OK; } - Parameter< Operation::ADVANCE > param; - if( itData.m_closed == internal::CloseStatus::ClosedTemporarily && - series.m_iterationEncoding == IterationEncoding::fileBased ) + Parameter param; + if (itData.m_closed == internal::CloseStatus::ClosedTemporarily && + series.m_iterationEncoding == IterationEncoding::fileBased) { /* * If the Series has file-based iteration layout and the file has not * been opened by flushFileFileBased(), there's no use in nagging the * backend to do anything. */ - param.status = std::make_shared< AdvanceStatus >( AdvanceStatus::OK ); + param.status = std::make_shared(AdvanceStatus::OK); } else { param.mode = mode; - IOTask task( &file.m_writable, param ); - IOHandler()->enqueue( task ); + IOTask task(&file.m_writable, param); + IOHandler()->enqueue(task); } - - if( oldCloseStatus == internal::CloseStatus::ClosedInFrontend && - mode == AdvanceMode::ENDSTEP ) + if (oldCloseStatus == internal::CloseStatus::ClosedInFrontend && + mode == AdvanceMode::ENDSTEP) { using IE = IterationEncoding; - switch( series.m_iterationEncoding ) + switch (series.m_iterationEncoding) { - case IE::fileBased: - { - if( itData.m_closed != - internal::CloseStatus::ClosedTemporarily ) - { - Parameter< Operation::CLOSE_FILE > fClose; - IOHandler()->enqueue( - IOTask( &iteration, std::move( fClose ) ) ); - } - itData.m_closed = internal::CloseStatus::ClosedInBackend; - break; - } - case IE::groupBased: + case IE::fileBased: { + if (itData.m_closed != internal::CloseStatus::ClosedTemporarily) { - // We can now put some groups to rest - Parameter< Operation::CLOSE_PATH > fClose; - IOHandler()->enqueue( IOTask( &iteration, std::move( fClose ) ) ); - // In group-based iteration layout, files are - // not closed on a per-iteration basis - // We will treat it as such nonetheless - itData.m_closed = internal::CloseStatus::ClosedInBackend; - break; + Parameter fClose; + IOHandler()->enqueue(IOTask(&iteration, std::move(fClose))); } - case IE::variableBased: // no action necessary - break; + itData.m_closed = internal::CloseStatus::ClosedInBackend; + break; + } + case IE::groupBased: { + // We can now put some groups to rest + Parameter fClose; + IOHandler()->enqueue(IOTask(&iteration, std::move(fClose))); + // In group-based iteration layout, files are + // not closed on a per-iteration basis + // We will treat it as such nonetheless + itData.m_closed = internal::CloseStatus::ClosedInBackend; + break; + } + case IE::variableBased: // no action necessary + break; } } @@ -1314,7 +1314,7 @@ Series::advance( { IOHandler()->flush(); } - catch( ... ) + catch (...) { IOHandler()->m_flushLevel = FlushLevel::InternalFlush; throw; @@ -1324,39 +1324,39 @@ Series::advance( return *param.status; } -auto Series::openIterationIfDirty( uint64_t index, Iteration iteration ) +auto Series::openIterationIfDirty(uint64_t index, Iteration iteration) -> IterationOpened { /* * Check side conditions on accessing iterations, and if they are fulfilled, * forward function params to openIteration(). 
*/ - if( iteration.get().m_closed == internal::CloseStatus::ParseAccessDeferred ) + if (iteration.get().m_closed == internal::CloseStatus::ParseAccessDeferred) { return IterationOpened::RemainsClosed; } bool const dirtyRecursive = iteration.dirtyRecursive(); - if( iteration.get().m_closed == internal::CloseStatus::ClosedInBackend ) + if (iteration.get().m_closed == internal::CloseStatus::ClosedInBackend) { // file corresponding with the iteration has previously been // closed and fully flushed // verify that there have been no further accesses - if( !iteration.written() ) + if (!iteration.written()) { throw std::runtime_error( "[Series] Closed iteration has not been written. This " - "is an internal error." ); + "is an internal error."); } - if( dirtyRecursive ) + if (dirtyRecursive) { throw std::runtime_error( "[Series] Detected illegal access to iteration that " - "has been closed previously." ); + "has been closed previously."); } return IterationOpened::RemainsClosed; } - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; case IE::fileBased: @@ -1367,10 +1367,10 @@ auto Series::openIterationIfDirty( uint64_t index, Iteration iteration ) * 2. Or the Series has been changed globally in a manner that * requires adapting all iterations. */ - if( dirtyRecursive || this->dirty() ) + if (dirtyRecursive || this->dirty()) { // openIteration() will update the close status - openIteration( index, iteration ); + openIteration(index, iteration); return IterationOpened::HasBeenOpened; } break; @@ -1380,22 +1380,22 @@ auto Series::openIterationIfDirty( uint64_t index, Iteration iteration ) // this makes groupBased encoding safer for parallel usage // (variable-based encoding runs in lockstep anyway) // openIteration() will update the close status - openIteration( index, iteration ); + openIteration(index, iteration); return IterationOpened::HasBeenOpened; } return IterationOpened::RemainsClosed; } -void Series::openIteration( uint64_t index, Iteration iteration ) +void Series::openIteration(uint64_t index, Iteration iteration) { auto oldStatus = iteration.get().m_closed; - switch( oldStatus ) + switch (oldStatus) { using CL = internal::CloseStatus; case CL::ClosedInBackend: throw std::runtime_error( "[Series] Detected illegal access to iteration that " - "has been closed previously." ); + "has been closed previously."); case CL::ParseAccessDeferred: case CL::Open: case CL::ClosedTemporarily: @@ -1412,7 +1412,7 @@ void Series::openIteration( uint64_t index, Iteration iteration ) * Use two nested switches anyway to ensure compiler warnings upon adding * values to the enums. */ - switch( iterationEncoding() ) + switch (iterationEncoding()) { using IE = IterationEncoding; case IE::fileBased: { @@ -1425,29 +1425,29 @@ void Series::openIteration( uint64_t index, Iteration iteration ) * Similarly, in Create mode, the iteration must first be created * before it is possible to open it. 
*/ - if( !iteration.written() && - ( IOHandler()->m_frontendAccess == Access::CREATE || - oldStatus != internal::CloseStatus::ParseAccessDeferred ) ) + if (!iteration.written() && + (IOHandler()->m_frontendAccess == Access::CREATE || + oldStatus != internal::CloseStatus::ParseAccessDeferred)) { // nothing to do, file will be opened by writing routines break; } - auto & series = get(); + auto &series = get(); // open the iteration's file again - Parameter< Operation::OPEN_FILE > fOpen; + Parameter fOpen; fOpen.encoding = iterationEncoding(); - fOpen.name = iterationFilename( index ); - IOHandler()->enqueue( IOTask( this, fOpen ) ); + fOpen.name = iterationFilename(index); + IOHandler()->enqueue(IOTask(this, fOpen)); /* open base path */ - Parameter< Operation::OPEN_PATH > pOpen; - pOpen.path = auxiliary::replace_first( basePath(), "%T/", "" ); - IOHandler()->enqueue( IOTask( &series.iterations, pOpen ) ); + Parameter pOpen; + pOpen.path = auxiliary::replace_first(basePath(), "%T/", ""); + IOHandler()->enqueue(IOTask(&series.iterations, pOpen)); /* open iteration path */ pOpen.path = iterationEncoding() == IterationEncoding::variableBased ? "" - : std::to_string( index ); - IOHandler()->enqueue( IOTask( &iteration, pOpen ) ); + : std::to_string(index); + IOHandler()->enqueue(IOTask(&iteration, pOpen)); break; } case IE::groupBased: @@ -1463,13 +1463,13 @@ namespace * Look up if the specified key is contained in the JSON dataset. * If yes, read it into the specified location. */ - template< typename From, typename Dest = From > - void getJsonOption( - json::TracingJSON & config, std::string const & key, Dest & dest ) + template + void + getJsonOption(json::TracingJSON &config, std::string const &key, Dest &dest) { - if( config.json().contains( key ) ) + if (config.json().contains(key)) { - dest = config[ key ].json().get< From >(); + dest = config[key].json().get(); } } @@ -1478,53 +1478,52 @@ namespace * Numbers and booleans are converted to their string representation. * The string is converted to lower case. */ - template< typename Dest = std::string > + template void getJsonOptionLowerCase( - json::TracingJSON & config, std::string const & key, Dest & dest ) + json::TracingJSON &config, std::string const &key, Dest &dest) { - if( config.json().contains( key ) ) + if (config.json().contains(key)) { auto maybeString = - json::asLowerCaseStringDynamic( config[ key ].json() ); - if( maybeString.has_value() ) + json::asLowerCaseStringDynamic(config[key].json()); + if (maybeString.has_value()) { - dest = std::move( maybeString.value() ); + dest = std::move(maybeString.value()); } else { throw error::BackendConfigSchema( - { key }, "Must be convertible to string type." 
); + {key}, "Must be convertible to string type."); } } } -} +} // namespace -template< typename TracingJSON > -void Series::parseJsonOptions( - TracingJSON & options, ParsedInput & input ) +template +void Series::parseJsonOptions(TracingJSON &options, ParsedInput &input) { - auto & series = get(); - getJsonOption< bool >( - options, "defer_iteration_parsing", series.m_parseLazily ); + auto &series = get(); + getJsonOption( + options, "defer_iteration_parsing", series.m_parseLazily); // backend key { - std::map< std::string, Format > const backendDescriptors{ - { "hdf5", Format::HDF5 }, - { "adios1", Format::ADIOS1 }, - { "adios2", Format::ADIOS2 }, - { "json", Format::JSON } }; + std::map const backendDescriptors{ + {"hdf5", Format::HDF5}, + {"adios1", Format::ADIOS1}, + {"adios2", Format::ADIOS2}, + {"json", Format::JSON}}; std::string backend; - getJsonOptionLowerCase( options, "backend", backend ); - if( !backend.empty() ) + getJsonOptionLowerCase(options, "backend", backend); + if (!backend.empty()) { - auto it = backendDescriptors.find( backend ); - if( it != backendDescriptors.end() ) + auto it = backendDescriptors.find(backend); + if (it != backendDescriptors.end()) { - if( input.format != Format::DUMMY && - suffix( input.format ) != suffix( it->second ) ) + if (input.format != Format::DUMMY && + suffix(input.format) != suffix(it->second)) { std::cerr << "[Warning] Supplied filename extension '" - << suffix( input.format ) + << suffix(input.format) << "' contradicts the backend specified via the " "'backend' key. Will go on with backend " << it->first << "." << std::endl; @@ -1534,32 +1533,32 @@ void Series::parseJsonOptions( else { throw error::BackendConfigSchema( - { "backend" }, "Unknown backend specified: " + backend ); + {"backend"}, "Unknown backend specified: " + backend); } } } // iteration_encoding key { - std::map< std::string, IterationEncoding > const ieDescriptors{ - { "file_based", IterationEncoding::fileBased }, - { "group_based", IterationEncoding::groupBased }, - { "variable_based", IterationEncoding::variableBased } }; + std::map const ieDescriptors{ + {"file_based", IterationEncoding::fileBased}, + {"group_based", IterationEncoding::groupBased}, + {"variable_based", IterationEncoding::variableBased}}; std::string iterationEncoding; getJsonOptionLowerCase( - options, "iteration_encoding", iterationEncoding ); - if( !iterationEncoding.empty() ) + options, "iteration_encoding", iterationEncoding); + if (!iterationEncoding.empty()) { - auto it = ieDescriptors.find( iterationEncoding ); - if( it != ieDescriptors.end() ) + auto it = ieDescriptors.find(iterationEncoding); + if (it != ieDescriptors.end()) { input.iterationEncoding = it->second; } else { throw error::BackendConfigSchema( - { "iteration_encoding" }, + {"iteration_encoding"}, "Unknown iteration encoding specified: " + - iterationEncoding ); + iterationEncoding); } } } @@ -1567,86 +1566,82 @@ void Series::parseJsonOptions( namespace internal { -SeriesData::~SeriesData() -{ - // we must not throw in a destructor - try + SeriesData::~SeriesData() { - // WriteIterations gets the first shot at flushing - this->m_writeIterations = std::optional< WriteIterations >(); - /* - * Scenario: A user calls `Series::flush()` but does not check for - * thrown exceptions. The exception will propagate further up, usually - * thereby popping the stack frame that holds the `Series` object. - * `Series::~Series()` will run. This check avoids that the `Series` is - * needlessly flushed a second time. 
Otherwise, error messages can get - * very confusing. - */ - if( this->m_lastFlushSuccessful ) + // we must not throw in a destructor + try { - Series impl{ { this, []( auto const * ){} } }; - impl.flush(); + // WriteIterations gets the first shot at flushing + this->m_writeIterations = std::optional(); + /* + * Scenario: A user calls `Series::flush()` but does not check for + * thrown exceptions. The exception will propagate further up, + * usually thereby popping the stack frame that holds the `Series` + * object. `Series::~Series()` will run. This check avoids that the + * `Series` is needlessly flushed a second time. Otherwise, error + * messages can get very confusing. + */ + if (this->m_lastFlushSuccessful) + { + Series impl{{this, [](auto const *) {}}}; + impl.flush(); + } + } + catch (std::exception const &ex) + { + std::cerr << "[~Series] An error occurred: " << ex.what() + << std::endl; + } + catch (...) + { + std::cerr << "[~Series] An error occurred." << std::endl; } } - catch( std::exception const & ex ) - { - std::cerr << "[~Series] An error occurred: " << ex.what() << std::endl; - } - catch( ... ) - { - std::cerr << "[~Series] An error occurred." << std::endl; - } -} } // namespace internal -Series::Series() : Attributable{ nullptr }, iterations{} -{ -} +Series::Series() : Attributable{nullptr}, iterations{} +{} -Series::Series( std::shared_ptr< internal::SeriesData > data ) - : Attributable{ data } - , m_series{ std::move( data ) } +Series::Series(std::shared_ptr data) + : Attributable{data}, m_series{std::move(data)} { iterations = m_series->iterations; } #if openPMD_HAVE_MPI Series::Series( - std::string const & filepath, + std::string const &filepath, Access at, MPI_Comm comm, - std::string const & options ) - : Attributable{ nullptr } - , m_series{ new internal::SeriesData } + std::string const &options) + : Attributable{nullptr}, m_series{new internal::SeriesData} { - Attributable::setData( m_series ); + Attributable::setData(m_series); iterations = m_series->iterations; - json::TracingJSON optionsJson = json::parseOptions( - options, comm, /* considerFiles = */ true ); - auto input = parseInput( filepath ); - parseJsonOptions( optionsJson, *input ); - auto handler = createIOHandler( - input->path, at, input->format, comm, optionsJson ); - init( handler, std::move( input ) ); - json::warnGlobalUnusedOptions( optionsJson ); + json::TracingJSON optionsJson = + json::parseOptions(options, comm, /* considerFiles = */ true); + auto input = parseInput(filepath); + parseJsonOptions(optionsJson, *input); + auto handler = + createIOHandler(input->path, at, input->format, comm, optionsJson); + init(handler, std::move(input)); + json::warnGlobalUnusedOptions(optionsJson); } #endif Series::Series( - std::string const & filepath, Access at, std::string const & options ) - : Attributable{ nullptr } - , m_series{ new internal::SeriesData } + std::string const &filepath, Access at, std::string const &options) + : Attributable{nullptr}, m_series{new internal::SeriesData} { - Attributable::setData( m_series ); + Attributable::setData(m_series); iterations = m_series->iterations; - json::TracingJSON optionsJson = json::parseOptions( - options, /* considerFiles = */ true ); - auto input = parseInput( filepath ); - parseJsonOptions( optionsJson, *input ); - auto handler = createIOHandler( - input->path, at, input->format, optionsJson ); - init( handler, std::move( input ) ); - json::warnGlobalUnusedOptions( optionsJson ); + json::TracingJSON optionsJson = + json::parseOptions(options, /* 
considerFiles = */ true); + auto input = parseInput(filepath); + parseJsonOptions(optionsJson, *input); + auto handler = createIOHandler(input->path, at, input->format, optionsJson); + init(handler, std::move(input)); + json::warnGlobalUnusedOptions(optionsJson); } Series::operator bool() const @@ -1658,51 +1653,52 @@ ReadIterations Series::readIterations() { // Use private constructor instead of copy constructor to avoid // object slicing - return { this->m_series }; + return {this->m_series}; } -WriteIterations -Series::writeIterations() +WriteIterations Series::writeIterations() { - auto & series = get(); - if( !series.m_writeIterations.has_value() ) + auto &series = get(); + if (!series.m_writeIterations.has_value()) { - series.m_writeIterations = WriteIterations( this->iterations ); + series.m_writeIterations = WriteIterations(this->iterations); } return series.m_writeIterations.value(); } namespace { - std::string - cleanFilename(std::string const &filename, Format f) { - switch (f) { - case Format::HDF5: - case Format::ADIOS1: - case Format::ADIOS2: - case Format::ADIOS2_SST: - case Format::ADIOS2_SSC: - case Format::JSON: - return auxiliary::replace_last(filename, suffix(f), ""); - default: - return filename; + std::string cleanFilename(std::string const &filename, Format f) + { + switch (f) + { + case Format::HDF5: + case Format::ADIOS1: + case Format::ADIOS2: + case Format::ADIOS2_SST: + case Format::ADIOS2_SSC: + case Format::JSON: + return auxiliary::replace_last(filename, suffix(f), ""); + default: + return filename; } } std::function - buildMatcher(std::string const ®exPattern, int padding) { + buildMatcher(std::string const ®exPattern, int padding) + { std::regex pattern(regexPattern); return [pattern, padding](std::string const &filename) -> Match { std::smatch regexMatches; bool match = std::regex_match(filename, regexMatches, pattern); - int processedPadding = padding != 0 - ? padding - : ( match ? regexMatches[ 1 ].length() : 0 ); + int processedPadding = + padding != 0 ? padding : (match ? regexMatches[1].length() : 0); return { match, processedPadding, - match ? std::stoull( regexMatches[ 1 ] ) : 0 }; }; + match ? 
std::stoull(regexMatches[1]) : 0}; + }; } std::function matcher( @@ -1711,8 +1707,8 @@ namespace std::string const &postfix, Format f) { - std::string filenameSuffix = suffix( f ); - if( filenameSuffix.empty() ) + std::string filenameSuffix = suffix(f); + if (filenameSuffix.empty()) { return [](std::string const &) -> Match { return {false, 0, 0}; }; } @@ -1738,5 +1734,5 @@ namespace nameReg += postfix + filenameSuffix + "$"; return buildMatcher(nameReg, padding); } -} // namespace [anonymous] +} // namespace } // namespace openPMD diff --git a/src/WriteIterations.cpp b/src/WriteIterations.cpp index 7e90a33391..be2e72f47f 100644 --- a/src/WriteIterations.cpp +++ b/src/WriteIterations.cpp @@ -25,54 +25,51 @@ namespace openPMD { -WriteIterations::SharedResources::SharedResources( iterations_t _iterations ) - : iterations( std::move( _iterations ) ) -{ -} +WriteIterations::SharedResources::SharedResources(iterations_t _iterations) + : iterations(std::move(_iterations)) +{} WriteIterations::SharedResources::~SharedResources() { - if( currentlyOpen.has_value() && - iterations.retrieveSeries().get().m_lastFlushSuccessful ) + if (currentlyOpen.has_value() && + iterations.retrieveSeries().get().m_lastFlushSuccessful) { auto lastIterationIndex = currentlyOpen.value(); - auto & lastIteration = iterations.at( lastIterationIndex ); - if( !lastIteration.closed() ) + auto &lastIteration = iterations.at(lastIterationIndex); + if (!lastIteration.closed()) { lastIteration.close(); } } } -WriteIterations::WriteIterations( iterations_t iterations ) - : shared{ std::make_shared< SharedResources >( std::move( iterations ) ) } -{ -} +WriteIterations::WriteIterations(iterations_t iterations) + : shared{std::make_shared(std::move(iterations))} +{} -WriteIterations::mapped_type & -WriteIterations::operator[]( key_type const & key ) +WriteIterations::mapped_type &WriteIterations::operator[](key_type const &key) { // make a copy // explicit cast so MSVC can figure out how to do it correctly - return operator[]( static_cast< key_type && >( key_type{ key } ) ); + return operator[](static_cast(key_type{key})); } -WriteIterations::mapped_type & WriteIterations::operator[]( key_type && key ) +WriteIterations::mapped_type &WriteIterations::operator[](key_type &&key) { - if( shared->currentlyOpen.has_value() ) + if (shared->currentlyOpen.has_value()) { auto lastIterationIndex = shared->currentlyOpen.value(); - auto & lastIteration = shared->iterations.at( lastIterationIndex ); - if( lastIterationIndex != key && !lastIteration.closed() ) + auto &lastIteration = shared->iterations.at(lastIterationIndex); + if (lastIterationIndex != key && !lastIteration.closed()) { lastIteration.close(); } } shared->currentlyOpen = key; - auto & res = shared->iterations[ std::move( key ) ]; - if( res.getStepStatus() == StepStatus::NoStep ) + auto &res = shared->iterations[std::move(key)]; + if (res.getStepStatus() == StepStatus::NoStep) { res.beginStep(); - res.setStepStatus( StepStatus::DuringStep ); + res.setStepStatus(StepStatus::DuringStep); } return res; } diff --git a/src/auxiliary/Date.cpp b/src/auxiliary/Date.cpp index d059d63e9f..ae6f356754 100644 --- a/src/auxiliary/Date.cpp +++ b/src/auxiliary/Date.cpp @@ -22,28 +22,27 @@ #include #include -#include #include - +#include namespace openPMD::auxiliary { - std::string getDateString( std::string const & format ) - { - constexpr size_t maxLen = 30u; - std::array< char, maxLen > buffer; +std::string getDateString(std::string const &format) +{ + constexpr size_t maxLen = 30u; + std::array 
buffer; - time_t rawtime; - time( &rawtime ); - struct tm* timeinfo; - // https://github.com/openPMD/openPMD-api/pull/657#issuecomment-574424885 - timeinfo = localtime( &rawtime ); // lgtm[cpp/potentially-dangerous-function] + time_t rawtime; + time(&rawtime); + struct tm *timeinfo; + // https://github.com/openPMD/openPMD-api/pull/657#issuecomment-574424885 + timeinfo = localtime(&rawtime); // lgtm[cpp/potentially-dangerous-function] - strftime( buffer.data(), maxLen, format.c_str(), timeinfo ); + strftime(buffer.data(), maxLen, format.c_str(), timeinfo); - std::stringstream dateString; - dateString << buffer.data(); + std::stringstream dateString; + dateString << buffer.data(); - return dateString.str(); - } -} // namespace openPMD + return dateString.str(); +} +} // namespace openPMD::auxiliary diff --git a/src/auxiliary/Filesystem.cpp b/src/auxiliary/Filesystem.cpp index 8d0abe20d1..3e2c65f3af 100644 --- a/src/auxiliary/Filesystem.cpp +++ b/src/auxiliary/Filesystem.cpp @@ -22,12 +22,12 @@ #include "openPMD/auxiliary/StringManip.hpp" #ifdef _WIN32 -# include +#include #else -# include -# include -# include -# include +#include +#include +#include +#include #endif #include @@ -35,131 +35,136 @@ #include #include - namespace openPMD::auxiliary { -bool -directory_exists(std::string const& path) +bool directory_exists(std::string const &path) { #ifdef _WIN32 DWORD attributes = GetFileAttributes(path.c_str()); - return (attributes != INVALID_FILE_ATTRIBUTES && - (attributes & FILE_ATTRIBUTE_DIRECTORY)); + return ( + attributes != INVALID_FILE_ATTRIBUTES && + (attributes & FILE_ATTRIBUTE_DIRECTORY)); #else struct stat s; return (0 == stat(path.c_str(), &s)) && S_ISDIR(s.st_mode); #endif } -bool -file_exists( std::string const& path ) +bool file_exists(std::string const &path) { #ifdef _WIN32 DWORD attributes = GetFileAttributes(path.c_str()); - return (attributes != INVALID_FILE_ATTRIBUTES && - !(attributes & FILE_ATTRIBUTE_DIRECTORY)); + return ( + attributes != INVALID_FILE_ATTRIBUTES && + !(attributes & FILE_ATTRIBUTE_DIRECTORY)); #else struct stat s; return (0 == stat(path.c_str(), &s)) && S_ISREG(s.st_mode); #endif } -std::vector< std::string > -list_directory(std::string const& path ) +std::vector list_directory(std::string const &path) { - std::vector< std::string > ret; + std::vector ret; #ifdef _WIN32 std::string pattern(path); pattern.append("\\*"); WIN32_FIND_DATA data; HANDLE hFind = FindFirstFile(pattern.c_str(), &data); - if( hFind == INVALID_HANDLE_VALUE ) + if (hFind == INVALID_HANDLE_VALUE) throw std::system_error(std::error_code(errno, std::system_category())); - do { - if( strcmp(data.cFileName, ".") != 0 && strcmp(data.cFileName, "..") != 0 ) + do + { + if (strcmp(data.cFileName, ".") != 0 && + strcmp(data.cFileName, "..") != 0) ret.emplace_back(data.cFileName); } while (FindNextFile(hFind, &data) != 0); FindClose(hFind); #else auto directory = opendir(path.c_str()); - if( !directory ) + if (!directory) throw std::system_error(std::error_code(errno, std::system_category())); - dirent* entry; + dirent *entry; while ((entry = readdir(directory)) != nullptr) - if( strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0 ) + if (strcmp(entry->d_name, ".") != 0 && strcmp(entry->d_name, "..") != 0) ret.emplace_back(entry->d_name); closedir(directory); #endif return ret; } -bool -create_directories( std::string const& path ) +bool create_directories(std::string const &path) { - if( directory_exists(path) ) + if (directory_exists(path)) return true; #ifdef _WIN32 - auto 
mk = [](std::string const& p) -> bool { return CreateDirectory(p.c_str(), nullptr); }; + auto mk = [](std::string const &p) -> bool { + return CreateDirectory(p.c_str(), nullptr); + }; #else mode_t mask = umask(0); umask(mask); - auto mk = [mask](std::string const& p) -> bool { return (0 == mkdir(p.c_str(), 0777 & ~mask));}; + auto mk = [mask](std::string const &p) -> bool { + return (0 == mkdir(p.c_str(), 0777 & ~mask)); + }; #endif std::istringstream ss(path); std::string token; std::string partialPath; - if( auxiliary::starts_with(path, directory_separator) ) + if (auxiliary::starts_with(path, directory_separator)) partialPath += directory_separator; bool success = true; - while( std::getline( ss, token, directory_separator ) ) + while (std::getline(ss, token, directory_separator)) { - if( !token.empty() ) + if (!token.empty()) partialPath += token + directory_separator; - if( !directory_exists( partialPath ) ) + if (!directory_exists(partialPath)) { bool partial_success = mk(partialPath); - if( !partial_success ) + if (!partial_success) // did someone else just race us to create this dir? - if( !directory_exists( partialPath ) ) + if (!directory_exists(partialPath)) success = success && partial_success; } } return success; } -bool -remove_directory( std::string const& path ) +bool remove_directory(std::string const &path) { - if( !directory_exists(path) ) + if (!directory_exists(path)) return false; bool success = true; #ifdef _WIN32 - auto del = [](std::string const& p) -> bool { return RemoveDirectory(p.c_str()); }; + auto del = [](std::string const &p) -> bool { + return RemoveDirectory(p.c_str()); + }; #else - auto del = [](std::string const& p) -> bool { return (0 == remove(p.c_str()));}; + auto del = [](std::string const &p) -> bool { + return (0 == remove(p.c_str())); + }; #endif - for( auto const& entry : list_directory(path) ) + for (auto const &entry : list_directory(path)) { std::string partialPath = path + directory_separator + entry; - if( directory_exists(partialPath) ) + if (directory_exists(partialPath)) success &= remove_directory(partialPath); - else if( file_exists(partialPath) ) + else if (file_exists(partialPath)) success &= remove_file(partialPath); } success &= del(path); return success; } -bool -remove_file( std::string const& path ) +bool remove_file(std::string const &path) { - if( !file_exists(path) ) - return false; + if (!file_exists(path)) + return false; #ifdef _WIN32 return DeleteFile(path.c_str()); @@ -172,23 +177,23 @@ remove_file( std::string const& path ) namespace { - template< typename > + template struct MPI_Types; - template<> - struct MPI_Types< unsigned long > + template <> + struct MPI_Types { static MPI_Datatype const value; }; - template<> - struct MPI_Types< unsigned long long > + template <> + struct MPI_Types { static MPI_Datatype const value; }; - template<> - struct MPI_Types< unsigned > + template <> + struct MPI_Types { static MPI_Datatype const value; }; @@ -197,56 +202,55 @@ namespace * Only some of these are actually instanciated, * so suppress warnings for the others. 
 */
-    [[maybe_unused]]
-    MPI_Datatype const MPI_Types< unsigned >::value = MPI_UNSIGNED;
-    [[maybe_unused]]
-    MPI_Datatype const MPI_Types< unsigned long >::value = MPI_UNSIGNED_LONG;
-    [[maybe_unused]]
-    MPI_Datatype const MPI_Types< unsigned long long >::value = MPI_UNSIGNED_LONG_LONG;
+    [[maybe_unused]] MPI_Datatype const MPI_Types<unsigned>::value =
+        MPI_UNSIGNED;
+    [[maybe_unused]] MPI_Datatype const MPI_Types<unsigned long>::value =
+        MPI_UNSIGNED_LONG;
+    [[maybe_unused]] MPI_Datatype const MPI_Types<unsigned long long>::value =
+        MPI_UNSIGNED_LONG_LONG;
 } // namespace

-std::string
-collective_file_read( std::string const & path, MPI_Comm comm )
+std::string collective_file_read(std::string const &path, MPI_Comm comm)
 {
     int rank, size;
-    MPI_Comm_rank( comm, &rank );
-    MPI_Comm_size( comm, &size );
+    MPI_Comm_rank(comm, &rank);
+    MPI_Comm_size(comm, &size);

     std::string res;
     size_t stringLength = 0;
-    if( rank == 0 )
+    if (rank == 0)
     {
         std::fstream handle;
-        handle.open( path, std::ios_base::in );
+        handle.open(path, std::ios_base::in);
         std::stringstream stream;
         stream << handle.rdbuf();
         res = stream.str();
-        if( !handle.good() )
+        if (!handle.good())
         {
             throw std::runtime_error(
-                "Failed reading JSON config from file " + path + "." );
+                "Failed reading JSON config from file " + path + ".");
         }
         stringLength = res.size() + 1;
     }
-    MPI_Datatype datatype = MPI_Types< size_t >::value;
-    int err = MPI_Bcast( &stringLength, 1, datatype, 0, comm );
-    if( err )
+    MPI_Datatype datatype = MPI_Types<size_t>::value;
+    int err = MPI_Bcast(&stringLength, 1, datatype, 0, comm);
+    if (err)
     {
         throw std::runtime_error(
-            "[collective_file_read] MPI_Bcast stringLength failure." );
+            "[collective_file_read] MPI_Bcast stringLength failure.");
     }
-    std::vector< char > recvbuf( stringLength, 0 );
-    if(rank == 0)
+    std::vector<char> recvbuf(stringLength, 0);
+    if (rank == 0)
     {
         std::copy_n(res.c_str(), stringLength, recvbuf.data());
     }
-    err = MPI_Bcast( recvbuf.data(), stringLength, MPI_CHAR, 0, comm );
-    if( err )
+    err = MPI_Bcast(recvbuf.data(), stringLength, MPI_CHAR, 0, comm);
+    if (err)
     {
         throw std::runtime_error(
-            "[collective_file_read] MPI_Bcast file content failure."
); + "[collective_file_read] MPI_Bcast file content failure."); } - if( rank != 0 ) + if (rank != 0) { res = recvbuf.data(); } @@ -255,4 +259,4 @@ collective_file_read( std::string const & path, MPI_Comm comm ) #endif -} // namespace openPMD +} // namespace openPMD::auxiliary diff --git a/src/auxiliary/JSON.cpp b/src/auxiliary/JSON.cpp index 1e78d61538..0b277e0580 100644 --- a/src/auxiliary/JSON.cpp +++ b/src/auxiliary/JSON.cpp @@ -39,171 +39,162 @@ namespace openPMD::json { - TracingJSON::TracingJSON() - : TracingJSON( ParsedConfig{} ) - { - } - - TracingJSON::TracingJSON( - nlohmann::json originalJSON, - SupportedLanguages originallySpecifiedAs_in ) - : originallySpecifiedAs( originallySpecifiedAs_in ) - , m_originalJSON( - std::make_shared< nlohmann::json >( std::move( originalJSON ) ) ) - , m_shadow( std::make_shared< nlohmann::json >() ) - , m_positionInOriginal( &*m_originalJSON ) - , m_positionInShadow( &*m_shadow ) - { - } - - TracingJSON::TracingJSON( ParsedConfig parsedConfig ) - : TracingJSON{ - std::move( parsedConfig.config ), - parsedConfig.originallySpecifiedAs } - { - } +TracingJSON::TracingJSON() : TracingJSON(ParsedConfig{}) +{} + +TracingJSON::TracingJSON( + nlohmann::json originalJSON, SupportedLanguages originallySpecifiedAs_in) + : originallySpecifiedAs(originallySpecifiedAs_in) + , m_originalJSON(std::make_shared(std::move(originalJSON))) + , m_shadow(std::make_shared()) + , m_positionInOriginal(&*m_originalJSON) + , m_positionInShadow(&*m_shadow) +{} + +TracingJSON::TracingJSON(ParsedConfig parsedConfig) + : TracingJSON{ + std::move(parsedConfig.config), parsedConfig.originallySpecifiedAs} +{} + +nlohmann::json const &TracingJSON::getShadow() const +{ + return *m_positionInShadow; +} - nlohmann::json const & TracingJSON::getShadow() const - { - return *m_positionInShadow; - } +nlohmann::json TracingJSON::invertShadow() const +{ + nlohmann::json inverted = *m_positionInOriginal; + invertShadow(inverted, *m_positionInShadow); + return inverted; +} - nlohmann::json TracingJSON::invertShadow() const +void TracingJSON::invertShadow( + nlohmann::json &result, nlohmann::json const &shadow) const +{ + if (!shadow.is_object()) { - nlohmann::json inverted = *m_positionInOriginal; - invertShadow( inverted, *m_positionInShadow ); - return inverted; + return; } - - void TracingJSON::invertShadow( - nlohmann::json & result, nlohmann::json const & shadow ) const + std::vector toRemove; + for (auto it = shadow.begin(); it != shadow.end(); ++it) { - if( !shadow.is_object() ) + nlohmann::json &partialResult = result[it.key()]; + if (partialResult.is_object()) { - return; - } - std::vector< std::string > toRemove; - for( auto it = shadow.begin(); it != shadow.end(); ++it ) - { - nlohmann::json & partialResult = result[ it.key() ]; - if( partialResult.is_object() ) + invertShadow(partialResult, it.value()); + if (partialResult.size() == 0) { - invertShadow( partialResult, it.value() ); - if( partialResult.size() == 0 ) - { - toRemove.emplace_back( it.key() ); - } - } - else - { - toRemove.emplace_back( it.key() ); + toRemove.emplace_back(it.key()); } } - for( auto const & key : toRemove ) + else { - result.erase( key ); + toRemove.emplace_back(it.key()); } } - - void - TracingJSON::declareFullyRead() + for (auto const &key : toRemove) { - if( m_trace ) - { - // copy over - *m_positionInShadow = *m_positionInOriginal; - } + result.erase(key); } +} - TracingJSON::TracingJSON( - std::shared_ptr< nlohmann::json > originalJSON, - std::shared_ptr< nlohmann::json > shadow, - nlohmann::json * 
positionInOriginal, - nlohmann::json * positionInShadow, - SupportedLanguages originallySpecifiedAs_in, - bool trace ) - : originallySpecifiedAs( originallySpecifiedAs_in ), - m_originalJSON( std::move( originalJSON ) ), - m_shadow( std::move( shadow ) ), - m_positionInOriginal( positionInOriginal ), - m_positionInShadow( positionInShadow ), - m_trace( trace ) +void TracingJSON::declareFullyRead() +{ + if (m_trace) { + // copy over + *m_positionInShadow = *m_positionInOriginal; } - - namespace { - std::optional< std::string > - extractFilename( std::string const & unparsed ) +} + +TracingJSON::TracingJSON( + std::shared_ptr originalJSON, + std::shared_ptr shadow, + nlohmann::json *positionInOriginal, + nlohmann::json *positionInShadow, + SupportedLanguages originallySpecifiedAs_in, + bool trace) + : originallySpecifiedAs(originallySpecifiedAs_in) + , m_originalJSON(std::move(originalJSON)) + , m_shadow(std::move(shadow)) + , m_positionInOriginal(positionInOriginal) + , m_positionInShadow(positionInShadow) + , m_trace(trace) +{} + +namespace +{ + std::optional extractFilename(std::string const &unparsed) { - std::string trimmed = auxiliary::trim( - unparsed, []( char c ) { return std::isspace( c ); } ); - if( !trimmed.empty() && trimmed.at( 0 ) == '@' ) + std::string trimmed = + auxiliary::trim(unparsed, [](char c) { return std::isspace(c); }); + if (!trimmed.empty() && trimmed.at(0) == '@') { - trimmed = trimmed.substr( 1 ); + trimmed = trimmed.substr(1); trimmed = auxiliary::trim( - trimmed, []( char c ) { return std::isspace( c ); } ); - return std::make_optional( trimmed ); + trimmed, [](char c) { return std::isspace(c); }); + return std::make_optional(trimmed); } else { - return std::optional< std::string >{}; + return std::optional{}; } } - nlohmann::json tomlToJson( - toml::value const & val, std::vector< std::string > & currentPath ); + nlohmann::json + tomlToJson(toml::value const &val, std::vector ¤tPath); - nlohmann::json tomlToJson( - toml::value const & val, std::vector< std::string > & currentPath ) + nlohmann::json + tomlToJson(toml::value const &val, std::vector ¤tPath) { - if( val.is_boolean() ) + if (val.is_boolean()) { return val.as_boolean(); } - else if( val.is_integer() ) + else if (val.is_integer()) { return val.as_integer(); } - else if( val.is_floating() ) + else if (val.is_floating()) { return val.as_floating(); } - else if( val.is_string() ) + else if (val.is_string()) { - return std::string( val.as_string() ); + return std::string(val.as_string()); } - else if( + else if ( val.is_offset_datetime() || val.is_local_datetime() || - val.is_local_date() || val.is_local_time() ) + val.is_local_date() || val.is_local_time()) { throw error::BackendConfigSchema( - currentPath, "Cannot convert date/time type to JSON." 
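// Standalone sketch of the "@filename" convention implemented by
// extractFilename above: after trimming whitespace, a leading '@' marks the
// option string as a path to a JSON/TOML file rather than an inline config.
// trimWs below is a plain-std stand-in for auxiliary::trim and is
// illustrative only.
#include <cctype>
#include <iostream>
#include <optional>
#include <string>

static std::string trimWs(std::string s)
{
    while (!s.empty() && std::isspace(static_cast<unsigned char>(s.front())))
        s.erase(s.begin());
    while (!s.empty() && std::isspace(static_cast<unsigned char>(s.back())))
        s.pop_back();
    return s;
}

static std::optional<std::string>
extractFilenameSketch(std::string const &unparsed)
{
    std::string trimmed = trimWs(unparsed);
    if (!trimmed.empty() && trimmed.front() == '@')
        return trimWs(trimmed.substr(1));
    return std::nullopt;
}

int main()
{
    // "config.toml" is just an example path
    std::cout << extractFilenameSketch("  @ config.toml ").value()
              << '\n'; // config.toml
    std::cout << extractFilenameSketch(R"({"adios2": {}})").has_value()
              << '\n'; // 0 (inline config, no file)
}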
); + currentPath, "Cannot convert date/time type to JSON."); } - else if( val.is_array() ) + else if (val.is_array()) { - auto const & arr = val.as_array(); + auto const &arr = val.as_array(); nlohmann::json result = nlohmann::json::array(); - for( size_t i = 0; i < arr.size(); ++i ) + for (size_t i = 0; i < arr.size(); ++i) { - currentPath.push_back( std::to_string( i ) ); - result[ i ] = tomlToJson( arr[ i ], currentPath ); + currentPath.push_back(std::to_string(i)); + result[i] = tomlToJson(arr[i], currentPath); currentPath.pop_back(); } return result; } - else if( val.is_table() ) + else if (val.is_table()) { - auto const & tab = val.as_table(); + auto const &tab = val.as_table(); nlohmann::json result = nlohmann::json::object(); - for( auto const & pair : tab ) + for (auto const &pair : tab) { - currentPath.push_back( pair.first ); - result[ pair.first ] = tomlToJson( pair.second, currentPath ); + currentPath.push_back(pair.first); + result[pair.first] = tomlToJson(pair.second, currentPath); currentPath.pop_back(); } return result; } - else if( val.is_uninitialized() ) + else if (val.is_uninitialized()) { return nlohmann::json(); // null } @@ -211,388 +202,375 @@ namespace openPMD::json throw error::BackendConfigSchema( currentPath, "Unexpected datatype in TOML configuration. This is probably a " - "bug." ); + "bug."); } toml::value jsonToToml( - nlohmann::json const & val, std::vector< std::string > & currentPath ); + nlohmann::json const &val, std::vector ¤tPath); - toml::value jsonToToml( - nlohmann::json const & val, std::vector< std::string > & currentPath ) + toml::value + jsonToToml(nlohmann::json const &val, std::vector ¤tPath) { - switch( val.type() ) + switch (val.type()) { case nlohmann::json::value_t::null: return toml::value(); case nlohmann::json::value_t::object: { toml::value::table_type res; - for( auto pair = val.begin(); pair != val.end(); ++pair ) + for (auto pair = val.begin(); pair != val.end(); ++pair) { - currentPath.push_back( pair.key() ); - res[ pair.key() ] = jsonToToml( pair.value(), currentPath ); + currentPath.push_back(pair.key()); + res[pair.key()] = jsonToToml(pair.value(), currentPath); currentPath.pop_back(); } - return toml::value( std::move( res ) ); + return toml::value(std::move(res)); } case nlohmann::json::value_t::array: { toml::value::array_type res; - res.reserve( val.size() ); + res.reserve(val.size()); size_t index = 0; - for( auto const & entry : val ) + for (auto const &entry : val) { - currentPath.push_back( std::to_string( index ) ); - res.emplace_back( jsonToToml( entry, currentPath ) ); + currentPath.push_back(std::to_string(index)); + res.emplace_back(jsonToToml(entry, currentPath)); currentPath.pop_back(); } - return toml::value( std::move( res ) ); + return toml::value(std::move(res)); } case nlohmann::json::value_t::string: - return val.get< std::string >(); + return val.get(); case nlohmann::json::value_t::boolean: - return val.get< bool >(); + return val.get(); case nlohmann::json::value_t::number_integer: - return val.get< nlohmann::json::number_integer_t >(); + return val.get(); case nlohmann::json::value_t::number_unsigned: - return val.get< nlohmann::json::number_unsigned_t >(); + return val.get(); case nlohmann::json::value_t::number_float: - return val.get< nlohmann::json::number_float_t >(); + return val.get(); case nlohmann::json::value_t::binary: - return val.get< nlohmann::json::binary_t >(); + return val.get(); case nlohmann::json::value_t::discarded: throw error::BackendConfigSchema( currentPath, - "Internal JSON 
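// Usage sketch for the TOML -> JSON conversion above: toml11 parses a small
// document from a stream and the single-argument openPMD::json::tomlToJson
// overload (defined nearby in this file) turns it into an nlohmann::json
// value. Date/time values would throw, as handled above. The include paths
// and the forward declaration are assumptions made for this sketch.
#include <iostream>
#include <sstream>
#include <nlohmann/json.hpp>
#include <toml.hpp>

namespace openPMD::json
{
    nlohmann::json tomlToJson(toml::value const &val); // defined in this file
}

void tomlToJsonSketch()
{
    std::istringstream in(
        "[adios2.engine]\ntype = \"bp4\"\nusesteps = true\n",
        std::ios_base::binary | std::ios_base::in);
    toml::value v = toml::parse(in, "[inline TOML specification]");
    nlohmann::json j = openPMD::json::tomlToJson(v);
    std::cout << j.dump() << std::endl;
    // {"adios2":{"engine":{"type":"bp4","usesteps":true}}}
}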
parser datatype leaked into JSON value." ); + "Internal JSON parser datatype leaked into JSON value."); } - throw std::runtime_error( "Unreachable!" ); - } + throw std::runtime_error("Unreachable!"); } +} // namespace - nlohmann::json tomlToJson( toml::value const & val ) - { - std::vector< std::string > currentPath; - // that's as deep as our config currently goes, +1 for good measure - currentPath.reserve( 7 ); - return tomlToJson( val, currentPath ); - } +nlohmann::json tomlToJson(toml::value const &val) +{ + std::vector currentPath; + // that's as deep as our config currently goes, +1 for good measure + currentPath.reserve(7); + return tomlToJson(val, currentPath); +} + +toml::value jsonToToml(nlohmann::json const &val) +{ + std::vector currentPath; + // that's as deep as our config currently goes, +1 for good measure + currentPath.reserve(7); + return jsonToToml(val, currentPath); +} - toml::value jsonToToml( nlohmann::json const & val ) +namespace +{ + ParsedConfig parseInlineOptions(std::string const &options) { - std::vector< std::string > currentPath; - // that's as deep as our config currently goes, +1 for good measure - currentPath.reserve( 7 ); - return jsonToToml( val, currentPath ); + std::string trimmed = + auxiliary::trim(options, [](char c) { return std::isspace(c); }); + ParsedConfig res; + if (trimmed.empty()) + { + return res; + } + if (trimmed.at(0) == '{') + { + res.config = nlohmann::json::parse(options); + res.originallySpecifiedAs = SupportedLanguages::JSON; + } + else + { + std::istringstream istream( + options.c_str(), std::ios_base::binary | std::ios_base::in); + toml::value tomlVal = + toml::parse(istream, "[inline TOML specification]"); + res.config = json::tomlToJson(tomlVal); + res.originallySpecifiedAs = SupportedLanguages::TOML; + } + lowerCase(res.config); + return res; } +} // namespace - namespace +ParsedConfig parseOptions(std::string const &options, bool considerFiles) +{ + if (considerFiles) { - ParsedConfig parseInlineOptions( std::string const & options ) + auto filename = extractFilename(options); + if (filename.has_value()) { - std::string trimmed = auxiliary::trim( - options, []( char c ) { return std::isspace( c ); } ); + std::fstream handle; + handle.open( + filename.value(), std::ios_base::binary | std::ios_base::in); ParsedConfig res; - if( trimmed.empty() ) + if (auxiliary::ends_with(filename.value(), ".toml")) { - return res; + toml::value tomlVal = toml::parse(handle, filename.value()); + res.config = tomlToJson(tomlVal); + res.originallySpecifiedAs = SupportedLanguages::TOML; } - if( trimmed.at( 0 ) == '{' ) + else { - res.config = nlohmann::json::parse( options ); + // default: JSON + handle >> res.config; res.originallySpecifiedAs = SupportedLanguages::JSON; } - else + if (!handle.good()) { - std::istringstream istream( - options.c_str(), - std::ios_base::binary | std::ios_base::in ); - toml::value tomlVal = - toml::parse( istream, "[inline TOML specification]" ); - res.config = json::tomlToJson( tomlVal ); - res.originallySpecifiedAs = SupportedLanguages::TOML; + throw std::runtime_error( + "Failed reading JSON config from file " + filename.value() + + "."); } - lowerCase( res.config ); + lowerCase(res.config); return res; } } + return parseInlineOptions(options); +} - ParsedConfig - parseOptions( std::string const & options, bool considerFiles ) +#if openPMD_HAVE_MPI +ParsedConfig +parseOptions(std::string const &options, MPI_Comm comm, bool considerFiles) +{ + if (considerFiles) { - if( considerFiles ) + auto filename = 
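// Usage sketch for parseOptions above, showing the three accepted forms of
// an option string: inline JSON (leading '{'), inline TOML (anything else),
// and an "@file" reference whose extension selects TOML (.toml) or JSON
// (the default). ParsedConfig, parseOptions and SupportedLanguages are the
// internal helpers defined in this file; the sketch assumes their
// declarations are visible, and "config.toml" is a hypothetical path.
void parseOptionsSketch()
{
    using namespace openPMD::json;

    // inline JSON
    ParsedConfig a = parseOptions(
        R"({"adios2": {"engine": {"type": "bp4"}}})",
        /* considerFiles = */ true);
    // inline TOML
    ParsedConfig b = parseOptions(
        "adios2.engine.type = \"bp4\"", /* considerFiles = */ true);
    // config file; fails if "config.toml" cannot be read
    ParsedConfig c = parseOptions("@config.toml", /* considerFiles = */ true);

    // a.originallySpecifiedAs == SupportedLanguages::JSON
    // b.originallySpecifiedAs == SupportedLanguages::TOML
    // in every case, the resulting keys have been passed through lowerCase()
    (void)a;
    (void)b;
    (void)c;
}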
extractFilename(options); + if (filename.has_value()) { - auto filename = extractFilename( options ); - if( filename.has_value() ) + ParsedConfig res; + std::string fileContent = + auxiliary::collective_file_read(filename.value(), comm); + if (auxiliary::ends_with(filename.value(), ".toml")) { - std::fstream handle; - handle.open( - filename.value(), std::ios_base::binary | std::ios_base::in ); - ParsedConfig res; - if( auxiliary::ends_with( filename.value(), ".toml" ) ) - { - toml::value tomlVal = toml::parse( handle, filename.value() ); - res.config = tomlToJson( tomlVal ); - res.originallySpecifiedAs = SupportedLanguages::TOML; - } - else - { - // default: JSON - handle >> res.config; - res.originallySpecifiedAs = SupportedLanguages::JSON; - } - if( !handle.good() ) - { - throw std::runtime_error( - "Failed reading JSON config from file " + - filename.value() + "." ); - } - lowerCase( res.config ); - return res; + std::istringstream istream( + fileContent.c_str(), + std::ios_base::binary | std::ios_base::in); + res.config = tomlToJson(toml::parse(istream, filename.value())); + res.originallySpecifiedAs = SupportedLanguages::TOML; } - } - return parseInlineOptions( options ); - } - -#if openPMD_HAVE_MPI - ParsedConfig parseOptions( - std::string const & options, MPI_Comm comm, bool considerFiles ) - { - if( considerFiles ) - { - auto filename = extractFilename( options ); - if( filename.has_value() ) + else { - ParsedConfig res; - std::string fileContent = - auxiliary::collective_file_read( filename.value(), comm ); - if( auxiliary::ends_with( filename.value(), ".toml" ) ) - { - std::istringstream istream( - fileContent.c_str(), - std::ios_base::binary | std::ios_base::in ); - res.config = - tomlToJson( toml::parse( istream, filename.value() ) ); - res.originallySpecifiedAs = SupportedLanguages::TOML; - } - else - { - // default:: JSON - res.config = nlohmann::json::parse( fileContent ); - res.originallySpecifiedAs = SupportedLanguages::JSON; - } - lowerCase( res.config ); - return res; + // default:: JSON + res.config = nlohmann::json::parse(fileContent); + res.originallySpecifiedAs = SupportedLanguages::JSON; } + lowerCase(res.config); + return res; } - return parseInlineOptions( options ); } + return parseInlineOptions(options); +} #endif - template< typename F > - static nlohmann::json & lowerCase( - nlohmann::json & json, - std::vector< std::string > & currentPath, - F const & ignoreCurrentPath ) - { - auto transFormCurrentObject = [ ¤tPath ]( - nlohmann::json::object_t & val ) { - // somekey -> SomeKey - std::map< std::string, std::string > originalKeys; - for( auto & pair : val ) - { - std::string lower = - auxiliary::lowerCase( std::string( pair.first ) ); - auto findEntry = originalKeys.find( lower ); - if( findEntry != originalKeys.end() ) - { - // double entry found - std::vector< std::string > copyCurrentPath{ currentPath }; - copyCurrentPath.push_back( lower ); - throw error::BackendConfigSchema( - std::move( copyCurrentPath ), - "JSON config: duplicate keys." 
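// Standalone sketch of the pattern behind auxiliary::collective_file_read
// used above: rank 0 reads the config file once and the bytes are broadcast
// to all other ranks, so every rank parses identical content. Function and
// variable names here are illustrative, not the library's.
#include <algorithm>
#include <fstream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
#include <mpi.h>

std::string collectiveFileReadSketch(std::string const &path, MPI_Comm comm)
{
    int rank = 0;
    MPI_Comm_rank(comm, &rank);

    std::string res;
    if (rank == 0)
    {
        std::ifstream handle(path, std::ios_base::binary | std::ios_base::in);
        std::ostringstream buffer;
        buffer << handle.rdbuf();
        res = buffer.str();
        if (!handle.good())
            throw std::runtime_error(
                "Failed reading config file " + path + ".");
    }

    // broadcast the size first, then the content itself
    unsigned long long size = res.size();
    MPI_Bcast(&size, 1, MPI_UNSIGNED_LONG_LONG, 0, comm);
    std::vector<char> recvbuf(size, 0);
    if (rank == 0)
        std::copy(res.begin(), res.end(), recvbuf.begin());
    MPI_Bcast(recvbuf.data(), static_cast<int>(size), MPI_CHAR, 0, comm);
    if (rank != 0)
        res.assign(recvbuf.data(), size);
    return res;
}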
); - } - originalKeys.emplace_hint( - findEntry, std::move( lower ), pair.first ); - } - - nlohmann::json::object_t newObject; - for( auto & pair : originalKeys ) - { - newObject[ pair.first ] = std::move( val[ pair.second ] ); - } - val = newObject; - }; - - if( json.is_object() ) +template +static nlohmann::json &lowerCase( + nlohmann::json &json, + std::vector ¤tPath, + F const &ignoreCurrentPath) +{ + auto transFormCurrentObject = [¤tPath]( + nlohmann::json::object_t &val) { + // somekey -> SomeKey + std::map originalKeys; + for (auto &pair : val) { - auto & val = json.get_ref< nlohmann::json::object_t & >(); - - if( !ignoreCurrentPath( currentPath ) ) + std::string lower = auxiliary::lowerCase(std::string(pair.first)); + auto findEntry = originalKeys.find(lower); + if (findEntry != originalKeys.end()) { - transFormCurrentObject( val ); - } - - // now recursively - for( auto & pair : val ) - { - // ensure that the path consists only of lowercase strings, - // even if ignoreCurrentPath() was true - currentPath.push_back( - auxiliary::lowerCase( std::string( pair.first ) ) ); - lowerCase( pair.second, currentPath, ignoreCurrentPath ); - currentPath.pop_back(); + // double entry found + std::vector copyCurrentPath{currentPath}; + copyCurrentPath.push_back(lower); + throw error::BackendConfigSchema( + std::move(copyCurrentPath), "JSON config: duplicate keys."); } + originalKeys.emplace_hint(findEntry, std::move(lower), pair.first); } - else if( json.is_array() ) + + nlohmann::json::object_t newObject; + for (auto &pair : originalKeys) { - for( auto & val : json ) - { - currentPath.emplace_back( "\vnum" ); - lowerCase( val, currentPath, ignoreCurrentPath ); - currentPath.pop_back(); - } + newObject[pair.first] = std::move(val[pair.second]); } - return json; - } + val = newObject; + }; - nlohmann::json & lowerCase( nlohmann::json & json ) + if (json.is_object()) { - std::vector< std::string > currentPath; - // that's as deep as our config currently goes, +1 for good measure - currentPath.reserve( 7 ); - return lowerCase( - json, currentPath, []( std::vector< std::string > const & path ) { - std::vector< std::string > const ignoredPaths[] = { - { "adios2", "engine", "parameters" }, - { "adios2", - "dataset", - "operators", - /* - * We use "\vnum" to indicate "any array index". - */ - "\vnum", - "parameters" } }; - for( auto const & ignored : ignoredPaths ) - { - if( ignored == path ) - { - return true; - } - } - return false; - } ); - } + auto &val = json.get_ref(); - std::optional< std::string > - asStringDynamic( nlohmann::json const & value ) - { - if( value.is_string() ) - { - return value.get< std::string >(); - } - else if( value.is_number_integer() ) + if (!ignoreCurrentPath(currentPath)) { - return std::to_string( value.get< long long >() ); + transFormCurrentObject(val); } - else if( value.is_number_float() ) + + // now recursively + for (auto &pair : val) { - return std::to_string( value.get< long double >() ); + // ensure that the path consists only of lowercase strings, + // even if ignoreCurrentPath() was true + currentPath.push_back( + auxiliary::lowerCase(std::string(pair.first))); + lowerCase(pair.second, currentPath, ignoreCurrentPath); + currentPath.pop_back(); } - else if( value.is_boolean() ) + } + else if (json.is_array()) + { + for (auto &val : json) { - return std::string( value.get< bool >() ? 
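// Usage sketch for lowerCase above: object keys are lowercased recursively,
// except below "adios2.engine.parameters" and
// "adios2.dataset.operators.<index>.parameters", where backend/operator
// parameters keep their original spelling. The forward declaration mirrors
// the function defined just above and is only here to keep the sketch
// self-contained.
#include <iostream>
#include <nlohmann/json.hpp>

namespace openPMD::json
{
    nlohmann::json &lowerCase(nlohmann::json &json); // defined above
}

void lowerCaseSketch()
{
    auto config = nlohmann::json::parse(R"(
        {"ADIOS2": {"Engine": {"Type": "bp4",
                               "Parameters": {"InitialBufferSize": "4Gb"}}}})");
    openPMD::json::lowerCase(config);
    std::cout << config.dump() << std::endl;
    // {"adios2":{"engine":{"parameters":{"InitialBufferSize":"4Gb"},"type":"bp4"}}}
}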
"1" : "0" ); + currentPath.emplace_back("\vnum"); + lowerCase(val, currentPath, ignoreCurrentPath); + currentPath.pop_back(); } - return std::optional< std::string >{}; } + return json; +} - std::optional< std::string > - asLowerCaseStringDynamic( nlohmann::json const & value ) +nlohmann::json &lowerCase(nlohmann::json &json) +{ + std::vector currentPath; + // that's as deep as our config currently goes, +1 for good measure + currentPath.reserve(7); + return lowerCase( + json, currentPath, [](std::vector const &path) { + std::vector const ignoredPaths[] = { + {"adios2", "engine", "parameters"}, + {"adios2", + "dataset", + "operators", + /* + * We use "\vnum" to indicate "any array index". + */ + "\vnum", + "parameters"}}; + for (auto const &ignored : ignoredPaths) + { + if (ignored == path) + { + return true; + } + } + return false; + }); +} + +std::optional asStringDynamic(nlohmann::json const &value) +{ + if (value.is_string()) { - auto maybeString = asStringDynamic( value ); - if( maybeString.has_value() ) - { - auxiliary::lowerCase( maybeString.value() ); - } - return maybeString; + return value.get(); + } + else if (value.is_number_integer()) + { + return std::to_string(value.get()); + } + else if (value.is_number_float()) + { + return std::to_string(value.get()); + } + else if (value.is_boolean()) + { + return std::string(value.get() ? "1" : "0"); + } + return std::optional{}; +} + +std::optional asLowerCaseStringDynamic(nlohmann::json const &value) +{ + auto maybeString = asStringDynamic(value); + if (maybeString.has_value()) + { + auxiliary::lowerCase(maybeString.value()); } + return maybeString; +} - std::vector< std::string > backendKeys{ - "adios1", "adios2", "json", "hdf5" }; +std::vector backendKeys{"adios1", "adios2", "json", "hdf5"}; - void warnGlobalUnusedOptions( TracingJSON const & config ) +void warnGlobalUnusedOptions(TracingJSON const &config) +{ + auto shadow = config.invertShadow(); + // The backends are supposed to deal with this + // Only global options here + for (auto const &backendKey : json::backendKeys) { - auto shadow = config.invertShadow(); - // The backends are supposed to deal with this - // Only global options here - for( auto const & backendKey : json::backendKeys ) + shadow.erase(backendKey); + } + if (shadow.size() > 0) + { + switch (config.originallySpecifiedAs) { - shadow.erase( backendKey ); + case SupportedLanguages::JSON: + std::cerr + << "[Series] The following parts of the global JSON config " + "remains unused:\n" + << shadow.dump() << std::endl; + break; + case SupportedLanguages::TOML: { + auto asToml = jsonToToml(shadow); + std::cerr + << "[Series] The following parts of the global TOML config " + "remains unused:\n" + << asToml << std::endl; } - if( shadow.size() > 0 ) - { - switch( config.originallySpecifiedAs ) - { - case SupportedLanguages::JSON: - std::cerr - << "[Series] The following parts of the global JSON config " - "remains unused:\n" - << shadow.dump() << std::endl; - break; - case SupportedLanguages::TOML: { - auto asToml = jsonToToml( shadow ); - std::cerr - << "[Series] The following parts of the global TOML config " - "remains unused:\n" - << asToml << std::endl; - } - } } } +} - nlohmann::json & - merge( nlohmann::json & defaultVal, nlohmann::json const & overwrite ) +nlohmann::json & +merge(nlohmann::json &defaultVal, nlohmann::json const &overwrite) +{ + if (defaultVal.is_object() && overwrite.is_object()) { - if( defaultVal.is_object() && overwrite.is_object() ) + std::vector prunedKeys; + for (auto it = 
overwrite.begin(); it != overwrite.end(); ++it) { - std::vector< std::string > prunedKeys; - for( auto it = overwrite.begin(); it != overwrite.end(); ++it ) + auto &valueInDefault = defaultVal[it.key()]; + merge(valueInDefault, it.value()); + if (valueInDefault.is_null()) { - auto & valueInDefault = defaultVal[ it.key() ]; - merge( valueInDefault, it.value() ); - if( valueInDefault.is_null() ) - { - prunedKeys.emplace_back( it.key() ); - } - } - for( auto const & key : prunedKeys ) - { - defaultVal.erase( key ); + prunedKeys.emplace_back(it.key()); } } - else + for (auto const &key : prunedKeys) { - /* - * Anything else, just overwrite. - * Note: There's no clear generic way to merge arrays: - * Should we concatenate? Or should we merge at the same indices? - * From the user side, this means: - * An application can specify a number of default compression - * operators, e.g. in adios2.dataset.operators, but a user can - * overwrite the operators. Neither appending nor pointwise update - * are quite useful here. - */ - defaultVal = overwrite; + defaultVal.erase(key); } - return defaultVal; } - - std::string - merge( std::string const & defaultValue, std::string const & overwrite ) + else { - auto res = - parseOptions( defaultValue, /* considerFiles = */ false ).config; - merge( - res, - parseOptions( overwrite, /* considerFiles = */ false ).config ); - return res.dump(); + /* + * Anything else, just overwrite. + * Note: There's no clear generic way to merge arrays: + * Should we concatenate? Or should we merge at the same indices? + * From the user side, this means: + * An application can specify a number of default compression + * operators, e.g. in adios2.dataset.operators, but a user can + * overwrite the operators. Neither appending nor pointwise update + * are quite useful here. 
+ */ + defaultVal = overwrite; } -} // namespace openPMD + return defaultVal; +} + +std::string merge(std::string const &defaultValue, std::string const &overwrite) +{ + auto res = parseOptions(defaultValue, /* considerFiles = */ false).config; + merge(res, parseOptions(overwrite, /* considerFiles = */ false).config); + return res.dump(); +} +} // namespace openPMD::json diff --git a/src/backend/Attributable.cpp b/src/backend/Attributable.cpp index d1e5a221ef..146b7d1991 100644 --- a/src/backend/Attributable.cpp +++ b/src/backend/Attributable.cpp @@ -33,41 +33,37 @@ namespace openPMD { namespace internal { -AttributableData::AttributableData() : m_writable{ this } -{ -} -} + AttributableData::AttributableData() : m_writable{this} + {} +} // namespace internal Attributable::Attributable() = default; -Attributable::Attributable( - std::shared_ptr< internal::AttributableData > attri ) - : m_attri{ std::move( attri ) } -{ -} +Attributable::Attributable(std::shared_ptr attri) + : m_attri{std::move(attri)} +{} -Attribute -Attributable::getAttribute(std::string const& key) const +Attribute Attributable::getAttribute(std::string const &key) const { - auto & attri = get(); + auto &attri = get(); auto it = attri.m_attributes.find(key); - if( it != attri.m_attributes.cend() ) + if (it != attri.m_attributes.cend()) return it->second; throw no_such_attribute_error(key); } -bool -Attributable::deleteAttribute(std::string const& key) +bool Attributable::deleteAttribute(std::string const &key) { - auto & attri = get(); - if(Access::READ_ONLY == IOHandler()->m_frontendAccess ) - throw std::runtime_error("Can not delete an Attribute in a read-only Series."); + auto &attri = get(); + if (Access::READ_ONLY == IOHandler()->m_frontendAccess) + throw std::runtime_error( + "Can not delete an Attribute in a read-only Series."); auto it = attri.m_attributes.find(key); - if( it != attri.m_attributes.end() ) + if (it != attri.m_attributes.end()) { - Parameter< Operation::DELETE_ATT > aDelete; + Parameter aDelete; aDelete.name = key; IOHandler()->enqueue(IOTask(this, aDelete)); IOHandler()->flush(); @@ -77,109 +73,103 @@ Attributable::deleteAttribute(std::string const& key) return false; } -std::vector< std::string > -Attributable::attributes() const +std::vector Attributable::attributes() const { - auto & attri = get(); - std::vector< std::string > ret; + auto &attri = get(); + std::vector ret; ret.reserve(attri.m_attributes.size()); - for( auto const& entry : attri.m_attributes ) + for (auto const &entry : attri.m_attributes) ret.emplace_back(entry.first); return ret; } -size_t -Attributable::numAttributes() const +size_t Attributable::numAttributes() const { return get().m_attributes.size(); } -bool -Attributable::containsAttribute(std::string const &key) const +bool Attributable::containsAttribute(std::string const &key) const { - auto & attri = get(); + auto &attri = get(); return attri.m_attributes.find(key) != attri.m_attributes.end(); } -std::string -Attributable::comment() const +std::string Attributable::comment() const { - return getAttribute("comment").get< std::string >(); + return getAttribute("comment").get(); } -Attributable& -Attributable::setComment(std::string const& c) +Attributable &Attributable::setComment(std::string const &c) { setAttribute("comment", c); return *this; } -void -Attributable::seriesFlush() +void Attributable::seriesFlush() { writable().seriesFlush(); } Series Attributable::retrieveSeries() const { - Writable const * findSeries = &writable(); - while( findSeries->parent ) + 
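// Usage sketch for the string-based merge overload at the end of JSON.cpp
// above: both strings are parsed as inline JSON/TOML, objects are merged
// recursively, a null in the overwrite removes the key, and any other value
// (including whole arrays, per the comment above) simply replaces the
// default. The include path is assumed to be the public header declaring
// openPMD::json::merge.
#include <iostream>
#include <string>
#include <openPMD/auxiliary/JSON.hpp>

void mergeSketch()
{
    std::string defaults =
        R"({"adios2": {"engine": {"type": "bp4", "usesteps": true}}})";
    std::string overwrite =
        R"({"adios2": {"engine": {"type": "sst", "usesteps": null}}})";
    std::cout << openPMD::json::merge(defaults, overwrite) << std::endl;
    // {"adios2":{"engine":{"type":"sst"}}}
}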
Writable const *findSeries = &writable(); + while (findSeries->parent) { findSeries = findSeries->parent; } - auto seriesData = &auxiliary::deref_dynamic_cast< internal::SeriesData >( - findSeries->attributable ); - return Series{ { seriesData, []( auto const * ){} } }; + auto seriesData = &auxiliary::deref_dynamic_cast( + findSeries->attributable); + return Series{{seriesData, [](auto const *) {}}}; } -Iteration const & Attributable::containingIteration() const +Iteration const &Attributable::containingIteration() const { - std::vector< Writable const * > searchQueue; - searchQueue.reserve( 7 ); - Writable const * findSeries = &writable(); - while( findSeries ) + std::vector searchQueue; + searchQueue.reserve(7); + Writable const *findSeries = &writable(); + while (findSeries) { - searchQueue.push_back( findSeries ); + searchQueue.push_back(findSeries); // we don't need to push the last Writable since it's the Series anyway findSeries = findSeries->parent; } // End of the queue: // Iteration -> Series.iterations -> Series - if( searchQueue.size() < 3 ) + if (searchQueue.size() < 3) { throw std::runtime_error( "containingIteration(): Must be called for an object contained in " - "an iteration." ); + "an iteration."); } auto end = searchQueue.rbegin(); - internal::AttributableData const * attr = ( *( end + 2 ) )->attributable; - if( attr == nullptr ) - throw std::runtime_error( "containingIteration(): attributable must not be a nullptr." ); + internal::AttributableData const *attr = (*(end + 2))->attributable; + if (attr == nullptr) + throw std::runtime_error( + "containingIteration(): attributable must not be a nullptr."); /* * We now know the unique instance of Attributable that corresponds with * the iteration. * Since the class Iteration itself still follows the old class design, * we will have to take a detour via Series. */ - auto & series = auxiliary::deref_dynamic_cast< internal::SeriesData >( - ( *searchQueue.rbegin() )->attributable ); - for( auto const & pair : series.iterations ) + auto &series = auxiliary::deref_dynamic_cast( + (*searchQueue.rbegin())->attributable); + for (auto const &pair : series.iterations) { - if( &static_cast< Attributable const & >( pair.second ).get() == attr ) + if (&static_cast(pair.second).get() == attr) { return pair.second; } } throw std::runtime_error( - "Containing iteration not found in containing Series." 
); + "Containing iteration not found in containing Series."); } -Iteration & Attributable::containingIteration() +Iteration &Attributable::containingIteration() { - return const_cast< Iteration & >( - static_cast< Attributable const * >( this ) - ->containingIteration() ); + return const_cast( + static_cast(this)->containingIteration()); } std::string Attributable::MyPath::filePath() const @@ -190,50 +180,47 @@ std::string Attributable::MyPath::filePath() const auto Attributable::myPath() const -> MyPath { MyPath res; - Writable const * findSeries = &writable(); - while( findSeries->parent ) + Writable const *findSeries = &writable(); + while (findSeries->parent) { // we don't need to push_back the ownKeyWithinParent of the Series class // so it's alright that this loop doesn't ask the key of the last found // Writable // push these in reverse because we're building the list from the back - for( auto it = findSeries->ownKeyWithinParent.rbegin(); + for (auto it = findSeries->ownKeyWithinParent.rbegin(); it != findSeries->ownKeyWithinParent.rend(); - ++it ) + ++it) { - res.group.push_back(*it ); + res.group.push_back(*it); } findSeries = findSeries->parent; } - std::reverse(res.group.begin(), res.group.end() ); - auto & seriesData = - auxiliary::deref_dynamic_cast< internal::SeriesData >( - findSeries->attributable ); - Series series{ { &seriesData, []( auto const * ) {} } }; + std::reverse(res.group.begin(), res.group.end()); + auto &seriesData = auxiliary::deref_dynamic_cast( + findSeries->attributable); + Series series{{&seriesData, [](auto const *) {}}}; res.seriesName = series.name(); - res.seriesExtension = suffix( seriesData.m_format ); + res.seriesExtension = suffix(seriesData.m_format); res.directory = IOHandler()->directory; return res; } -void -Attributable::seriesFlush( FlushLevel level ) +void Attributable::seriesFlush(FlushLevel level) { - writable().seriesFlush( level ); + writable().seriesFlush(level); } -void -Attributable::flushAttributes() +void Attributable::flushAttributes() { - if( IOHandler()->m_flushLevel == FlushLevel::SkeletonOnly ) + if (IOHandler()->m_flushLevel == FlushLevel::SkeletonOnly) { return; } - if( dirty() ) + if (dirty()) { - Parameter< Operation::WRITE_ATT > aWrite; - for( std::string const & att_name : attributes() ) + Parameter aWrite; + for (std::string const &att_name : attributes()) { aWrite.name = att_name; aWrite.resource = getAttribute(att_name).getResource(); @@ -245,46 +232,45 @@ Attributable::flushAttributes() } } -void -Attributable::readAttributes( ReadMode mode ) +void Attributable::readAttributes(ReadMode mode) { - auto & attri = get(); - Parameter< Operation::LIST_ATTS > aList; + auto &attri = get(); + Parameter aList; IOHandler()->enqueue(IOTask(this, aList)); IOHandler()->flush(); - std::vector< std::string > written_attributes = attributes(); + std::vector written_attributes = attributes(); /* std::set_difference requires sorted ranges */ std::sort(aList.attributes->begin(), aList.attributes->end()); std::sort(written_attributes.begin(), written_attributes.end()); - std::set< std::string > tmpAttributes; - switch( mode ) + std::set tmpAttributes; + switch (mode) { case ReadMode::IgnoreExisting: // reread: aList - written_attributes std::set_difference( - aList.attributes->begin(), aList.attributes->end(), - written_attributes.begin(), written_attributes.end(), + aList.attributes->begin(), + aList.attributes->end(), + written_attributes.begin(), + written_attributes.end(), std::inserter(tmpAttributes, tmpAttributes.begin())); break; 
case ReadMode::OverrideExisting: - tmpAttributes = std::set< std::string >( - aList.attributes->begin(), - aList.attributes->end() ); + tmpAttributes = std::set( + aList.attributes->begin(), aList.attributes->end()); break; case ReadMode::FullyReread: attri.m_attributes.clear(); - tmpAttributes = std::set< std::string >( - aList.attributes->begin(), - aList.attributes->end() ); + tmpAttributes = std::set( + aList.attributes->begin(), aList.attributes->end()); break; } using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; - for( auto const& att_name : tmpAttributes ) + for (auto const &att_name : tmpAttributes) { aRead.name = att_name; std::string att = auxiliary::strip(att_name, {'\0'}); @@ -292,161 +278,157 @@ Attributable::readAttributes( ReadMode mode ) try { IOHandler()->flush(); - } catch( unsupported_data_error const& e ) + } + catch (unsupported_data_error const &e) { - std::cerr << "Skipping non-standard attribute " - << att << " (" - << e.what() - << ")\n"; + std::cerr << "Skipping non-standard attribute " << att << " (" + << e.what() << ")\n"; continue; } Attribute a(*aRead.resource); - auto guardUnitDimension = - [ this ]( std::string const & key, auto vector ) - { - if( key == "unitDimension" ) + auto guardUnitDimension = [this](std::string const &key, auto vector) { + if (key == "unitDimension") { // Some backends may report the wrong type when reading - if( vector.size() != 7 ) + if (vector.size() != 7) { throw std::runtime_error( "[Attributable] " - "Unexpected datatype for unitDimension." ); + "Unexpected datatype for unitDimension."); } - std::array< double, 7 > arr; - std::copy_n( vector.begin(), 7, arr.begin() ); - setAttribute( key, std::move( arr ) ); + std::array arr; + std::copy_n(vector.begin(), 7, arr.begin()); + setAttribute(key, std::move(arr)); } else { - setAttribute( key, std::move( vector ) ); + setAttribute(key, std::move(vector)); } }; - switch( *aRead.dtype ) + switch (*aRead.dtype) { - case DT::CHAR: - setAttribute(att, a.get< char >()); - break; - case DT::UCHAR: - setAttribute(att, a.get< unsigned char >()); - break; - case DT::SHORT: - setAttribute(att, a.get< short >()); - break; - case DT::INT: - setAttribute(att, a.get< int >()); - break; - case DT::LONG: - setAttribute(att, a.get< long >()); - break; - case DT::LONGLONG: - setAttribute(att, a.get< long long >()); - break; - case DT::USHORT: - setAttribute(att, a.get< unsigned short >()); - break; - case DT::UINT: - setAttribute(att, a.get< unsigned int >()); - break; - case DT::ULONG: - setAttribute(att, a.get< unsigned long >()); - break; - case DT::ULONGLONG: - setAttribute(att, a.get< unsigned long long >()); - break; - case DT::FLOAT: - setAttribute(att, a.get< float >()); - break; - case DT::DOUBLE: - setAttribute(att, a.get< double >()); - break; - case DT::LONG_DOUBLE: - setAttribute(att, a.get< long double >()); - break; - case DT::CFLOAT: - setAttribute(att, a.get< std::complex< float > >()); - break; - case DT::CDOUBLE: - setAttribute(att, a.get< std::complex< double > >()); - break; - case DT::CLONG_DOUBLE: - setAttribute(att, a.get< std::complex< long double > >()); - break; - case DT::STRING: - setAttribute(att, a.get< std::string >()); - break; - case DT::VEC_CHAR: - setAttribute(att, a.get< std::vector< char > >()); - break; - case DT::VEC_SHORT: - setAttribute(att, a.get< std::vector< short > >()); - break; - case DT::VEC_INT: - setAttribute(att, a.get< std::vector< int > >()); - break; - case DT::VEC_LONG: - setAttribute(att, a.get< std::vector< long > 
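// Standalone sketch of the ReadMode::IgnoreExisting logic above: both name
// lists are sorted and std::set_difference keeps only the attributes that
// the backend reports but that the frontend has not already written. The
// attribute names below are examples only.
#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>
#include <string>
#include <vector>

void ignoreExistingSketch()
{
    std::vector<std::string> listedByBackend{"unitSI", "comment", "shape"};
    std::vector<std::string> alreadyWritten{"unitSI"};
    std::sort(listedByBackend.begin(), listedByBackend.end());
    std::sort(alreadyWritten.begin(), alreadyWritten.end());

    std::set<std::string> toReread;
    std::set_difference(
        listedByBackend.begin(),
        listedByBackend.end(),
        alreadyWritten.begin(),
        alreadyWritten.end(),
        std::inserter(toReread, toReread.begin()));
    for (auto const &name : toReread)
        std::cout << name << '\n'; // comment, shape
}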
>()); - break; - case DT::VEC_LONGLONG: - setAttribute(att, a.get< std::vector< long long > >()); - break; - case DT::VEC_UCHAR: - setAttribute(att, a.get< std::vector< unsigned char > >()); - break; - case DT::VEC_USHORT: - setAttribute(att, a.get< std::vector< unsigned short > >()); - break; - case DT::VEC_UINT: - setAttribute(att, a.get< std::vector< unsigned int > >()); - break; - case DT::VEC_ULONG: - setAttribute(att, a.get< std::vector< unsigned long > >()); - break; - case DT::VEC_ULONGLONG: - setAttribute(att, a.get< std::vector< unsigned long long > >()); - break; - case DT::VEC_FLOAT: - guardUnitDimension( att, a.get< std::vector< float > >() ); - break; - case DT::VEC_DOUBLE: - guardUnitDimension( att, a.get< std::vector< double > >() ); - break; - case DT::VEC_LONG_DOUBLE: - guardUnitDimension( att, a.get< std::vector< long double > >() ); - break; - case DT::VEC_CFLOAT: - setAttribute(att, a.get< std::vector< std::complex< float > > >()); - break; - case DT::VEC_CDOUBLE: - setAttribute(att, a.get< std::vector< std::complex< double > > >()); - break; - case DT::VEC_CLONG_DOUBLE: - setAttribute(att, a.get< std::vector< std::complex< long double > > >()); - break; - case DT::VEC_STRING: - setAttribute(att, a.get< std::vector< std::string > >()); - break; - case DT::ARR_DBL_7: - setAttribute(att, a.get< std::array< double, 7 > >()); - break; - case DT::BOOL: - setAttribute(att, a.get< bool >()); - break; - case DT::UNDEFINED: - throw std::runtime_error("Invalid Attribute datatype during read"); + case DT::CHAR: + setAttribute(att, a.get()); + break; + case DT::UCHAR: + setAttribute(att, a.get()); + break; + case DT::SHORT: + setAttribute(att, a.get()); + break; + case DT::INT: + setAttribute(att, a.get()); + break; + case DT::LONG: + setAttribute(att, a.get()); + break; + case DT::LONGLONG: + setAttribute(att, a.get()); + break; + case DT::USHORT: + setAttribute(att, a.get()); + break; + case DT::UINT: + setAttribute(att, a.get()); + break; + case DT::ULONG: + setAttribute(att, a.get()); + break; + case DT::ULONGLONG: + setAttribute(att, a.get()); + break; + case DT::FLOAT: + setAttribute(att, a.get()); + break; + case DT::DOUBLE: + setAttribute(att, a.get()); + break; + case DT::LONG_DOUBLE: + setAttribute(att, a.get()); + break; + case DT::CFLOAT: + setAttribute(att, a.get>()); + break; + case DT::CDOUBLE: + setAttribute(att, a.get>()); + break; + case DT::CLONG_DOUBLE: + setAttribute(att, a.get>()); + break; + case DT::STRING: + setAttribute(att, a.get()); + break; + case DT::VEC_CHAR: + setAttribute(att, a.get>()); + break; + case DT::VEC_SHORT: + setAttribute(att, a.get>()); + break; + case DT::VEC_INT: + setAttribute(att, a.get>()); + break; + case DT::VEC_LONG: + setAttribute(att, a.get>()); + break; + case DT::VEC_LONGLONG: + setAttribute(att, a.get>()); + break; + case DT::VEC_UCHAR: + setAttribute(att, a.get>()); + break; + case DT::VEC_USHORT: + setAttribute(att, a.get>()); + break; + case DT::VEC_UINT: + setAttribute(att, a.get>()); + break; + case DT::VEC_ULONG: + setAttribute(att, a.get>()); + break; + case DT::VEC_ULONGLONG: + setAttribute(att, a.get>()); + break; + case DT::VEC_FLOAT: + guardUnitDimension(att, a.get>()); + break; + case DT::VEC_DOUBLE: + guardUnitDimension(att, a.get>()); + break; + case DT::VEC_LONG_DOUBLE: + guardUnitDimension(att, a.get>()); + break; + case DT::VEC_CFLOAT: + setAttribute(att, a.get>>()); + break; + case DT::VEC_CDOUBLE: + setAttribute(att, a.get>>()); + break; + case DT::VEC_CLONG_DOUBLE: + setAttribute(att, a.get>>()); + 
break; + case DT::VEC_STRING: + setAttribute(att, a.get>()); + break; + case DT::ARR_DBL_7: + setAttribute(att, a.get>()); + break; + case DT::BOOL: + setAttribute(att, a.get()); + break; + case DT::UNDEFINED: + throw std::runtime_error("Invalid Attribute datatype during read"); } } dirty() = false; } -void -Attributable::linkHierarchy(Writable& w) +void Attributable::linkHierarchy(Writable &w) { auto handler = w.IOHandler; writable().IOHandler = handler; writable().parent = &w; } -} // openPMD +} // namespace openPMD diff --git a/src/backend/BaseRecordComponent.cpp b/src/backend/BaseRecordComponent.cpp index 54503f8313..1c86431e02 100644 --- a/src/backend/BaseRecordComponent.cpp +++ b/src/backend/BaseRecordComponent.cpp @@ -23,60 +23,55 @@ namespace openPMD { -double -BaseRecordComponent::unitSI() const +double BaseRecordComponent::unitSI() const { - return getAttribute("unitSI").get< double >(); + return getAttribute("unitSI").get(); } -BaseRecordComponent& -BaseRecordComponent::resetDatatype(Datatype d) +BaseRecordComponent &BaseRecordComponent::resetDatatype(Datatype d) { - if( written() ) - throw std::runtime_error("A Records Datatype can not (yet) be changed after it has been written."); + if (written()) + throw std::runtime_error( + "A Records Datatype can not (yet) be changed after it has been " + "written."); get().m_dataset.dtype = d; return *this; } -Datatype -BaseRecordComponent::getDatatype() const +Datatype BaseRecordComponent::getDatatype() const { return get().m_dataset.dtype; } -bool -BaseRecordComponent::constant() const +bool BaseRecordComponent::constant() const { return get().m_isConstant; } -ChunkTable -BaseRecordComponent::availableChunks() +ChunkTable BaseRecordComponent::availableChunks() { - auto & rc = get(); - if( rc.m_isConstant ) + auto &rc = get(); + if (rc.m_isConstant) { - Offset offset( rc.m_dataset.extent.size(), 0 ); - return ChunkTable{ { std::move( offset ), rc.m_dataset.extent } }; + Offset offset(rc.m_dataset.extent.size(), 0); + return ChunkTable{{std::move(offset), rc.m_dataset.extent}}; } containingIteration().open(); - Parameter< Operation::AVAILABLE_CHUNKS > param; - IOTask task( this, param ); - IOHandler()->enqueue( task ); + Parameter param; + IOTask task(this, param); + IOHandler()->enqueue(task); IOHandler()->flush(); - return std::move( *param.chunks ); + return std::move(*param.chunks); } BaseRecordComponent::BaseRecordComponent( - std::shared_ptr< internal::BaseRecordComponentData > data) - : Attributable{ data } - , m_baseRecordComponentData{ std::move( data ) } + std::shared_ptr data) + : Attributable{data}, m_baseRecordComponentData{std::move(data)} {} -BaseRecordComponent::BaseRecordComponent() - : Attributable{ nullptr } +BaseRecordComponent::BaseRecordComponent() : Attributable{nullptr} { - Attributable::setData( m_baseRecordComponentData ); + Attributable::setData(m_baseRecordComponentData); } } // namespace openPMD diff --git a/src/backend/Container.cpp b/src/backend/Container.cpp index 019491e6ce..8df16f5ea9 100644 --- a/src/backend/Container.cpp +++ b/src/backend/Container.cpp @@ -24,35 +24,35 @@ namespace openPMD::detail { -template<> -std::vector< std::string > keyAsString< std::string const & >( - std::string const & key, std::vector< std::string > const & parentKey ) +template <> +std::vector keyAsString( + std::string const &key, std::vector const &parentKey) { - if( key == RecordComponent::SCALAR ) + if (key == RecordComponent::SCALAR) { auto ret = parentKey; - ret.emplace_back( RecordComponent::SCALAR ); + 
ret.emplace_back(RecordComponent::SCALAR); return ret; } else { - return { key }; + return {key}; } } -template<> -std::vector< std::string > keyAsString< std::string >( - std::string && key, std::vector< std::string > const & parentKey ) +template <> +std::vector keyAsString( + std::string &&key, std::vector const &parentKey) { - if( key == RecordComponent::SCALAR ) + if (key == RecordComponent::SCALAR) { auto ret = parentKey; - ret.emplace_back( RecordComponent::SCALAR ); + ret.emplace_back(RecordComponent::SCALAR); return ret; } else { - return { std::move( key ) }; + return {std::move(key)}; } } -} +} // namespace openPMD::detail diff --git a/src/backend/MeshRecordComponent.cpp b/src/backend/MeshRecordComponent.cpp index 8c9634a32c..0eddda5bbe 100644 --- a/src/backend/MeshRecordComponent.cpp +++ b/src/backend/MeshRecordComponent.cpp @@ -20,55 +20,51 @@ */ #include "openPMD/backend/MeshRecordComponent.hpp" - namespace openPMD { -MeshRecordComponent::MeshRecordComponent() - : RecordComponent() +MeshRecordComponent::MeshRecordComponent() : RecordComponent() { - setPosition(std::vector< double >{0}); + setPosition(std::vector{0}); } -void -MeshRecordComponent::read() +void MeshRecordComponent::read() { using DT = Datatype; - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "position"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); Attribute a = Attribute(*aRead.resource); - if( *aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT ) - setPosition(a.get< std::vector< float > >()); - else if( *aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE ) - setPosition(a.get< std::vector< double > >()); - else if( *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE ) - setPosition(a.get< std::vector< long double > >()); + if (*aRead.dtype == DT::VEC_FLOAT || *aRead.dtype == DT::FLOAT) + setPosition(a.get>()); + else if (*aRead.dtype == DT::VEC_DOUBLE || *aRead.dtype == DT::DOUBLE) + setPosition(a.get>()); + else if ( + *aRead.dtype == DT::VEC_LONG_DOUBLE || *aRead.dtype == DT::LONG_DOUBLE) + setPosition(a.get>()); else - throw std::runtime_error( "Unexpected Attribute datatype for 'position'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'position'"); readBase(); } -template< typename T > -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< T > pos) +template +MeshRecordComponent &MeshRecordComponent::setPosition(std::vector pos) { - static_assert(std::is_floating_point< T >::value, - "Type of attribute must be floating point"); + static_assert( + std::is_floating_point::value, + "Type of attribute must be floating point"); setAttribute("position", pos); return *this; } -template -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< float > pos); -template -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< double > pos); -template -MeshRecordComponent& -MeshRecordComponent::setPosition(std::vector< long double > pos); -} // openPMD +template MeshRecordComponent & +MeshRecordComponent::setPosition(std::vector pos); +template MeshRecordComponent & +MeshRecordComponent::setPosition(std::vector pos); +template MeshRecordComponent & +MeshRecordComponent::setPosition(std::vector pos); +} // namespace openPMD diff --git a/src/backend/PatchRecord.cpp b/src/backend/PatchRecord.cpp index 3926677a30..433bc5928a 100644 --- a/src/backend/PatchRecord.cpp +++ b/src/backend/PatchRecord.cpp @@ -18,63 +18,66 @@ * and the GNU Lesser General Public License along with openPMD-api. 
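// Usage sketch for MeshRecordComponent::setPosition above: it records the
// in-cell position of a mesh record component, e.g. {0.5, 0.5, 0.5} for
// cell-centered data in 3D. `mrc` stands in for any component obtained from
// a Mesh; the umbrella include is an assumption of this sketch.
#include <vector>
#include <openPMD/openPMD.hpp>

void positionSketch(openPMD::MeshRecordComponent &mrc)
{
    mrc.setPosition(std::vector<double>{0.5, 0.5, 0.5});
}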
* If not, see . */ -#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/backend/PatchRecord.hpp" - +#include "openPMD/auxiliary/Memory.hpp" namespace openPMD { -PatchRecord& -PatchRecord::setUnitDimension(std::map< UnitDimension, double > const& udim) +PatchRecord & +PatchRecord::setUnitDimension(std::map const &udim) { - if( !udim.empty() ) + if (!udim.empty()) { - std::array< double, 7 > tmpUnitDimension = this->unitDimension(); - for( auto const& entry : udim ) + std::array tmpUnitDimension = this->unitDimension(); + for (auto const &entry : udim) tmpUnitDimension[static_cast(entry.first)] = entry.second; setAttribute("unitDimension", tmpUnitDimension); } return *this; } -void -PatchRecord::flush_impl(std::string const& path) +void PatchRecord::flush_impl(std::string const &path) { - if( this->find(RecordComponent::SCALAR) == this->end() ) + if (this->find(RecordComponent::SCALAR) == this->end()) { - if(IOHandler()->m_frontendAccess != Access::READ_ONLY ) - Container< PatchRecordComponent >::flush(path); // warning (clang-tidy-10): bugprone-parent-virtual-call - for( auto& comp : *this ) + if (IOHandler()->m_frontendAccess != Access::READ_ONLY) + Container::flush( + path); // warning (clang-tidy-10): bugprone-parent-virtual-call + for (auto &comp : *this) comp.second.flush(comp.first); - } else + } + else this->operator[](RecordComponent::SCALAR).flush(path); - if( IOHandler()->m_flushLevel == FlushLevel::UserFlush ) + if (IOHandler()->m_flushLevel == FlushLevel::UserFlush) { this->dirty() = false; } } -void -PatchRecord::read() +void PatchRecord::read() { - Parameter< Operation::READ_ATT > aRead; + Parameter aRead; aRead.name = "unitDimension"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == Datatype::ARR_DBL_7 || *aRead.dtype == Datatype::VEC_DOUBLE ) - this->setAttribute("unitDimension", Attribute(*aRead.resource).template get< std::array< double, 7 > >()); + if (*aRead.dtype == Datatype::ARR_DBL_7 || + *aRead.dtype == Datatype::VEC_DOUBLE) + this->setAttribute( + "unitDimension", + Attribute(*aRead.resource).template get>()); else - throw std::runtime_error("Unexpected Attribute datatype for 'unitDimension'"); + throw std::runtime_error( + "Unexpected Attribute datatype for 'unitDimension'"); - Parameter< Operation::LIST_DATASETS > dList; + Parameter dList; IOHandler()->enqueue(IOTask(this, dList)); IOHandler()->flush(); - Parameter< Operation::OPEN_DATASET > dOpen; - for( auto const& component_name : *dList.datasets ) + Parameter dOpen; + for (auto const &component_name : *dList.datasets) { - PatchRecordComponent& prc = (*this)[component_name]; + PatchRecordComponent &prc = (*this)[component_name]; dOpen.name = component_name; IOHandler()->enqueue(IOTask(&prc, dOpen)); IOHandler()->flush(); @@ -86,4 +89,4 @@ PatchRecord::read() } dirty() = false; } -} // openPMD +} // namespace openPMD diff --git a/src/backend/PatchRecordComponent.cpp b/src/backend/PatchRecordComponent.cpp index 9331e65b90..6eb696f174 100644 --- a/src/backend/PatchRecordComponent.cpp +++ b/src/backend/PatchRecordComponent.cpp @@ -18,87 +18,84 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
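// Usage sketch for PatchRecord::setUnitDimension above: the map of
// base-quantity powers is written into the 7-entry unitDimension array
// (L, M, T, I, theta, N, J in openPMD). A velocity record, for example, is
// length^1 * time^-1. `record` stands in for any PatchRecord obtained from
// a particle species; the umbrella include is an assumption of this sketch.
#include <openPMD/openPMD.hpp>

void unitDimensionSketch(openPMD::PatchRecord &record)
{
    using openPMD::UnitDimension;
    record.setUnitDimension(
        {{UnitDimension::L, 1.0}, {UnitDimension::T, -1.0}});
}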
*/ -#include "openPMD/auxiliary/Memory.hpp" #include "openPMD/backend/PatchRecordComponent.hpp" +#include "openPMD/auxiliary/Memory.hpp" #include - namespace openPMD { namespace internal { PatchRecordComponentData::PatchRecordComponentData() { - PatchRecordComponent impl{ { this, []( auto const * ){} } }; - impl.setUnitSI( 1 ); + PatchRecordComponent impl{{this, [](auto const *) {}}}; + impl.setUnitSI(1); } -} +} // namespace internal -PatchRecordComponent& -PatchRecordComponent::setUnitSI(double usi) +PatchRecordComponent &PatchRecordComponent::setUnitSI(double usi) { setAttribute("unitSI", usi); return *this; } -PatchRecordComponent& -PatchRecordComponent::resetDataset(Dataset d) +PatchRecordComponent &PatchRecordComponent::resetDataset(Dataset d) { - if( written() ) - throw std::runtime_error("A Records Dataset can not (yet) be changed after it has been written."); - if( d.extent.empty() ) - throw std::runtime_error("Dataset extent must be at least 1D."); - if( std::any_of(d.extent.begin(), d.extent.end(), - [](Extent::value_type const& i) { return i == 0u; }) ) - throw std::runtime_error("Dataset extent must not be zero in any dimension."); + if (written()) + throw std::runtime_error( + "A Records Dataset can not (yet) be changed after it has been " + "written."); + if (d.extent.empty()) + throw std::runtime_error("Dataset extent must be at least 1D."); + if (std::any_of( + d.extent.begin(), d.extent.end(), [](Extent::value_type const &i) { + return i == 0u; + })) + throw std::runtime_error( + "Dataset extent must not be zero in any dimension."); get().m_dataset = d; dirty() = true; return *this; } -uint8_t -PatchRecordComponent::getDimensionality() const +uint8_t PatchRecordComponent::getDimensionality() const { return 1; } -Extent -PatchRecordComponent::getExtent() const +Extent PatchRecordComponent::getExtent() const { return get().m_dataset.extent; } -PatchRecordComponent::PatchRecordComponent() - : BaseRecordComponent{ nullptr } +PatchRecordComponent::PatchRecordComponent() : BaseRecordComponent{nullptr} { - BaseRecordComponent::setData( m_patchRecordComponentData ); + BaseRecordComponent::setData(m_patchRecordComponentData); } PatchRecordComponent::PatchRecordComponent( - std::shared_ptr< internal::PatchRecordComponentData > data ) - : BaseRecordComponent{ data } - , m_patchRecordComponentData{ std::move( data ) } -{ -} + std::shared_ptr data) + : BaseRecordComponent{data}, m_patchRecordComponentData{std::move(data)} +{} -void -PatchRecordComponent::flush(std::string const& name) +void PatchRecordComponent::flush(std::string const &name) { - auto & rc = get(); - if(IOHandler()->m_frontendAccess == Access::READ_ONLY ) + auto &rc = get(); + if (IOHandler()->m_frontendAccess == Access::READ_ONLY) { - while( !rc.m_chunks.empty() ) + while (!rc.m_chunks.empty()) { IOHandler()->enqueue(rc.m_chunks.front()); rc.m_chunks.pop(); } - } else + } + else { - if( !written() ) + if (!written()) { - Parameter< Operation::CREATE_DATASET > dCreate; + Parameter dCreate; dCreate.name = name; dCreate.extent = getExtent(); dCreate.dtype = getDatatype(); @@ -106,7 +103,7 @@ PatchRecordComponent::flush(std::string const& name) IOHandler()->enqueue(IOTask(this, dCreate)); } - while( !rc.m_chunks.empty() ) + while (!rc.m_chunks.empty()) { IOHandler()->enqueue(rc.m_chunks.front()); rc.m_chunks.pop(); @@ -116,30 +113,28 @@ PatchRecordComponent::flush(std::string const& name) } } -void -PatchRecordComponent::read() +void PatchRecordComponent::read() { - Parameter< Operation::READ_ATT > aRead; + Parameter 
aRead; aRead.name = "unitSI"; IOHandler()->enqueue(IOTask(this, aRead)); IOHandler()->flush(); - if( *aRead.dtype == Datatype::DOUBLE ) - setUnitSI(Attribute(*aRead.resource).get< double >()); + if (*aRead.dtype == Datatype::DOUBLE) + setUnitSI(Attribute(*aRead.resource).get()); else throw std::runtime_error("Unexpected Attribute datatype for 'unitSI'"); - readAttributes( ReadMode::FullyReread ); // this will set dirty() = false + readAttributes(ReadMode::FullyReread); // this will set dirty() = false } -bool -PatchRecordComponent::dirtyRecursive() const +bool PatchRecordComponent::dirtyRecursive() const { - if( this->dirty() ) + if (this->dirty()) { return true; } - auto & rc = get(); + auto &rc = get(); return !rc.m_chunks.empty(); } -} // openPMD +} // namespace openPMD diff --git a/src/backend/Writable.cpp b/src/backend/Writable.cpp index 606332cd05..7f3733904b 100644 --- a/src/backend/Writable.cpp +++ b/src/backend/Writable.cpp @@ -22,31 +22,28 @@ #include "openPMD/Series.hpp" #include "openPMD/auxiliary/DerefDynamicCast.hpp" - namespace openPMD { - Writable::Writable(internal::AttributableData* a) - : abstractFilePosition{nullptr}, - IOHandler{nullptr}, - attributable{a}, - parent{nullptr}, - dirty{true}, - written{false} - { } +Writable::Writable(internal::AttributableData *a) + : abstractFilePosition{nullptr} + , IOHandler{nullptr} + , attributable{a} + , parent{nullptr} + , dirty{true} + , written{false} +{} - void - Writable::seriesFlush() - { - seriesFlush( FlushLevel::UserFlush ); - } +void Writable::seriesFlush() +{ + seriesFlush(FlushLevel::UserFlush); +} - void - Writable::seriesFlush( FlushLevel level ) - { - auto series = Attributable( { attributable, []( auto const * ){} } ) - .retrieveSeries(); - series.flush_impl( - series.iterations.begin(), series.iterations.end(), level ); - } +void Writable::seriesFlush(FlushLevel level) +{ + auto series = + Attributable({attributable, [](auto const *) {}}).retrieveSeries(); + series.flush_impl( + series.iterations.begin(), series.iterations.end(), level); +} -} // openPMD +} // namespace openPMD diff --git a/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp b/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp index 97cba7769b..e494b175de 100644 --- a/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp +++ b/src/benchmark/mpi/OneDimensionalBlockSlicer.cpp @@ -23,74 +23,53 @@ #include - namespace openPMD { - OneDimensionalBlockSlicer::OneDimensionalBlockSlicer( Extent::value_type dim ) : - m_dim { dim } - {} +OneDimensionalBlockSlicer::OneDimensionalBlockSlicer(Extent::value_type dim) + : m_dim{dim} +{} +std::pair +OneDimensionalBlockSlicer::sliceBlock(Extent &totalExtent, int size, int rank) +{ + Offset offs(totalExtent.size(), 0); - std::pair< - Offset, - Extent - > OneDimensionalBlockSlicer::sliceBlock( - Extent & totalExtent, - int size, - int rank - ) + if (rank >= size) { - Offset offs( - totalExtent.size( ), - 0 - ); - - if( rank >= size ) - { - Extent extent( - totalExtent.size( ), - 0 - ); - return std::make_pair( - std::move( offs ), - std::move( extent ) - ); - } - - auto dim = this->m_dim; + Extent extent(totalExtent.size(), 0); + return std::make_pair(std::move(offs), std::move(extent)); + } - // for more equal balancing, we want the start index - // at the upper gaussian bracket of (N/n*rank) - // where N the size of the dataset in dimension dim - // and n the MPI size - // for avoiding integer overflow, this is the same as: - // (N div n)*rank + round((N%n)/n*rank) - auto f = [&totalExtent, size, dim]( int threadRank ) - { - 
auto N = totalExtent[dim]; - auto res = ( N / size ) * threadRank; - auto padDivident = ( N % size ) * threadRank; - auto pad = padDivident / size; - if( pad * size < padDivident ) - { - pad += 1; - } - return res + pad; - }; + auto dim = this->m_dim; - offs[dim] = f( rank ); - Extent localExtent { totalExtent }; - if( rank >= size - 1 ) - { - localExtent[dim] -= offs[dim]; - } - else + // for more equal balancing, we want the start index + // at the upper gaussian bracket of (N/n*rank) + // where N the size of the dataset in dimension dim + // and n the MPI size + // for avoiding integer overflow, this is the same as: + // (N div n)*rank + round((N%n)/n*rank) + auto f = [&totalExtent, size, dim](int threadRank) { + auto N = totalExtent[dim]; + auto res = (N / size) * threadRank; + auto padDivident = (N % size) * threadRank; + auto pad = padDivident / size; + if (pad * size < padDivident) { - localExtent[dim] = f( rank + 1 ) - offs[dim]; + pad += 1; } - return std::make_pair( - std::move( offs ), - std::move( localExtent ) - ); + return res + pad; + }; + + offs[dim] = f(rank); + Extent localExtent{totalExtent}; + if (rank >= size - 1) + { + localExtent[dim] -= offs[dim]; + } + else + { + localExtent[dim] = f(rank + 1) - offs[dim]; } + return std::make_pair(std::move(offs), std::move(localExtent)); } +} // namespace openPMD diff --git a/src/binding/python/Access.cpp b/src/binding/python/Access.cpp index 0b1c274c7c..338f42db25 100644 --- a/src/binding/python/Access.cpp +++ b/src/binding/python/Access.cpp @@ -26,11 +26,10 @@ namespace py = pybind11; using namespace openPMD; - -void init_Access(py::module &m) { +void init_Access(py::module &m) +{ py::enum_(m, "Access") .value("read_only", Access::READ_ONLY) .value("read_write", Access::READ_WRITE) - .value("create", Access::CREATE) - ; + .value("create", Access::CREATE); } diff --git a/src/binding/python/Attributable.cpp b/src/binding/python/Attributable.cpp index 2e5134a32d..69708fc948 100644 --- a/src/binding/python/Attributable.cpp +++ b/src/binding/python/Attributable.cpp @@ -19,14 +19,14 @@ * If not, see . 
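// Worked example for the balancing formula above: with N = 10 elements and
// size = 4 ranks, the start index f(rank) = (N/size)*rank +
// ceil((N%size)*rank/size) gives offsets 0, 3, 5, 8 and local extents
// 3, 2, 3, 2, which sum to 10. The include path is inferred from this
// file's location.
#include <iostream>
#include <openPMD/benchmark/mpi/OneDimensionalBlockSlicer.hpp>

void sliceSketch()
{
    openPMD::OneDimensionalBlockSlicer slicer(0); // slice along dimension 0
    openPMD::Extent total{10};
    for (int rank = 0; rank < 4; ++rank)
    {
        auto [offset, extent] = slicer.sliceBlock(total, /* size = */ 4, rank);
        std::cout << "rank " << rank << ": offset " << offset[0]
                  << ", extent " << extent[0] << '\n';
    }
}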
*/ #include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Attribute.hpp" +#include "openPMD/DatatypeHelpers.hpp" #include "openPMD/auxiliary/Variant.hpp" +#include "openPMD/backend/Attribute.hpp" #include "openPMD/binding/python/Numpy.hpp" -#include "openPMD/DatatypeHelpers.hpp" #include -#include #include +#include #include #include @@ -35,18 +35,15 @@ #include #include - namespace py = pybind11; using namespace openPMD; -using PyAttributeKeys = std::vector< std::string >; -//PYBIND11_MAKE_OPAQUE(PyAttributeKeys) +using PyAttributeKeys = std::vector; +// PYBIND11_MAKE_OPAQUE(PyAttributeKeys) bool setAttributeFromBufferInfo( - Attributable & attr, - std::string const& key, - py::buffer& a -) { + Attributable &attr, std::string const &key, py::buffer &a) +{ using DT = Datatype; py::buffer_info buf = a.request(); @@ -58,68 +55,78 @@ bool setAttributeFromBufferInfo( // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 // scalars, see PEP 3118 // requires Numpy 1.15+ - if( buf.ndim == 0 ) { + if (buf.ndim == 0) + { // refs: // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html // https://docs.python.org/3/library/struct.html#format-characters // std::cout << " scalar type '" << buf.format << "'" << std::endl; // typestring: encoding + type + number of bytes - switch( dtype_from_bufferformat( buf.format ) ) + switch (dtype_from_bufferformat(buf.format)) { - case DT::BOOL: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::SHORT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::INT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::LONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::LONGLONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::USHORT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::UINT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::ULONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::ULONGLONG: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::FLOAT: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::DOUBLE: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::LONG_DOUBLE: - return attr.setAttribute( key, *static_cast(buf.ptr) ); - break; - case DT::CFLOAT: - return attr.setAttribute( key, *static_cast*>(buf.ptr) ); - break; - case DT::CDOUBLE: - return attr.setAttribute( key, *static_cast*>(buf.ptr) ); - break; - case DT::CLONG_DOUBLE: - return attr.setAttribute( key, *static_cast*>(buf.ptr) ); - break; - default: - throw std::runtime_error("set_attribute: Unknown " - "Python type '" + buf.format + - "' for attribute '" + key + "'"); + case DT::BOOL: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::SHORT: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::INT: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::LONG: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::LONGLONG: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::USHORT: + return attr.setAttribute( + key, *static_cast(buf.ptr)); + break; + case DT::UINT: + return attr.setAttribute( + key, *static_cast(buf.ptr)); + break; + case DT::ULONG: + return attr.setAttribute( + key, *static_cast(buf.ptr)); + break; + case 
DT::ULONGLONG: + return attr.setAttribute( + key, *static_cast(buf.ptr)); + break; + case DT::FLOAT: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::DOUBLE: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::LONG_DOUBLE: + return attr.setAttribute(key, *static_cast(buf.ptr)); + break; + case DT::CFLOAT: + return attr.setAttribute( + key, *static_cast *>(buf.ptr)); + break; + case DT::CDOUBLE: + return attr.setAttribute( + key, *static_cast *>(buf.ptr)); + break; + case DT::CLONG_DOUBLE: + return attr.setAttribute( + key, *static_cast *>(buf.ptr)); + break; + default: + throw std::runtime_error( + "set_attribute: Unknown " + "Python type '" + + buf.format + "' for attribute '" + key + "'"); } return false; } // lists & ndarrays: all will be flattended to 1D lists - else { + else + { // std::cout << " array type '" << buf.format << "'" << std::endl; /* required are contiguous buffers @@ -127,18 +134,18 @@ bool setAttributeFromBufferInfo( * - not strided with paddings * - not a view in another buffer that results in striding */ - auto* view = new Py_buffer(); + auto *view = new Py_buffer(); int flags = PyBUF_STRIDES | PyBUF_FORMAT; - if( PyObject_GetBuffer( a.ptr(), view, flags ) != 0 ) + if (PyObject_GetBuffer(a.ptr(), view, flags) != 0) { delete view; throw py::error_already_set(); } - bool isContiguous = ( PyBuffer_IsContiguous( view, 'A' ) != 0 ); - PyBuffer_Release( view ); + bool isContiguous = (PyBuffer_IsContiguous(view, 'A') != 0); + PyBuffer_Release(view); delete view; - if( !isContiguous ) + if (!isContiguous) throw py::index_error( "non-contiguous buffer provided, handling not implemented!"); // @todo in order to implement stride handling, one needs to @@ -154,106 +161,108 @@ bool setAttributeFromBufferInfo( ) ); else */ // std::cout << "+++++++++++ BUFFER: " << buf.format << std::endl; - if( buf.format.find("b") != std::string::npos ) - return attr.setAttribute( key, + if (buf.format.find("b") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("h") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("h") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("i") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("i") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("l") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("l") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("q") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("q") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("B") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if 
(buf.format.find("B") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("H") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("H") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("I") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("I") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("L") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("L") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Q") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("Q") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Zf") != std::string::npos ) - return attr.setAttribute( key, - std::vector>( - static_cast*>(buf.ptr), - static_cast*>(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Zd") != std::string::npos ) - return attr.setAttribute( key, - std::vector>( - static_cast*>(buf.ptr), - static_cast*>(buf.ptr) + buf.size - ) ); - else if( buf.format.find("Zg") != std::string::npos ) - return attr.setAttribute( key, - std::vector>( - static_cast*>(buf.ptr), - static_cast*>(buf.ptr) + buf.size - ) ); - else if( buf.format.find("f") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("Zf") != std::string::npos) + return attr.setAttribute( + key, + std::vector>( + static_cast *>(buf.ptr), + static_cast *>(buf.ptr) + buf.size)); + else if (buf.format.find("Zd") != std::string::npos) + return attr.setAttribute( + key, + std::vector>( + static_cast *>(buf.ptr), + static_cast *>(buf.ptr) + buf.size)); + else if (buf.format.find("Zg") != std::string::npos) + return attr.setAttribute( + key, + std::vector>( + static_cast *>(buf.ptr), + static_cast *>(buf.ptr) + + buf.size)); + else if (buf.format.find("f") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("d") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("d") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); - else if( buf.format.find("g") != std::string::npos ) - return attr.setAttribute( key, + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); + else if (buf.format.find("g") != std::string::npos) + return attr.setAttribute( + key, std::vector( - static_cast(buf.ptr), - static_cast(buf.ptr) + buf.size - ) ); + static_cast(buf.ptr), + static_cast(buf.ptr) + buf.size)); else - throw std::runtime_error("set_attribute: Unknown " - "Python type '" + 
buf.format + - "' for attribute '" + key + "'"); + throw std::runtime_error( + "set_attribute: Unknown " + "Python type '" + + buf.format + "' for attribute '" + key + "'"); return false; } @@ -261,148 +270,140 @@ bool setAttributeFromBufferInfo( struct SetAttributeFromObject { - static constexpr char const * errorMsg = "Attributable.set_attribute()"; + static constexpr char const *errorMsg = "Attributable.set_attribute()"; - template< typename RequestedType > - static bool call( - Attributable & attr, - std::string const& key, - py::object& obj ) + template + static bool + call(Attributable &attr, std::string const &key, py::object &obj) { - if( std::string( py::str( obj.get_type() ) ) == "" ) + if (std::string(py::str(obj.get_type())) == "") { - using ListType = std::vector< RequestedType >; - return attr.setAttribute< ListType >( key, obj.cast< ListType >() ); + using ListType = std::vector; + return attr.setAttribute(key, obj.cast()); } else { - return attr.setAttribute< RequestedType >( - key, obj.cast< RequestedType >() ); + return attr.setAttribute( + key, obj.cast()); } } }; -template<> -bool SetAttributeFromObject::call< double >( - Attributable & attr, std::string const & key, py::object & obj ) +template <> +bool SetAttributeFromObject::call( + Attributable &attr, std::string const &key, py::object &obj) { - if( std::string( py::str( obj.get_type() ) ) == "" ) + if (std::string(py::str(obj.get_type())) == "") { - using ListType = std::vector< double >; - using ArrayType = std::array< double, 7 >; - ListType const & asVector = obj.cast< ListType >(); - if( asVector.size() == 7 && key == "unitDimension" ) + using ListType = std::vector; + using ArrayType = std::array; + ListType const &asVector = obj.cast(); + if (asVector.size() == 7 && key == "unitDimension") { ArrayType asArray; - std::copy_n( asVector.begin(), 7, asArray.begin() ); - return attr.setAttribute< ArrayType >( key, asArray ); + std::copy_n(asVector.begin(), 7, asArray.begin()); + return attr.setAttribute(key, asArray); } else { - return attr.setAttribute< ListType >( key, asVector ); + return attr.setAttribute(key, asVector); } } else { - return attr.setAttribute< double >( key, obj.cast< double >() ); + return attr.setAttribute(key, obj.cast()); } } -template<> -bool SetAttributeFromObject::call< bool >( - Attributable & attr, std::string const & key, py::object & obj ) +template <> +bool SetAttributeFromObject::call( + Attributable &attr, std::string const &key, py::object &obj) { - return attr.setAttribute< bool >( key, obj.cast< bool >() ); + return attr.setAttribute(key, obj.cast()); } -template<> -bool SetAttributeFromObject::call< char >( - Attributable & attr, std::string const & key, py::object & obj ) +template <> +bool SetAttributeFromObject::call( + Attributable &attr, std::string const &key, py::object &obj) { - if( std::string( py::str( obj.get_type() ) ) == "" ) + if (std::string(py::str(obj.get_type())) == "") { - using ListChar = std::vector< char >; - using ListString = std::vector< std::string >; + using ListChar = std::vector; + using ListString = std::vector; try { - return attr.setAttribute< ListString >( - key, obj.cast< ListString >() ); + return attr.setAttribute(key, obj.cast()); } - catch( const py::cast_error & ) + catch (const py::cast_error &) { - return attr.setAttribute< ListChar >( key, obj.cast< ListChar >() ); + return attr.setAttribute(key, obj.cast()); } } - else if( std::string( py::str( obj.get_type() ) ) == "" ) + else if (std::string(py::str(obj.get_type())) == "") { - 
return attr.setAttribute< std::string >( - key, obj.cast< std::string >() ); + return attr.setAttribute(key, obj.cast()); } else { - return attr.setAttribute< char >( key, obj.cast< char >() ); + return attr.setAttribute(key, obj.cast()); } } bool setAttributeFromObject( - Attributable & attr, - std::string const & key, - py::object & obj, - pybind11::dtype datatype ) + Attributable &attr, + std::string const &key, + py::object &obj, + pybind11::dtype datatype) { - Datatype requestedDatatype = dtype_from_numpy( datatype ); - return switchNonVectorType< SetAttributeFromObject >( - requestedDatatype, attr, key, obj ); + Datatype requestedDatatype = dtype_from_numpy(datatype); + return switchNonVectorType( + requestedDatatype, attr, key, obj); } -void init_Attributable(py::module &m) { +void init_Attributable(py::module &m) +{ py::class_(m, "Attributable") .def(py::init()) - .def("__repr__", - [](Attributable const & attr) { - return ""; - } - ) - .def("series_flush", py::overload_cast< >(&Attributable::seriesFlush)) + .def( + "__repr__", + [](Attributable const &attr) { + return ""; + }) + .def("series_flush", py::overload_cast<>(&Attributable::seriesFlush)) .def_property_readonly( "attributes", - []( Attributable & attr ) - { - return attr.attributes(); - }, + [](Attributable &attr) { return attr.attributes(); }, // ref + keepalive - py::return_value_policy::reference_internal - ) + py::return_value_policy::reference_internal) // C++ pass-through API: Setter // note that the order of overloads is important! - // all buffer protocol compatible objects, including numpy arrays if not specialized specifically... - .def("set_attribute", []( Attributable & attr, std::string const& key, py::buffer& a ) { - // std::cout << "set attr via py::buffer: " << key << std::endl; - return setAttributeFromBufferInfo( - attr, - key, - a - ); - }) - .def("set_attribute", []( - Attributable & attr, - std::string const& key, - py::object& obj, - pybind11::dtype datatype ) - { - return setAttributeFromObject( attr, key, obj, datatype ); - }, + // all buffer protocol compatible objects, including numpy arrays if not + // specialized specifically... 
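// Illustrative sketch of the dispatch pattern behind
// switchNonVectorType< SetAttributeFromObject >(...) above: a runtime
// datatype tag selects a compile-time instantiation of a functor's
// call< T >(). Reduced here to three types; MiniDatatype, miniSwitch and
// PrintTyped are placeholder names for this example, not openPMD-api symbols.
#include <iostream>
#include <stdexcept>
#include <string>
#include <typeinfo>
#include <utility>

enum class MiniDatatype
{
    INT,
    DOUBLE,
    STRING
};

template <typename Action, typename... Args>
auto miniSwitch(MiniDatatype dt, Args &&...args)
{
    switch (dt)
    {
    case MiniDatatype::INT:
        return Action::template call<int>(std::forward<Args>(args)...);
    case MiniDatatype::DOUBLE:
        return Action::template call<double>(std::forward<Args>(args)...);
    case MiniDatatype::STRING:
        return Action::template call<std::string>(std::forward<Args>(args)...);
    }
    throw std::runtime_error("unknown datatype");
}

struct PrintTyped
{
    template <typename T>
    static bool call(std::string const &key)
    {
        // a real functor would e.g. cast a py::object to T and then call
        // Attributable::setAttribute< T >(key, value)
        std::cout << key << " handled as " << typeid(T).name() << "\n";
        return true;
    }
};

int main()
{
    miniSwitch<PrintTyped>(MiniDatatype::DOUBLE, "unitSI");
}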
+ .def( + "set_attribute", + [](Attributable &attr, std::string const &key, py::buffer &a) { + // std::cout << "set attr via py::buffer: " << key << std::endl; + return setAttributeFromBufferInfo(attr, key, a); + }) + .def( + "set_attribute", + [](Attributable &attr, + std::string const &key, + py::object &obj, + pybind11::dtype datatype) { + return setAttributeFromObject(attr, key, obj, datatype); + }, py::arg("key"), py::arg("value"), - py::arg("datatype") - ) + py::arg("datatype")) // fundamental Python types - .def("set_attribute", &Attributable::setAttribute< bool >) - .def("set_attribute", &Attributable::setAttribute< unsigned char >) + .def("set_attribute", &Attributable::setAttribute) + .def("set_attribute", &Attributable::setAttribute) // -> handle all native python integers as long // .def("set_attribute", &Attributable::setAttribute< short >) // .def("set_attribute", &Attributable::setAttribute< int >) @@ -411,63 +412,83 @@ void init_Attributable(py::module &m) { // .def("set_attribute", &Attributable::setAttribute< unsigned short >) // .def("set_attribute", &Attributable::setAttribute< unsigned int >) // .def("set_attribute", &Attributable::setAttribute< unsigned long >) - // .def("set_attribute", &Attributable::setAttribute< unsigned long long >) - .def("set_attribute", &Attributable::setAttribute< long >) + // .def("set_attribute", &Attributable::setAttribute< unsigned long long + // >) + .def("set_attribute", &Attributable::setAttribute) // work-around for https://github.com/pybind/pybind11/issues/1512 // -> handle all native python floats as double // .def("set_attribute", &Attributable::setAttribute< float >) // .def("set_attribute", &Attributable::setAttribute< long double >) - .def("set_attribute", &Attributable::setAttribute< double >) + .def("set_attribute", &Attributable::setAttribute) // work-around for https://github.com/pybind/pybind11/issues/1509 // -> since there is only str in Python, chars are strings // .def("set_attribute", &Attributable::setAttribute< char >) - .def("set_attribute", []( Attributable & attr, std::string const& key, std::string const& value ) { - return attr.setAttribute( key, value ); - }) + .def( + "set_attribute", + [](Attributable &attr, + std::string const &key, + std::string const &value) { + return attr.setAttribute(key, value); + }) - // Plain Python arrays and plain python lists of homogeneous, fundamental Python types - // not specialized in C++ API - // .def("set_attribute", &Attributable::setAttribute< std::vector< bool > >) - // there is only str in Python, chars are strings - // .def("set_attribute", &Attributable::setAttribute< std::vector< char > >) - .def("set_attribute", &Attributable::setAttribute< std::vector< unsigned char > >) - .def("set_attribute", &Attributable::setAttribute< std::vector< long > >) - .def("set_attribute", &Attributable::setAttribute< std::vector< double > >) // TODO: this implicitly casts list of complex - // probably affected by bug https://github.com/pybind/pybind11/issues/1258 - .def("set_attribute", []( Attributable & attr, std::string const& key, std::vector< std::string > const& value ) { - return attr.setAttribute( key, value ); - }) - // .def("set_attribute", &Attributable::setAttribute< std::array< double, 7 > >) + // Plain Python arrays and plain python lists of homogeneous, + // fundamental Python types not specialized in C++ API + // .def("set_attribute", &Attributable::setAttribute< std::vector< bool + // > >) there is only str in Python, chars are strings + // .def("set_attribute", 
&Attributable::setAttribute< std::vector< char + // > >) + .def( + "set_attribute", + &Attributable::setAttribute>) + .def("set_attribute", &Attributable::setAttribute>) + .def( + "set_attribute", + &Attributable::setAttribute>) // TODO: this implicitly casts list of complex + // probably affected by bug + // https://github.com/pybind/pybind11/issues/1258 + .def( + "set_attribute", + [](Attributable &attr, + std::string const &key, + std::vector const &value) { + return attr.setAttribute(key, value); + }) + // .def("set_attribute", &Attributable::setAttribute< std::array< + // double, 7 > >) // C++ pass-through API: Getter - .def("get_attribute", []( Attributable & attr, std::string const& key ) { - auto v = attr.getAttribute(key); - return v.getResource(); - // TODO instead of returning lists, return all arrays (ndim > 0) as numpy arrays? - }) - .def_property_readonly("attribute_dtypes", []( Attributable const & attributable ) { - std::map< std::string, pybind11::dtype > dtypes; - for( auto const & attr : attributable.attributes() ) - { - dtypes[ attr ] = - dtype_to_numpy( attributable.getAttribute( attr ).dtype ); - } - return dtypes; - }) + .def( + "get_attribute", + [](Attributable &attr, std::string const &key) { + auto v = attr.getAttribute(key); + return v.getResource(); + // TODO instead of returning lists, return all arrays (ndim > 0) + // as numpy arrays? + }) + .def_property_readonly( + "attribute_dtypes", + [](Attributable const &attributable) { + std::map dtypes; + for (auto const &attr : attributable.attributes()) + { + dtypes[attr] = + dtype_to_numpy(attributable.getAttribute(attr).dtype); + } + return dtypes; + }) .def("delete_attribute", &Attributable::deleteAttribute) .def("contains_attribute", &Attributable::containsAttribute) .def("__len__", &Attributable::numAttributes) - // @todo _ipython_key_completions_ if we find a way to add a [] interface + // @todo _ipython_key_completions_ if we find a way to add a [] + // interface - .def_property("comment", &Attributable::comment, &Attributable::setComment) + .def_property( + "comment", &Attributable::comment, &Attributable::setComment) // TODO remove in future versions (deprecated) - .def("set_comment", &Attributable::setComment) - ; + .def("set_comment", &Attributable::setComment); - py::bind_vector< PyAttributeKeys >( - m, - "Attribute_Keys" - ); + py::bind_vector(m, "Attribute_Keys"); } diff --git a/src/binding/python/BaseRecord.cpp b/src/binding/python/BaseRecord.cpp index e3a1e6f57c..2696a6afa7 100644 --- a/src/binding/python/BaseRecord.cpp +++ b/src/binding/python/BaseRecord.cpp @@ -22,29 +22,42 @@ #include #include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/Container.hpp" #include "openPMD/backend/MeshRecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/backend/PatchRecordComponent.hpp" #include "openPMD/binding/python/UnitDimension.hpp" namespace py = pybind11; using namespace openPMD; - -void init_BaseRecord(py::module &m) { +void init_BaseRecord(py::module &m) +{ constexpr auto doc_scalar = R"docstr( Returns true if this record only contains a single component. 
)docstr"; - py::class_, Container< BaseRecordComponent > >(m, "Base_Record_Base_Record_Component") - .def_property_readonly("unit_dimension", &BaseRecord< BaseRecordComponent >::unitDimension, python::doc_unit_dimension) - .def_property_readonly("scalar", &BaseRecord< BaseRecordComponent >::scalar, doc_scalar); + py::class_, Container>( + m, "Base_Record_Base_Record_Component") + .def_property_readonly( + "unit_dimension", + &BaseRecord::unitDimension, + python::doc_unit_dimension) + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); - py::class_, Container< RecordComponent > >(m, "Base_Record_Record_Component") - .def_property_readonly("scalar", &BaseRecord< RecordComponent >::scalar, doc_scalar); - py::class_, Container< MeshRecordComponent > >(m, "Base_Record_Mesh_Record_Component") - .def_property_readonly("scalar", &BaseRecord< MeshRecordComponent >::scalar, doc_scalar); - py::class_, Container< PatchRecordComponent > >(m, "Base_Record_Patch_Record_Component") - .def_property_readonly("scalar", &BaseRecord< PatchRecordComponent >::scalar, doc_scalar); + py::class_, Container>( + m, "Base_Record_Record_Component") + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); + py::class_, Container>( + m, "Base_Record_Mesh_Record_Component") + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); + py::class_< + BaseRecord, + Container>( + m, "Base_Record_Patch_Record_Component") + .def_property_readonly( + "scalar", &BaseRecord::scalar, doc_scalar); } diff --git a/src/binding/python/BaseRecordComponent.cpp b/src/binding/python/BaseRecordComponent.cpp index 8b2747477f..0a3a306e76 100644 --- a/src/binding/python/BaseRecordComponent.cpp +++ b/src/binding/python/BaseRecordComponent.cpp @@ -18,12 +18,12 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . 
*/ -#include #include +#include #include -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/Datatype.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/binding/python/Numpy.hpp" #include @@ -31,25 +31,24 @@ namespace py = pybind11; using namespace openPMD; - -void init_BaseRecordComponent(py::module &m) { +void init_BaseRecordComponent(py::module &m) +{ py::class_(m, "Base_Record_Component") - .def("__repr__", - [](BaseRecordComponent const & brc) { + .def( + "__repr__", + [](BaseRecordComponent const &brc) { std::stringstream ss; ss << ""; return ss.str(); - } - ) + }) .def("reset_datatype", &BaseRecordComponent::resetDatatype) .def("available_chunks", &BaseRecordComponent::availableChunks) .def_property_readonly("unit_SI", &BaseRecordComponent::unitSI) .def_property_readonly("constant", &BaseRecordComponent::constant) - .def_property_readonly("dtype", [](BaseRecordComponent & brc) { - return dtype_to_numpy( brc.getDatatype() ); - }) - ; + .def_property_readonly("dtype", [](BaseRecordComponent &brc) { + return dtype_to_numpy(brc.getDatatype()); + }); } diff --git a/src/binding/python/ChunkInfo.cpp b/src/binding/python/ChunkInfo.cpp index a6698d290b..5edc29353b 100644 --- a/src/binding/python/ChunkInfo.cpp +++ b/src/binding/python/ChunkInfo.cpp @@ -29,34 +29,34 @@ namespace py = pybind11; using namespace openPMD; - -void init_Chunk(py::module &m) { +void init_Chunk(py::module &m) +{ py::class_(m, "ChunkInfo") - .def(py::init(), - py::arg("offset"), py::arg("extent")) - .def("__repr__", - [](const ChunkInfo & c) { - return ""; - } - ) + .def(py::init(), py::arg("offset"), py::arg("extent")) + .def( + "__repr__", + [](const ChunkInfo &c) { + return ""; + }) .def_readwrite("offset", &ChunkInfo::offset) - .def_readwrite("extent", &ChunkInfo::extent) - ; + .def_readwrite("extent", &ChunkInfo::extent); py::class_(m, "WrittenChunkInfo") - .def(py::init(), - py::arg("offset"), py::arg("extent")) - .def(py::init(), - py::arg("offset"), py::arg("extent"), py::arg("rank")) - .def("__repr__", - [](const WrittenChunkInfo & c) { - return ""; - } - ) - .def_readwrite("offset", &WrittenChunkInfo::offset ) - .def_readwrite("extent", &WrittenChunkInfo::extent ) - .def_readwrite("source_id", &WrittenChunkInfo::sourceID ) + .def(py::init(), py::arg("offset"), py::arg("extent")) + .def( + py::init(), + py::arg("offset"), + py::arg("extent"), + py::arg("rank")) + .def( + "__repr__", + [](const WrittenChunkInfo &c) { + return ""; + }) + .def_readwrite("offset", &WrittenChunkInfo::offset) + .def_readwrite("extent", &WrittenChunkInfo::extent) + .def_readwrite("source_id", &WrittenChunkInfo::sourceID) .def(py::pickle( // __getstate__ @@ -70,12 +70,11 @@ void init_Chunk(py::module &m) { if (t.size() != 3) throw std::runtime_error("Invalid state!"); - auto const offset = t[0].cast< Offset >(); - auto const extent = t[1].cast< Extent >(); - auto const sourceID = t[2].cast< decltype(WrittenChunkInfo::sourceID) >(); + auto const offset = t[0].cast(); + auto const extent = t[1].cast(); + auto const sourceID = + t[2].cast(); return WrittenChunkInfo(offset, extent, sourceID); - } - )) - ; + })); } diff --git a/src/binding/python/Container.cpp b/src/binding/python/Container.cpp index 7bfeddceae..357cb0bba2 100644 --- a/src/binding/python/Container.cpp +++ b/src/binding/python/Container.cpp @@ -25,195 +25,126 @@ */ #include -#include #include +#include -#include "openPMD/backend/Container.hpp" -#include "openPMD/backend/BaseRecord.hpp" -#include 
"openPMD/backend/MeshRecordComponent.hpp" -#include "openPMD/backend/PatchRecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" -#include "openPMD/backend/PatchRecord.hpp" #include "openPMD/Iteration.hpp" #include "openPMD/Mesh.hpp" -#include "openPMD/ParticleSpecies.hpp" #include "openPMD/ParticlePatches.hpp" +#include "openPMD/ParticleSpecies.hpp" #include "openPMD/Record.hpp" +#include "openPMD/backend/BaseRecord.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" +#include "openPMD/backend/Container.hpp" +#include "openPMD/backend/MeshRecordComponent.hpp" +#include "openPMD/backend/PatchRecord.hpp" +#include "openPMD/backend/PatchRecordComponent.hpp" -#include #include +#include #include namespace py = pybind11; using namespace openPMD; - namespace detail { - /* based on std_bind.h in pybind11 - * - * Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob - * - * BSD-style license, see pybind11 LICENSE file. - */ - template< - typename Map, - typename holder_type = std::unique_ptr< Map >, - typename... Args - > - py::class_< - Map, - holder_type, - Attributable - > bind_container( - py::handle scope, - std::string const & name, - Args && ... args - ) +/* based on std_bind.h in pybind11 + * + * Copyright (c) 2016 Sergey Lyskov and Wenzel Jakob + * + * BSD-style license, see pybind11 LICENSE file. + */ +template < + typename Map, + typename holder_type = std::unique_ptr, + typename... Args> +py::class_ +bind_container(py::handle scope, std::string const &name, Args &&...args) +{ + using KeyType = typename Map::key_type; + using MappedType = typename Map::mapped_type; + using Class_ = py::class_; + + // If either type is a non-module-local bound type then make the map + // binding non-local as well; otherwise (e.g. both types are either + // module-local or converting) the map will be module-local. + auto tinfo = py::detail::get_type_info(typeid(MappedType)); + bool local = !tinfo || tinfo->module_local; + if (local) { - using KeyType = typename Map::key_type; - using MappedType = typename Map::mapped_type; - using Class_ = py::class_< - Map, - holder_type, - Attributable - >; - - // If either type is a non-module-local bound type then make the map - // binding non-local as well; otherwise (e.g. both types are either - // module-local or converting) the map will be module-local. - auto tinfo = py::detail::get_type_info( typeid( MappedType ) ); - bool local = !tinfo || tinfo->module_local; - if( local ) { - tinfo = py::detail::get_type_info( typeid( KeyType ) ); - local = !tinfo || tinfo->module_local; - } - - Class_ cl( - scope, - name.c_str(), - py::module_local( local ), - std::forward< Args >( args ) ... 
- ); - - cl.def( py::init() ); - - // Register stream insertion operator (if possible) - py::detail::map_if_insertion_operator< - Map, - Class_ - >( - cl, - name - ); - - cl.def( - "__bool__", - []( const Map & m ) - -> bool - { - return !m.empty(); - }, - "Check whether the container is nonempty" - ); - - cl.def( - "__iter__", - []( Map & m ) - { - return py::make_key_iterator( - m.begin(), - m.end() - ); - }, - // keep container alive while iterator exists - py::keep_alive< - 0, - 1 - >() - ); - - cl.def( - "items", - []( Map & m ) - { - return py::make_iterator( - m.begin(), - m.end() - ); - }, - // keep container alive while iterator exists - py::keep_alive< - 0, - 1 - >() - ); - - // keep same policy as Container class: missing keys are created - cl.def( - "__getitem__", - []( - Map & m, - KeyType const & k - ) -> MappedType & { - return m[ k ]; - }, - // ref + keepalive - py::return_value_policy::reference_internal - ); - - // Assignment provided only if the type is copyable - py::detail::map_assignment< - Map, - Class_ - >( cl ); - - cl.def( - "__delitem__", - []( - Map &m, - KeyType const & k - ) { - auto it = m.find( k ); - if( it == m.end() ) - throw py::key_error(); - m.erase( it ); - } - ); - - cl.def( - "__len__", - &Map::size - ); - - cl.def( - "_ipython_key_completions_", - []( Map & m ) { - auto l = py::list(); - for( const auto &myPair : m ) - l.append( myPair.first ); - return l; - } - ); - - return cl; + tinfo = py::detail::get_type_info(typeid(KeyType)); + local = !tinfo || tinfo->module_local; } -} // namespace detail + Class_ cl( + scope, + name.c_str(), + py::module_local(local), + std::forward(args)...); + + cl.def(py::init()); + + // Register stream insertion operator (if possible) + py::detail::map_if_insertion_operator(cl, name); + + cl.def( + "__bool__", + [](const Map &m) -> bool { return !m.empty(); }, + "Check whether the container is nonempty"); + + cl.def( + "__iter__", + [](Map &m) { return py::make_key_iterator(m.begin(), m.end()); }, + // keep container alive while iterator exists + py::keep_alive<0, 1>()); + + cl.def( + "items", + [](Map &m) { return py::make_iterator(m.begin(), m.end()); }, + // keep container alive while iterator exists + py::keep_alive<0, 1>()); + + // keep same policy as Container class: missing keys are created + cl.def( + "__getitem__", + [](Map &m, KeyType const &k) -> MappedType & { return m[k]; }, + // ref + keepalive + py::return_value_policy::reference_internal); + + // Assignment provided only if the type is copyable + py::detail::map_assignment(cl); + + cl.def("__delitem__", [](Map &m, KeyType const &k) { + auto it = m.find(k); + if (it == m.end()) + throw py::key_error(); + m.erase(it); + }); + + cl.def("__len__", &Map::size); + + cl.def("_ipython_key_completions_", [](Map &m) { + auto l = py::list(); + for (const auto &myPair : m) + l.append(myPair.first); + return l; + }); + + return cl; +} +} // namespace detail -using PyIterationContainer = Container< - Iteration, - uint64_t ->; -using PyMeshContainer = Container< Mesh >; -using PyPartContainer = Container< ParticleSpecies >; -using PyPatchContainer = Container< ParticlePatches >; -using PyRecordContainer = Container< Record >; -using PyPatchRecordContainer = Container< PatchRecord >; -using PyRecordComponentContainer = Container< RecordComponent >; -using PyMeshRecordComponentContainer = Container< MeshRecordComponent >; -using PyPatchRecordComponentContainer = Container< PatchRecordComponent >; -using PyBaseRecordComponentContainer = Container< BaseRecordComponent >; 
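// For comparison, a minimal opaque-map binding built from pybind11's stock
// helpers (py::bind_map from stl_bind.h), which detail::bind_container above
// extends with openPMD specifics: the Attributable base class, missing-key
// creation in __getitem__ and keep_alive on the iterators. The module name
// `example` and the StringDoubleMap alias are placeholders for this sketch.
#include <pybind11/pybind11.h>
#include <pybind11/stl_bind.h>

#include <map>
#include <string>

namespace py = pybind11;

// an alias keeps the comma out of the macro argument and makes the type
// opaque, so it is passed by reference instead of being copied into a dict
using StringDoubleMap = std::map<std::string, double>;
PYBIND11_MAKE_OPAQUE(StringDoubleMap)

PYBIND11_MODULE(example, m)
{
    py::bind_map<StringDoubleMap>(m, "StringDoubleMap");
}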
+using PyIterationContainer = Container; +using PyMeshContainer = Container; +using PyPartContainer = Container; +using PyPatchContainer = Container; +using PyRecordContainer = Container; +using PyPatchRecordContainer = Container; +using PyRecordComponentContainer = Container; +using PyMeshRecordComponentContainer = Container; +using PyPatchRecordComponentContainer = Container; +using PyBaseRecordComponentContainer = Container; PYBIND11_MAKE_OPAQUE(PyIterationContainer) PYBIND11_MAKE_OPAQUE(PyMeshContainer) PYBIND11_MAKE_OPAQUE(PyPartContainer) @@ -225,45 +156,21 @@ PYBIND11_MAKE_OPAQUE(PyMeshRecordComponentContainer) PYBIND11_MAKE_OPAQUE(PyPatchRecordComponentContainer) PYBIND11_MAKE_OPAQUE(PyBaseRecordComponentContainer) -void init_Container( py::module & m ) { - ::detail::bind_container< PyIterationContainer >( - m, - "Iteration_Container" - ); - ::detail::bind_container< PyMeshContainer >( - m, - "Mesh_Container" - ); - ::detail::bind_container< PyPartContainer >( - m, - "Particle_Container" - ); - ::detail::bind_container< PyPatchContainer >( - m, - "Particle_Patches_Container" - ); - ::detail::bind_container< PyRecordContainer >( - m, - "Record_Container" - ); - ::detail::bind_container< PyPatchRecordContainer >( - m, - "Patch_Record_Container" - ); - ::detail::bind_container< PyRecordComponentContainer >( - m, - "Record_Component_Container" - ); - ::detail::bind_container< PyMeshRecordComponentContainer >( - m, - "Mesh_Record_Component_Container" - ); - ::detail::bind_container< PyPatchRecordComponentContainer >( - m, - "Patch_Record_Component_Container" - ); - ::detail::bind_container< PyBaseRecordComponentContainer >( - m, - "Base_Record_Component_Container" - ); +void init_Container(py::module &m) +{ + ::detail::bind_container(m, "Iteration_Container"); + ::detail::bind_container(m, "Mesh_Container"); + ::detail::bind_container(m, "Particle_Container"); + ::detail::bind_container(m, "Particle_Patches_Container"); + ::detail::bind_container(m, "Record_Container"); + ::detail::bind_container( + m, "Patch_Record_Container"); + ::detail::bind_container( + m, "Record_Component_Container"); + ::detail::bind_container( + m, "Mesh_Record_Component_Container"); + ::detail::bind_container( + m, "Patch_Record_Component_Container"); + ::detail::bind_container( + m, "Base_Record_Component_Container"); } diff --git a/src/binding/python/Dataset.cpp b/src/binding/python/Dataset.cpp index 16d2ef4cf3..e24d3b52ba 100644 --- a/src/binding/python/Dataset.cpp +++ b/src/binding/python/Dataset.cpp @@ -29,42 +29,44 @@ namespace py = pybind11; using namespace openPMD; - -void init_Dataset(py::module &m) { +void init_Dataset(py::module &m) +{ py::class_(m, "Dataset") - .def(py::init(), - py::arg("dtype"), py::arg("extent") - ) + .def(py::init(), py::arg("dtype"), py::arg("extent")) .def(py::init(), py::arg("extent")) - .def(py::init( [](py::dtype dt, Extent e) { - auto const d = dtype_from_numpy( dt ); - return new Dataset{d, e}; - }), - py::arg("dtype"), py::arg("extent") - ) - .def(py::init(), - py::arg("dtype"), py::arg("extent"), py::arg("options") - ) - .def(py::init( [](py::dtype dt, Extent e, std::string options) { - auto const d = dtype_from_numpy( dt ); - return new Dataset{d, e, std::move(options)}; - }), - py::arg("dtype"), py::arg("extent"), py::arg("options") - ) + .def( + py::init([](py::dtype dt, Extent e) { + auto const d = dtype_from_numpy(dt); + return new Dataset{d, e}; + }), + py::arg("dtype"), + py::arg("extent")) + .def( + py::init(), + py::arg("dtype"), + py::arg("extent"), + 
py::arg("options")) + .def( + py::init([](py::dtype dt, Extent e, std::string options) { + auto const d = dtype_from_numpy(dt); + return new Dataset{d, e, std::move(options)}; + }), + py::arg("dtype"), + py::arg("extent"), + py::arg("options")) - .def("__repr__", + .def( + "__repr__", [](const Dataset &d) { - return ""; - } - ) + return ""; + }) .def_readonly("extent", &Dataset::extent) .def("extend", &Dataset::extend) .def_readonly("rank", &Dataset::rank) - .def_property_readonly("dtype", [](const Dataset &d) { - return dtype_to_numpy( d.dtype ); - }) - .def_readwrite("options", &Dataset::options) - ; + .def_property_readonly( + "dtype", [](const Dataset &d) { return dtype_to_numpy(d.dtype); }) + .def_readwrite("options", &Dataset::options); } diff --git a/src/binding/python/Datatype.cpp b/src/binding/python/Datatype.cpp index 3447b067a6..6c5587725d 100644 --- a/src/binding/python/Datatype.cpp +++ b/src/binding/python/Datatype.cpp @@ -27,8 +27,8 @@ namespace py = pybind11; using namespace openPMD; - -void init_Datatype(py::module &m) { +void init_Datatype(py::module &m) +{ py::enum_(m, "Datatype", py::arithmetic()) .value("CHAR", Datatype::CHAR) .value("UCHAR", Datatype::UCHAR) @@ -60,13 +60,12 @@ void init_Datatype(py::module &m) { .value("VEC_STRING", Datatype::VEC_STRING) .value("ARR_DBL_7", Datatype::ARR_DBL_7) .value("BOOL", Datatype::BOOL) - .value("UNDEFINED", Datatype::UNDEFINED) - ; + .value("UNDEFINED", Datatype::UNDEFINED); m.def("determine_datatype", [](py::dtype const dt) { - return dtype_from_numpy( dt ); + return dtype_from_numpy(dt); }); - m.def("determine_datatype", [](py::array const & a) { - return dtype_from_numpy( a.dtype() ); + m.def("determine_datatype", [](py::array const &a) { + return dtype_from_numpy(a.dtype()); }); } diff --git a/src/binding/python/Error.cpp b/src/binding/python/Error.cpp index 056faab956..ea5712296b 100644 --- a/src/binding/python/Error.cpp +++ b/src/binding/python/Error.cpp @@ -5,19 +5,19 @@ namespace py = pybind11; using namespace openPMD; -void init_Error( py::module & m ) +void init_Error(py::module &m) { - auto & baseError = py::register_exception< Error >( m, "Error" ); - py::register_exception< error::OperationUnsupportedInBackend >( - m, "ErrorOperationUnsupportedInBackend", baseError ); - py::register_exception< error::WrongAPIUsage >( - m, "ErrorWrongAPIUsage", baseError ); - py::register_exception< error::BackendConfigSchema >( - m, "ErrorBackendConfigSchema", baseError ); + auto &baseError = py::register_exception(m, "Error"); + py::register_exception( + m, "ErrorOperationUnsupportedInBackend", baseError); + py::register_exception( + m, "ErrorWrongAPIUsage", baseError); + py::register_exception( + m, "ErrorBackendConfigSchema", baseError); #ifndef NDEBUG - m.def( "test_throw", []( std::string description ) { - throw error::OperationUnsupportedInBackend( "json", description ); - } ); + m.def("test_throw", [](std::string description) { + throw error::OperationUnsupportedInBackend("json", description); + }); #endif } diff --git a/src/binding/python/Helper.cpp b/src/binding/python/Helper.cpp index 64da843ac2..9b07a37f70 100644 --- a/src/binding/python/Helper.cpp +++ b/src/binding/python/Helper.cpp @@ -21,33 +21,31 @@ #include #include +#include "openPMD/Series.hpp" #include "openPMD/cli/ls.hpp" #include "openPMD/helper/list_series.hpp" -#include "openPMD/Series.hpp" -#include #include +#include #include - namespace py = pybind11; using namespace openPMD; -void init_Helper(py::module &m) { - m.def("list_series", - [](Series & series, 
bool const longer) { - std::stringstream s; - helper::listSeries( series, longer, s ); - py::print(s.str()); - }, - py::arg("series"), - py::arg_v("longer", false, "Print more verbose output."), - "List information about an openPMD data series" - ) - // CLI entry point - .def("_ls_run", // &cli::ls::run - [](std::vector< std::string > & argv) { - return cli::ls::run( argv ); - } - ); +void init_Helper(py::module &m) +{ + m.def( + "list_series", + [](Series &series, bool const longer) { + std::stringstream s; + helper::listSeries(series, longer, s); + py::print(s.str()); + }, + py::arg("series"), + py::arg_v("longer", false, "Print more verbose output."), + "List information about an openPMD data series") + // CLI entry point + .def( + "_ls_run", // &cli::ls::run + [](std::vector &argv) { return cli::ls::run(argv); }); } diff --git a/src/binding/python/Iteration.cpp b/src/binding/python/Iteration.cpp index ec85f4fd9a..6454789b1e 100644 --- a/src/binding/python/Iteration.cpp +++ b/src/binding/python/Iteration.cpp @@ -28,24 +28,34 @@ namespace py = pybind11; using namespace openPMD; - -void init_Iteration(py::module &m) { +void init_Iteration(py::module &m) +{ py::class_(m, "Iteration") .def(py::init()) - .def("__repr__", - [](Iteration const & it) { - return ""; - } - ) + .def( + "__repr__", + [](Iteration const &it) { + return ""; + }) - .def_property("time", &Iteration::time, &Iteration::setTime) - .def_property("time", &Iteration::time, &Iteration::setTime) - .def_property("time", &Iteration::time, &Iteration::setTime) + .def_property( + "time", &Iteration::time, &Iteration::setTime) + .def_property( + "time", &Iteration::time, &Iteration::setTime) + .def_property( + "time", + &Iteration::time, + &Iteration::setTime) .def_property("dt", &Iteration::dt, &Iteration::setDt) .def_property("dt", &Iteration::dt, &Iteration::setDt) - .def_property("dt", &Iteration::dt, &Iteration::setDt) - .def_property("time_unit_SI", &Iteration::timeUnitSI, &Iteration::setTimeUnitSI) + .def_property( + "dt", &Iteration::dt, &Iteration::setDt) + .def_property( + "time_unit_SI", &Iteration::timeUnitSI, &Iteration::setTimeUnitSI) .def("open", &Iteration::open) .def("close", &Iteration::close, py::arg("flush") = true) @@ -58,13 +68,16 @@ void init_Iteration(py::module &m) { .def("set_dt", &Iteration::setDt) .def("set_time_unit_SI", &Iteration::setTimeUnitSI) - .def_readwrite("meshes", &Iteration::meshes, + .def_readwrite( + "meshes", + &Iteration::meshes, py::return_value_policy::reference, // garbage collection: return value must be freed before Iteration py::keep_alive<1, 0>()) - .def_readwrite("particles", &Iteration::particles, + .def_readwrite( + "particles", + &Iteration::particles, py::return_value_policy::reference, // garbage collection: return value must be freed before Iteration - py::keep_alive<1, 0>()) - ; + py::keep_alive<1, 0>()); } diff --git a/src/binding/python/IterationEncoding.cpp b/src/binding/python/IterationEncoding.cpp index a4edc7c03c..479ef65555 100644 --- a/src/binding/python/IterationEncoding.cpp +++ b/src/binding/python/IterationEncoding.cpp @@ -26,11 +26,10 @@ namespace py = pybind11; using namespace openPMD; - -void init_IterationEncoding(py::module &m) { +void init_IterationEncoding(py::module &m) +{ py::enum_(m, "Iteration_Encoding") .value("file_based", IterationEncoding::fileBased) .value("group_based", IterationEncoding::groupBased) - .value("variable_based", IterationEncoding::variableBased) - ; + .value("variable_based", IterationEncoding::variableBased); } diff --git 
a/src/binding/python/Mesh.cpp b/src/binding/python/Mesh.cpp index 3ff67e9771..2f95f87ecf 100644 --- a/src/binding/python/Mesh.cpp +++ b/src/binding/python/Mesh.cpp @@ -24,8 +24,8 @@ #include "openPMD/Mesh.hpp" #include "openPMD/backend/BaseRecord.hpp" #include "openPMD/backend/MeshRecordComponent.hpp" -#include "openPMD/binding/python/UnitDimension.hpp" #include "openPMD/binding/python/Pickle.hpp" +#include "openPMD/binding/python/UnitDimension.hpp" #include #include @@ -33,45 +33,79 @@ namespace py = pybind11; using namespace openPMD; +void init_Mesh(py::module &m) +{ + py::class_> cl(m, "Mesh"); + cl.def(py::init()) -void init_Mesh(py::module &m) { - py::class_ > cl(m, "Mesh"); - cl - .def(py::init()) + .def( + "__repr__", + [](Mesh const &mesh) { + return ""; + }) - .def("__repr__", - [](Mesh const & mesh) { - return ""; - } - ) - - .def_property("unit_dimension", + .def_property( + "unit_dimension", &Mesh::unitDimension, &Mesh::setUnitDimension, python::doc_unit_dimension) - .def_property("geometry", &Mesh::geometry, py::overload_cast(&Mesh::setGeometry)) .def_property( - "geometry_string", &Mesh::geometryString, py::overload_cast(&Mesh::setGeometry)) - .def_property("geometry_parameters", &Mesh::geometryParameters, &Mesh::setGeometryParameters) - .def_property("data_order", - [](Mesh const & mesh){ return static_cast< char >(mesh.dataOrder()); }, - [](Mesh & mesh, char d){ mesh.setDataOrder(Mesh::DataOrder(d)); }, - "Data Order of the Mesh (deprecated and set to C in openPMD 2)" - ) + "geometry", + &Mesh::geometry, + py::overload_cast(&Mesh::setGeometry)) + .def_property( + "geometry_string", + &Mesh::geometryString, + py::overload_cast(&Mesh::setGeometry)) + .def_property( + "geometry_parameters", + &Mesh::geometryParameters, + &Mesh::setGeometryParameters) + .def_property( + "data_order", + [](Mesh const &mesh) { + return static_cast(mesh.dataOrder()); + }, + [](Mesh &mesh, char d) { mesh.setDataOrder(Mesh::DataOrder(d)); }, + "Data Order of the Mesh (deprecated and set to C in openPMD 2)") .def_property("axis_labels", &Mesh::axisLabels, &Mesh::setAxisLabels) - .def_property("grid_spacing", &Mesh::gridSpacing, &Mesh::setGridSpacing) - .def_property("grid_spacing", &Mesh::gridSpacing, &Mesh::setGridSpacing) - .def_property("grid_spacing", &Mesh::gridSpacing, &Mesh::setGridSpacing) - .def_property("grid_global_offset", &Mesh::gridGlobalOffset, &Mesh::setGridGlobalOffset) + .def_property( + "grid_spacing", + &Mesh::gridSpacing, + &Mesh::setGridSpacing) + .def_property( + "grid_spacing", + &Mesh::gridSpacing, + &Mesh::setGridSpacing) + .def_property( + "grid_spacing", + &Mesh::gridSpacing, + &Mesh::setGridSpacing) + .def_property( + "grid_global_offset", + &Mesh::gridGlobalOffset, + &Mesh::setGridGlobalOffset) .def_property("grid_unit_SI", &Mesh::gridUnitSI, &Mesh::setGridUnitSI) - .def_property("time_offset", &Mesh::timeOffset, &Mesh::setTimeOffset) - .def_property("time_offset", &Mesh::timeOffset, &Mesh::setTimeOffset) - .def_property("time_offset", &Mesh::timeOffset, &Mesh::setTimeOffset) + .def_property( + "time_offset", + &Mesh::timeOffset, + &Mesh::setTimeOffset) + .def_property( + "time_offset", + &Mesh::timeOffset, + &Mesh::setTimeOffset) + .def_property( + "time_offset", + &Mesh::timeOffset, + &Mesh::setTimeOffset) // TODO remove in future versions (deprecated) .def("set_unit_dimension", &Mesh::setUnitDimension) - .def("set_geometry", py::overload_cast(&Mesh::setGeometry)) + .def( + "set_geometry", + py::overload_cast(&Mesh::setGeometry)) .def("set_geometry", 
py::overload_cast(&Mesh::setGeometry)) .def("set_geometry_parameters", &Mesh::setGeometryParameters) .def("set_axis_labels", &Mesh::setAxisLabels) @@ -79,21 +113,17 @@ void init_Mesh(py::module &m) { .def("set_grid_spacing", &Mesh::setGridSpacing) .def("set_grid_spacing", &Mesh::setGridSpacing) .def("set_grid_global_offset", &Mesh::setGridGlobalOffset) - .def("set_grid_unit_SI", &Mesh::setGridUnitSI) - ; + .def("set_grid_unit_SI", &Mesh::setGridUnitSI); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].meshes[group.at(3)]; - } - ); + }); py::enum_(m, "Geometry") .value("cartesian", Mesh::Geometry::cartesian) .value("thetaMode", Mesh::Geometry::thetaMode) .value("cylindrical", Mesh::Geometry::cylindrical) .value("spherical", Mesh::Geometry::spherical) - .value("other", Mesh::Geometry::other) - ; + .value("other", Mesh::Geometry::other); } diff --git a/src/binding/python/MeshRecordComponent.cpp b/src/binding/python/MeshRecordComponent.cpp index dea58425e0..ff702b53d5 100644 --- a/src/binding/python/MeshRecordComponent.cpp +++ b/src/binding/python/MeshRecordComponent.cpp @@ -21,9 +21,9 @@ #include #include -#include "openPMD/backend/MeshRecordComponent.hpp" #include "openPMD/RecordComponent.hpp" #include "openPMD/Series.hpp" +#include "openPMD/backend/MeshRecordComponent.hpp" #include "openPMD/binding/python/Pickle.hpp" #include @@ -32,35 +32,38 @@ namespace py = pybind11; using namespace openPMD; +void init_MeshRecordComponent(py::module &m) +{ + py::class_ cl( + m, "Mesh_Record_Component"); + cl.def( + "__repr__", + [](MeshRecordComponent const &rc) { + return ""; + }) -void init_MeshRecordComponent(py::module &m) { - py::class_ cl(m, "Mesh_Record_Component"); - cl - .def("__repr__", - [](MeshRecordComponent const & rc) { - return ""; - } - ) - - .def_property("position", + .def_property( + "position", &MeshRecordComponent::position, &MeshRecordComponent::setPosition, - "Relative position of the component on an element (node/cell/voxel) of the mesh") - .def_property("position", + "Relative position of the component on an element " + "(node/cell/voxel) of the mesh") + .def_property( + "position", &MeshRecordComponent::position, &MeshRecordComponent::setPosition, - "Relative position of the component on an element (node/cell/voxel) of the mesh") - .def_property("position", + "Relative position of the component on an element " + "(node/cell/voxel) of the mesh") + .def_property( + "position", &MeshRecordComponent::position, &MeshRecordComponent::setPosition, - "Relative position of the component on an element (node/cell/voxel) of the mesh") - ; + "Relative position of the component on an element " + "(node/cell/voxel) of the mesh"); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].meshes[group.at(3)][group.at(4)]; - } - ); + }); } diff --git a/src/binding/python/ParticlePatches.cpp b/src/binding/python/ParticlePatches.cpp index 3950080861..a6e252805c 100644 --- a/src/binding/python/ParticlePatches.cpp +++ b/src/binding/python/ParticlePatches.cpp @@ -22,23 +22,23 @@ #include #include "openPMD/ParticlePatches.hpp" -#include "openPMD/backend/PatchRecord.hpp" #include "openPMD/backend/Container.hpp" +#include 
"openPMD/backend/PatchRecord.hpp" #include namespace py = pybind11; using namespace openPMD; +void init_ParticlePatches(py::module &m) +{ + py::class_>(m, "Particle_Patches") + .def( + "__repr__", + [](ParticlePatches const &pp) { + return ""; + }) -void init_ParticlePatches(py::module &m) { - py::class_ >(m, "Particle_Patches") - .def("__repr__", - [](ParticlePatches const & pp) { - return ""; - } - ) - - .def_property_readonly("num_patches", &ParticlePatches::numPatches) - ; + .def_property_readonly("num_patches", &ParticlePatches::numPatches); } diff --git a/src/binding/python/ParticleSpecies.cpp b/src/binding/python/ParticleSpecies.cpp index deaf0c6a8f..008bc7a7b3 100644 --- a/src/binding/python/ParticleSpecies.cpp +++ b/src/binding/python/ParticleSpecies.cpp @@ -33,23 +33,17 @@ namespace py = pybind11; using namespace openPMD; +void init_ParticleSpecies(py::module &m) +{ + py::class_> cl(m, "ParticleSpecies"); + cl.def( + "__repr__", + [](ParticleSpecies const &) { return ""; }) -void init_ParticleSpecies(py::module &m) { - py::class_ > cl(m, "ParticleSpecies"); - cl - .def("__repr__", - [](ParticleSpecies const &) { - return ""; - } - ) - - .def_readwrite("particle_patches", &ParticleSpecies::particlePatches) - ; + .def_readwrite("particle_patches", &ParticleSpecies::particlePatches); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].particles[group.at(3)]; - } - ); + }); } diff --git a/src/binding/python/PatchRecord.cpp b/src/binding/python/PatchRecord.cpp index 191ff9fc6f..3a14d105c1 100644 --- a/src/binding/python/PatchRecord.cpp +++ b/src/binding/python/PatchRecord.cpp @@ -21,23 +21,23 @@ #include #include +#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/backend/PatchRecord.hpp" #include "openPMD/backend/PatchRecordComponent.hpp" -#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/binding/python/UnitDimension.hpp" namespace py = pybind11; using namespace openPMD; - -void init_PatchRecord(py::module &m) { - py::class_ >(m, "Patch_Record") - .def_property("unit_dimension", - &PatchRecord::unitDimension, - &PatchRecord::setUnitDimension, - python::doc_unit_dimension) +void init_PatchRecord(py::module &m) +{ + py::class_>(m, "Patch_Record") + .def_property( + "unit_dimension", + &PatchRecord::unitDimension, + &PatchRecord::setUnitDimension, + python::doc_unit_dimension) // TODO remove in future versions (deprecated) - .def("set_unit_dimension", &PatchRecord::setUnitDimension) - ; + .def("set_unit_dimension", &PatchRecord::setUnitDimension); } diff --git a/src/binding/python/PatchRecordComponent.cpp b/src/binding/python/PatchRecordComponent.cpp index 4d9cc5f151..cecd57b252 100644 --- a/src/binding/python/PatchRecordComponent.cpp +++ b/src/binding/python/PatchRecordComponent.cpp @@ -21,148 +21,181 @@ #include #include -#include "openPMD/backend/PatchRecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/auxiliary/ShareRaw.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" +#include "openPMD/backend/PatchRecordComponent.hpp" #include "openPMD/binding/python/Numpy.hpp" namespace py = pybind11; using namespace openPMD; - -void init_PatchRecordComponent(py::module &m) { - py::class_(m, "Patch_Record_Component") - .def_property("unit_SI", &BaseRecordComponent::unitSI, &PatchRecordComponent::setUnitSI) +void init_PatchRecordComponent(py::module &m) 
+{ + py::class_( + m, "Patch_Record_Component") + .def_property( + "unit_SI", + &BaseRecordComponent::unitSI, + &PatchRecordComponent::setUnitSI) .def("reset_dataset", &PatchRecordComponent::resetDataset) - .def_property_readonly("ndims", &PatchRecordComponent::getDimensionality) + .def_property_readonly( + "ndims", &PatchRecordComponent::getDimensionality) .def_property_readonly("shape", &PatchRecordComponent::getExtent) - .def("load", [](PatchRecordComponent & prc) { + .def( + "load", + [](PatchRecordComponent &prc) { + auto const dtype = dtype_to_numpy(prc.getDatatype()); + auto a = py::array(dtype, prc.getExtent()[0]); - auto const dtype = dtype_to_numpy( prc.getDatatype() ); - auto a = py::array( dtype, prc.getExtent()[0] ); + if (prc.getDatatype() == Datatype::CHAR) + prc.load(shareRaw((char *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::UCHAR) + prc.load( + shareRaw((unsigned char *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::SHORT) + prc.load(shareRaw((short *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::INT) + prc.load(shareRaw((int *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::LONG) + prc.load(shareRaw((long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::LONGLONG) + prc.load( + shareRaw((long long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::USHORT) + prc.load( + shareRaw((unsigned short *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::UINT) + prc.load( + shareRaw((unsigned int *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::ULONG) + prc.load( + shareRaw((unsigned long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::ULONGLONG) + prc.load( + shareRaw((unsigned long long *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::LONG_DOUBLE) + prc.load( + shareRaw((long double *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::DOUBLE) + prc.load(shareRaw((double *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::FLOAT) + prc.load(shareRaw((float *)a.mutable_data())); + else if (prc.getDatatype() == Datatype::BOOL) + prc.load(shareRaw((bool *)a.mutable_data())); + else + throw std::runtime_error( + std::string("Datatype not known in 'load'!")); - if( prc.getDatatype() == Datatype::CHAR ) - prc.load(shareRaw((char*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::UCHAR ) - prc.load(shareRaw((unsigned char*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::SHORT ) - prc.load(shareRaw((short*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::INT ) - prc.load(shareRaw((int*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::LONG ) - prc.load(shareRaw((long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::LONGLONG ) - prc.load(shareRaw((long long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::USHORT ) - prc.load(shareRaw((unsigned short*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::UINT ) - prc.load(shareRaw((unsigned int*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::ULONG ) - prc.load(shareRaw((unsigned long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::ULONGLONG ) - prc.load(shareRaw((unsigned long long*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::LONG_DOUBLE ) - prc.load(shareRaw((long double*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::DOUBLE ) - prc.load(shareRaw((double*) a.mutable_data())); - else if( prc.getDatatype() == 
Datatype::FLOAT ) - prc.load(shareRaw((float*) a.mutable_data())); - else if( prc.getDatatype() == Datatype::BOOL ) - prc.load(shareRaw((bool*) a.mutable_data())); - else - throw std::runtime_error(std::string("Datatype not known in 'load'!")); - - return a; - }) + return a; + }) // all buffer types - .def("store", [](PatchRecordComponent & prc, uint64_t idx, py::buffer a) { - py::buffer_info buf = a.request(); - auto const dtype = dtype_from_bufferformat( buf.format ); + .def( + "store", + [](PatchRecordComponent &prc, uint64_t idx, py::buffer a) { + py::buffer_info buf = a.request(); + auto const dtype = dtype_from_bufferformat(buf.format); - using DT = Datatype; + using DT = Datatype; - // allow one-element n-dimensional buffers as well - py::ssize_t numElements = 1; - if( buf.ndim > 0 ) { - for( auto d = 0; d < buf.ndim; ++d ) - numElements *= buf.shape.at(d); - } + // allow one-element n-dimensional buffers as well + py::ssize_t numElements = 1; + if (buf.ndim > 0) + { + for (auto d = 0; d < buf.ndim; ++d) + numElements *= buf.shape.at(d); + } - // Numpy: Handling of arrays and scalars - // work-around for https://github.com/pybind/pybind11/issues/1224 - // -> passing numpy scalars as buffers needs numpy 1.15+ - // https://github.com/numpy/numpy/issues/10265 - // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 - // scalars, see PEP 3118 - // requires Numpy 1.15+ - if( numElements == 1 ) { - // refs: - // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html - // https://docs.python.org/3/library/struct.html#format-characters - // std::cout << " scalar type '" << buf.format << "'" << std::endl; - // typestring: encoding + type + number of bytes - switch( dtype ) + // Numpy: Handling of arrays and scalars + // work-around for + // https://github.com/pybind/pybind11/issues/1224 + // -> passing numpy scalars as buffers needs numpy 1.15+ + // https://github.com/numpy/numpy/issues/10265 + // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 + // scalars, see PEP 3118 + // requires Numpy 1.15+ + if (numElements == 1) { + // refs: + // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html + // https://docs.python.org/3/library/struct.html#format-characters + // std::cout << " scalar type '" << buf.format << "'" << + // std::endl; typestring: encoding + type + number of bytes + switch (dtype) + { case DT::BOOL: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::SHORT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::INT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::LONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::LONGLONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::USHORT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::UINT: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::ULONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::ULONGLONG: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; case DT::FLOAT: - return prc.store( 
idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::DOUBLE: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store(idx, *static_cast(buf.ptr)); break; case DT::LONG_DOUBLE: - return prc.store( idx, *static_cast(buf.ptr) ); + return prc.store( + idx, *static_cast(buf.ptr)); break; default: - throw std::runtime_error("store: " + throw std::runtime_error( + "store: " "Unknown Datatype!"); + } + } + else + { + throw std::runtime_error( + "store: " + "Only scalar values supported!"); } - } - else - { - throw std::runtime_error("store: " - "Only scalar values supported!"); - } - }, py::arg("idx"), py::arg("data") - ) + }, + py::arg("idx"), + py::arg("data")) // allowed python intrinsics, after (!) buffer matching - .def("store", &PatchRecordComponent::store, - py::arg("idx"), py::arg("data")) - .def("store", &PatchRecordComponent::store, - py::arg("idx"), py::arg("data")) + .def( + "store", + &PatchRecordComponent::store, + py::arg("idx"), + py::arg("data")) + .def( + "store", + &PatchRecordComponent::store, + py::arg("idx"), + py::arg("data")) // TODO implement convenient, patch-object level store/load // TODO remove in future versions (deprecated) - .def("set_unit_SI", &PatchRecordComponent::setUnitSI) - ; + .def("set_unit_SI", &PatchRecordComponent::setUnitSI); } diff --git a/src/binding/python/Record.cpp b/src/binding/python/Record.cpp index e442b05e36..51821d13f3 100644 --- a/src/binding/python/Record.cpp +++ b/src/binding/python/Record.cpp @@ -22,10 +22,10 @@ #include #include "openPMD/Record.hpp" -#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/RecordComponent.hpp" -#include "openPMD/binding/python/UnitDimension.hpp" +#include "openPMD/backend/BaseRecord.hpp" #include "openPMD/binding/python/Pickle.hpp" +#include "openPMD/binding/python/UnitDimension.hpp" #include #include @@ -33,38 +33,40 @@ namespace py = pybind11; using namespace openPMD; +void init_Record(py::module &m) +{ + py::class_> cl(m, "Record"); + cl.def(py::init()) -void init_Record(py::module &m) { - py::class_ > cl(m, "Record"); - cl - .def(py::init()) - - .def("__repr__", - [](Record const &) { - return ""; - } - ) + .def("__repr__", [](Record const &) { return ""; }) - .def_property("unit_dimension", - &Record::unitDimension, - &Record::setUnitDimension, - python::doc_unit_dimension) + .def_property( + "unit_dimension", + &Record::unitDimension, + &Record::setUnitDimension, + python::doc_unit_dimension) - .def_property("time_offset", &Record::timeOffset, &Record::setTimeOffset) - .def_property("time_offset", &Record::timeOffset, &Record::setTimeOffset) - .def_property("time_offset", &Record::timeOffset, &Record::setTimeOffset) + .def_property( + "time_offset", + &Record::timeOffset, + &Record::setTimeOffset) + .def_property( + "time_offset", + &Record::timeOffset, + &Record::setTimeOffset) + .def_property( + "time_offset", + &Record::timeOffset, + &Record::setTimeOffset) // TODO remove in future versions (deprecated) .def("set_unit_dimension", &Record::setUnitDimension) .def("set_time_offset", &Record::setTimeOffset) .def("set_time_offset", &Record::setTimeOffset) - .def("set_time_offset", &Record::setTimeOffset) - ; + .def("set_time_offset", &Record::setTimeOffset); add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); return series.iterations[n_it].particles[group.at(3)][group.at(4)]; - } - ); + }); } diff 
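The add_pickle helpers in the bindings above reconstruct an object from the components of its path inside the Series: group.at(1) is the iteration index, group.at(3) the particle species, and group.at(4) the record. A small standalone sketch of that lookup follows; the example path layout "data/<iteration>/particles/<species>/<record>" and the splitPath helper are assumptions made for illustration, since the real lambdas receive the components already split.

    #include <cstdint>
    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // split a slash-separated group path into its components
    std::vector<std::string> splitPath(std::string const &path)
    {
        std::vector<std::string> components;
        std::istringstream in(path);
        for (std::string token; std::getline(in, token, '/');)
            components.push_back(token);
        return components;
    }

    int main()
    {
        // assumed layout: data/<iteration>/particles/<species>/<record>
        auto const group = splitPath("data/100/particles/e/position");
        std::uint64_t const n_it = std::stoull(group.at(1)); // iteration index
        std::cout << "iteration: " << n_it << ", species: " << group.at(3)
                  << ", record: " << group.at(4) << '\n';
    }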
--git a/src/binding/python/RecordComponent.cpp b/src/binding/python/RecordComponent.cpp index 0f1369e764..a036f98f83 100644 --- a/src/binding/python/RecordComponent.cpp +++ b/src/binding/python/RecordComponent.cpp @@ -18,16 +18,16 @@ * and the GNU Lesser General Public License along with openPMD-api. * If not, see . */ -#include #include +#include #include +#include "openPMD/DatatypeHelpers.hpp" #include "openPMD/RecordComponent.hpp" -#include "openPMD/backend/BaseRecordComponent.hpp" +#include "openPMD/Series.hpp" #include "openPMD/auxiliary/ShareRaw.hpp" +#include "openPMD/backend/BaseRecordComponent.hpp" #include "openPMD/binding/python/Numpy.hpp" -#include "openPMD/DatatypeHelpers.hpp" -#include "openPMD/Series.hpp" #include "openPMD/binding/python/Pickle.hpp" #include @@ -44,14 +44,14 @@ namespace py = pybind11; using namespace openPMD; - /** Convert a py::tuple of py::slices to Offset & Extent * * https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.indexing.html * https://github.com/numpy/numpy/blob/v1.16.1/numpy/core/src/multiarray/mapping.c#L348-L375 */ -inline std::tuple< Offset, Extent, std::vector > -parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const & slices) { +inline std::tuple> parseTupleSlices( + uint8_t const ndim, Extent const &full_extent, py::tuple const &slices) +{ uint8_t const numSlices = py::len(slices); Offset offset(ndim, 0u); @@ -60,27 +60,24 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const int16_t curAxis = -1; int16_t posEllipsis = -1; - for( uint8_t i = 0u; i < numSlices; ++i ) + for (uint8_t i = 0u; i < numSlices; ++i) { ++curAxis; - if( - i >= ndim && - posEllipsis == -1 && - slices[i].ptr() != Py_Ellipsis - ) + if (i >= ndim && posEllipsis == -1 && slices[i].ptr() != Py_Ellipsis) throw py::index_error( "too many indices for dimension of record component!"); - if( slices[i].ptr() == Py_Ellipsis ) + if (slices[i].ptr() == Py_Ellipsis) { // only allowed once - if( posEllipsis != -1 ) - throw py::index_error("an index can only have a single ellipsis ('...')"); + if (posEllipsis != -1) + throw py::index_error( + "an index can only have a single ellipsis ('...')"); posEllipsis = curAxis; // might be omitted if all other indices are given as well - if( numSlices == ndim + 1 ) + if (numSlices == ndim + 1) { --curAxis; continue; @@ -88,19 +85,17 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const // how many slices were given after the ellipsis uint8_t const numSlicesAfterEllipsis = - numSlices - - uint8_t(posEllipsis) - - 1u; + numSlices - uint8_t(posEllipsis) - 1u; // how many slices does the ellipsis represent - uint8_t const numSlicesEllipsis = - numSlices - - uint8_t(posEllipsis) // slices before - - numSlicesAfterEllipsis; // slices after + uint8_t const numSlicesEllipsis = numSlices - + uint8_t(posEllipsis) // slices before + - numSlicesAfterEllipsis; // slices after // fill ellipsis indices // note: if enough further indices are given, the ellipsis // might stand for no axis: valid and ignored - for( ; curAxis < posEllipsis + int16_t(numSlicesEllipsis); ++curAxis ) + for (; curAxis < posEllipsis + int16_t(numSlicesEllipsis); + ++curAxis) { offset.at(curAxis) = 0; extent.at(curAxis) = full_extent.at(curAxis); @@ -110,21 +105,29 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const continue; } - if( PySlice_Check( slices[i].ptr() ) ) + if (PySlice_Check(slices[i].ptr())) { - py::slice slice = py::cast< py::slice >( slices[i] ); + 
py::slice slice = py::cast(slices[i]); size_t start, stop, step, slicelength; - if( !slice.compute( full_extent.at(curAxis), &start, &stop, &step, &slicelength ) ) + if (!slice.compute( + full_extent.at(curAxis), + &start, + &stop, + &step, + &slicelength)) throw py::error_already_set(); // TODO PySlice_AdjustIndices: Python 3.6.1+ - // Adjust start/end slice indices assuming a sequence of the specified length. - // Out of bounds indices are clipped in a manner consistent with the handling of normal slices. - // slicelength = PySlice_AdjustIndices(full_extent[curAxis], (ssize_t*)&start, (ssize_t*)&stop, step); + // Adjust start/end slice indices assuming a sequence of the + // specified length. Out of bounds indices are clipped in a + // manner consistent with the handling of normal slices. + // slicelength = PySlice_AdjustIndices(full_extent[curAxis], + // (ssize_t*)&start, (ssize_t*)&stop, step); - if( step != 1u ) - throw py::index_error("strides in selection are inefficient, not implemented!"); + if (step != 1u) + throw py::index_error( + "strides in selection are inefficient, not implemented!"); // verified for size later in C++ API offset.at(curAxis) = start; @@ -135,9 +138,9 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const try { - auto const index = py::cast< std::int64_t >( slices[i] ); + auto const index = py::cast(slices[i]); - if( index < 0 ) + if (index < 0) offset.at(curAxis) = full_extent.at(curAxis) + index; else offset.at(curAxis) = index; @@ -145,23 +148,21 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const extent.at(curAxis) = 1; flatten.at(curAxis) = true; // indices flatten the dimension - if( offset.at(curAxis) >= full_extent.at(curAxis) ) + if (offset.at(curAxis) >= full_extent.at(curAxis)) throw py::index_error( - std::string("index ") + - std::to_string( offset.at(curAxis) ) + + std::string("index ") + std::to_string(offset.at(curAxis)) + std::string(" is out of bounds for axis ") + - std::to_string(i) + - std::string(" with size ") + - std::to_string(full_extent.at(curAxis)) - ); + std::to_string(i) + std::string(" with size ") + + std::to_string(full_extent.at(curAxis))); continue; } - catch (const py::cast_error& e) { + catch (const py::cast_error &e) + { // not an index } - if( slices[i].ptr() == Py_None ) + if (slices[i].ptr() == Py_None) { // py::none newaxis = py::cast< py::none >( slices[i] );; throw py::index_error("None (newaxis) not implemented!"); @@ -177,7 +178,7 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const } // fill omitted higher indices with "select all" - for( ++curAxis; curAxis < int16_t(ndim); ++curAxis ) + for (++curAxis; curAxis < int16_t(ndim); ++curAxis) { extent.at(curAxis) = full_extent.at(curAxis); } @@ -192,23 +193,23 @@ parseTupleSlices(uint8_t const ndim, Extent const & full_extent, py::tuple const * - not strided with paddings * - not a view in another buffer that results in striding */ -inline void -check_buffer_is_contiguous( py::array & a ) { +inline void check_buffer_is_contiguous(py::array &a) +{ - auto* view = new Py_buffer(); + auto *view = new Py_buffer(); int flags = PyBUF_STRIDES | PyBUF_FORMAT; - if( PyObject_GetBuffer( a.ptr(), view, flags ) != 0 ) + if (PyObject_GetBuffer(a.ptr(), view, flags) != 0) { delete view; throw py::error_already_set(); } - bool isContiguous = ( PyBuffer_IsContiguous( view, 'A' ) != 0 ); - PyBuffer_Release( view ); + bool isContiguous = (PyBuffer_IsContiguous(view, 'A') != 0); + 
PyBuffer_Release(view); delete view; - if( !isContiguous ) + if (!isContiguous) throw py::index_error( - "strides in chunk are inefficient, not implemented!"); + "strides in chunk are inefficient, not implemented!"); // @todo in order to implement stride handling, one needs to // loop over the input data strides in store/load calls } @@ -221,121 +222,126 @@ check_buffer_is_contiguous( py::array & a ) { * Size checks of the requested chunk (spanned data is in valid bounds) * will be performed at C++ API part in RecordComponent::storeChunk . */ -inline void -store_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent const & extent, std::vector const & flatten) { +inline void store_chunk( + RecordComponent &r, + py::array &a, + Offset const &offset, + Extent const &extent, + std::vector const &flatten) +{ // @todo keep locked until flush() is performed // a.flags.writable = false; // a.flags.owndata = false; // verify offset + extend fit in dataset extent - // some one-size dimensions might be flattended in our r due to selections by index - size_t const numFlattenDims = std::count(flatten.begin(), flatten.end(), true); + // some one-size dimensions might be flattended in our r due to selections + // by index + size_t const numFlattenDims = + std::count(flatten.begin(), flatten.end(), true); auto const r_extent = r.getExtent(); - auto const s_extent(extent); // selected extent in r - std::vector< std::uint64_t > r_shape(r_extent.size() - numFlattenDims); - std::vector< std::uint64_t > s_shape(s_extent.size() - numFlattenDims); + auto const s_extent(extent); // selected extent in r + std::vector r_shape(r_extent.size() - numFlattenDims); + std::vector s_shape(s_extent.size() - numFlattenDims); auto maskIt = flatten.begin(); std::copy_if( std::begin(r_extent), std::end(r_extent), std::begin(r_shape), - [&maskIt](std::uint64_t){ - return !*(maskIt++); - } - ); + [&maskIt](std::uint64_t) { return !*(maskIt++); }); maskIt = flatten.begin(); std::copy_if( std::begin(s_extent), std::end(s_extent), std::begin(s_shape), - [&maskIt](std::uint64_t){ - return !*(maskIt++); - } - ); + [&maskIt](std::uint64_t) { return !*(maskIt++); }); // verify shape and extent - if( size_t(a.ndim()) != r_shape.size() ) + if (size_t(a.ndim()) != r_shape.size()) throw py::index_error( - std::string("dimension of chunk (") + - std::to_string(a.ndim()) + + std::string("dimension of chunk (") + std::to_string(a.ndim()) + std::string("D) does not fit dimension of selection " "in record component (") + - std::to_string(r_shape.size()) + - std::string("D)") - ); + std::to_string(r_shape.size()) + std::string("D)")); - for( auto d = 0; d < a.ndim(); ++d ) + for (auto d = 0; d < a.ndim(); ++d) { // selection causes overflow of r - if( offset.at(d) + extent.at(d) > r_shape.at(d) ) + if (offset.at(d) + extent.at(d) > r_shape.at(d)) throw py::index_error( - std::string("slice ") + - std::to_string( offset.at(d) ) + - std::string(":") + - std::to_string( extent.at(d) ) + - std::string(" is out of bounds for axis ") + - std::to_string(d) + - std::string(" with size ") + - std::to_string(r_shape.at(d)) - ); + std::string("slice ") + std::to_string(offset.at(d)) + + std::string(":") + std::to_string(extent.at(d)) + + std::string(" is out of bounds for axis ") + std::to_string(d) + + std::string(" with size ") + std::to_string(r_shape.at(d))); // underflow of selection in r for given a - if( s_shape.at(d) != std::uint64_t(a.shape()[d]) ) + if (s_shape.at(d) != std::uint64_t(a.shape()[d])) throw py::index_error( - 
std::string("size of chunk (") + - std::to_string( a.shape()[d] ) + - std::string(") for axis ") + - std::to_string(d) + + std::string("size of chunk (") + std::to_string(a.shape()[d]) + + std::string(") for axis ") + std::to_string(d) + std::string(" does not match selection ") + std::string("size in record component (") + - std::to_string( s_extent.at(d) ) + - std::string(")") - ); + std::to_string(s_extent.at(d)) + std::string(")")); } - check_buffer_is_contiguous( a ); + check_buffer_is_contiguous(a); // here, we increase a reference on the user-passed data so that // temporary and lost-scope variables stay alive until we flush // note: this does not yet prevent the user, as in C++, to build // a race condition by manipulating the data they passed - auto store_data = [ &r, &a, &offset, &extent ]( auto cxxtype ) { + auto store_data = [&r, &a, &offset, &extent](auto cxxtype) { using CXXType = decltype(cxxtype); a.inc_ref(); - void* data = a.mutable_data(); - std::shared_ptr< CXXType > shared( ( CXXType * )data, - [ a ]( CXXType * ) { a.dec_ref(); } ); - r.storeChunk( std::move( shared ), offset, extent ); + void *data = a.mutable_data(); + std::shared_ptr shared( + (CXXType *)data, [a](CXXType *) { a.dec_ref(); }); + r.storeChunk(std::move(shared), offset, extent); }; // store - auto const dtype = dtype_from_numpy( a.dtype() ); - if( dtype == Datatype::CHAR ) store_data( char() ); - else if( dtype == Datatype::UCHAR ) store_data( (unsigned char)0 ); - else if( dtype == Datatype::SHORT ) store_data( short() ); - else if( dtype == Datatype::INT ) store_data( int() ); - else if( dtype == Datatype::LONG ) store_data( long() ); - else if( dtype == Datatype::LONGLONG ) store_data( (long long)0 ); - else if( dtype == Datatype::USHORT ) store_data( (unsigned short)0 ); - else if( dtype == Datatype::UINT ) store_data( (unsigned int)0 ); - else if( dtype == Datatype::ULONG ) store_data( (unsigned long)0 ); - else if( dtype == Datatype::ULONGLONG ) store_data( (unsigned long long)0 ); - else if( dtype == Datatype::LONG_DOUBLE ) store_data( (long double)0 ); - else if( dtype == Datatype::DOUBLE ) store_data( double() ); - else if( dtype == Datatype::FLOAT ) store_data( float() ); - else if( dtype == Datatype::CLONG_DOUBLE ) store_data( std::complex() ); - else if( dtype == Datatype::CDOUBLE ) store_data( std::complex() ); - else if( dtype == Datatype::CFLOAT ) store_data( std::complex() ); -/* @todo -.value("STRING", Datatype::STRING) -.value("VEC_STRING", Datatype::VEC_STRING) -.value("ARR_DBL_7", Datatype::ARR_DBL_7) -*/ - else if( dtype == Datatype::BOOL ) store_data( bool() ); + auto const dtype = dtype_from_numpy(a.dtype()); + if (dtype == Datatype::CHAR) + store_data(char()); + else if (dtype == Datatype::UCHAR) + store_data((unsigned char)0); + else if (dtype == Datatype::SHORT) + store_data(short()); + else if (dtype == Datatype::INT) + store_data(int()); + else if (dtype == Datatype::LONG) + store_data(long()); + else if (dtype == Datatype::LONGLONG) + store_data((long long)0); + else if (dtype == Datatype::USHORT) + store_data((unsigned short)0); + else if (dtype == Datatype::UINT) + store_data((unsigned int)0); + else if (dtype == Datatype::ULONG) + store_data((unsigned long)0); + else if (dtype == Datatype::ULONGLONG) + store_data((unsigned long long)0); + else if (dtype == Datatype::LONG_DOUBLE) + store_data((long double)0); + else if (dtype == Datatype::DOUBLE) + store_data(double()); + else if (dtype == Datatype::FLOAT) + store_data(float()); + else if (dtype == 
Datatype::CLONG_DOUBLE) + store_data(std::complex()); + else if (dtype == Datatype::CDOUBLE) + store_data(std::complex()); + else if (dtype == Datatype::CFLOAT) + store_data(std::complex()); + /* @todo + .value("STRING", Datatype::STRING) + .value("VEC_STRING", Datatype::VEC_STRING) + .value("ARR_DBL_7", Datatype::ARR_DBL_7) + */ + else if (dtype == Datatype::BOOL) + store_data(bool()); else throw std::runtime_error( - std::string("Datatype '") + - std::string(py::str(a.dtype())) + + std::string("Datatype '") + std::string(py::str(a.dtype())) + std::string("' not known in 'storeChunk'!")); } @@ -344,7 +350,7 @@ store_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent co * Called with a py::tuple of slices and a py::array */ inline void -store_chunk(RecordComponent & r, py::array & a, py::tuple const & slices) +store_chunk(RecordComponent &r, py::array &a, py::tuple const &slices) { uint8_t ndim = r.getDimensionality(); auto const full_extent = r.getExtent(); @@ -352,7 +358,8 @@ store_chunk(RecordComponent & r, py::array & a, py::tuple const & slices) Offset offset; Extent extent; std::vector flatten; - std::tie(offset, extent, flatten) = parseTupleSlices(ndim, full_extent, slices); + std::tie(offset, extent, flatten) = + parseTupleSlices(ndim, full_extent, slices); store_chunk(r, a, offset, extent, flatten); } @@ -361,22 +368,21 @@ struct PythonDynamicMemoryView { using ShapeContainer = pybind11::array::ShapeContainer; - template< typename T > + template PythonDynamicMemoryView( - DynamicMemoryView< T > dynamicView, + DynamicMemoryView dynamicView, ShapeContainer arrayShape, - ShapeContainer strides ) - : m_dynamicView( std::shared_ptr< void >( - new DynamicMemoryView< T >( std::move( dynamicView ) ) ) ) - , m_arrayShape( std::move( arrayShape ) ) - , m_strides( std::move( strides ) ) - , m_datatype( determineDatatype< T >() ) - { - } + ShapeContainer strides) + : m_dynamicView(std::shared_ptr( + new DynamicMemoryView(std::move(dynamicView)))) + , m_arrayShape(std::move(arrayShape)) + , m_strides(std::move(strides)) + , m_datatype(determineDatatype()) + {} [[nodiscard]] pybind11::memoryview currentView() const; - std::shared_ptr< void > m_dynamicView; + std::shared_ptr m_dynamicView; ShapeContainer m_arrayShape; ShapeContainer m_strides; Datatype m_datatype; @@ -386,111 +392,110 @@ namespace { struct GetCurrentView { - template< typename T > - static pybind11::memoryview - call( PythonDynamicMemoryView const & dynamicView ) + template + static pybind11::memoryview call(PythonDynamicMemoryView const &dynamicView) { - auto span = static_cast< DynamicMemoryView< T > * >( - dynamicView.m_dynamicView.get() )->currentBuffer(); + auto span = + static_cast *>(dynamicView.m_dynamicView.get()) + ->currentBuffer(); return py::memoryview::from_buffer( span.data(), dynamicView.m_arrayShape, dynamicView.m_strides, - /* readonly = */ false ); + /* readonly = */ false); } - static constexpr char const * errorMsg = "DynamicMemoryView"; + static constexpr char const *errorMsg = "DynamicMemoryView"; }; -template<> +template <> pybind11::memoryview -GetCurrentView::call< std::string >( PythonDynamicMemoryView const & ) +GetCurrentView::call(PythonDynamicMemoryView const &) { - throw std::runtime_error( "[DynamicMemoryView] Only PODs allowed." 
); + throw std::runtime_error("[DynamicMemoryView] Only PODs allowed."); } } // namespace pybind11::memoryview PythonDynamicMemoryView::currentView() const { - return switchNonVectorType< GetCurrentView >( m_datatype, *this ); + return switchNonVectorType(m_datatype, *this); } namespace { struct StoreChunkSpan { - template< typename T > - static PythonDynamicMemoryView call( - RecordComponent & r, Offset const & offset, Extent const & extent ) + template + static PythonDynamicMemoryView + call(RecordComponent &r, Offset const &offset, Extent const &extent) { - DynamicMemoryView< T > dynamicView = - r.storeChunk< T >( offset, extent ); + DynamicMemoryView dynamicView = r.storeChunk(offset, extent); pybind11::array::ShapeContainer arrayShape( - extent.begin(), extent.end() ); - std::vector< py::ssize_t > strides( extent.size() ); + extent.begin(), extent.end()); + std::vector strides(extent.size()); { - py::ssize_t accumulator = sizeof( T ); + py::ssize_t accumulator = sizeof(T); size_t dim = extent.size(); - while( dim > 0 ) + while (dim > 0) { --dim; - strides[ dim ] = accumulator; - accumulator *= extent[ dim ]; + strides[dim] = accumulator; + accumulator *= extent[dim]; } } return PythonDynamicMemoryView( - std::move( dynamicView ), - std::move( arrayShape ), - py::array::ShapeContainer( std::move( strides ) ) ); + std::move(dynamicView), + std::move(arrayShape), + py::array::ShapeContainer(std::move(strides))); } - static constexpr char const * errorMsg = "RecordComponent.store_chunk()"; + static constexpr char const *errorMsg = "RecordComponent.store_chunk()"; }; -template<> -PythonDynamicMemoryView StoreChunkSpan::call< std::string >( - RecordComponent &, Offset const &, Extent const & ) +template <> +PythonDynamicMemoryView StoreChunkSpan::call( + RecordComponent &, Offset const &, Extent const &) { throw std::runtime_error( - "[RecordComponent.store_chunk()] Only PODs allowed." 
); + "[RecordComponent.store_chunk()] Only PODs allowed."); } } // namespace inline PythonDynamicMemoryView store_chunk_span( - RecordComponent & r, - Offset const & offset, - Extent const & extent, - std::vector< bool > const & flatten ) + RecordComponent &r, + Offset const &offset, + Extent const &extent, + std::vector const &flatten) { // some one-size dimensions might be flattended in our output due to // selections by index size_t const numFlattenDims = - std::count( flatten.begin(), flatten.end(), true ); - std::vector< ptrdiff_t > shape( extent.size() - numFlattenDims ); + std::count(flatten.begin(), flatten.end(), true); + std::vector shape(extent.size() - numFlattenDims); auto maskIt = flatten.begin(); std::copy_if( - std::begin( extent ), - std::end( extent ), - std::begin( shape ), - [ &maskIt ]( std::uint64_t ) { return !*( maskIt++ ); } ); + std::begin(extent), + std::end(extent), + std::begin(shape), + [&maskIt](std::uint64_t) { return !*(maskIt++); }); - return switchNonVectorType< StoreChunkSpan >( - r.getDatatype(), r, offset, extent ); + return switchNonVectorType( + r.getDatatype(), r, offset, extent); } inline PythonDynamicMemoryView -store_chunk_span( RecordComponent & r, py::tuple const & slices ) +store_chunk_span(RecordComponent &r, py::tuple const &slices) { uint8_t ndim = r.getDimensionality(); auto const full_extent = r.getExtent(); Offset offset; Extent extent; - std::vector< bool > flatten; - std::tie( offset, extent, flatten ) = - parseTupleSlices( ndim, full_extent, slices ); + std::vector flatten; + std::tie(offset, extent, flatten) = + parseTupleSlices(ndim, full_extent, slices); - return store_chunk_span( r, offset, extent, flatten ); + return store_chunk_span(r, offset, extent, flatten); } /** Load Chunk @@ -501,72 +506,89 @@ store_chunk_span( RecordComponent & r, py::tuple const & slices ) * Size checks of the requested chunk (spanned data is in valid bounds) * will be performed at C++ API part in RecordComponent::loadChunk . */ -void -load_chunk(RecordComponent & r, py::buffer & buffer, Offset const & offset, Extent const & extent) +void load_chunk( + RecordComponent &r, + py::buffer &buffer, + Offset const &offset, + Extent const &extent) { - auto const dtype = dtype_to_numpy( r.getDatatype() ); - py::buffer_info buffer_info = buffer.request( /* writable = */ true ); + auto const dtype = dtype_to_numpy(r.getDatatype()); + py::buffer_info buffer_info = buffer.request(/* writable = */ true); - auto const & strides = buffer_info.strides; + auto const &strides = buffer_info.strides; // this function requires a contiguous slab of memory, so check the strides // whether we have that - if( strides.size() == 0 ) + if (strides.size() == 0) { throw std::runtime_error( - "[Record_Component::load_chunk()] Empty buffer passed." ); + "[Record_Component::load_chunk()] Empty buffer passed."); } { - py::ssize_t accumulator = toBytes( r.getDatatype() ); + py::ssize_t accumulator = toBytes(r.getDatatype()); size_t dim = strides.size(); - while( dim > 0 ) + while (dim > 0) { --dim; - if( strides[ dim ] != accumulator ) + if (strides[dim] != accumulator) { throw std::runtime_error( "[Record_Component::load_chunk()] Requires contiguous slab" - " of memory." 
); + " of memory."); } - accumulator *= extent[ dim ]; + accumulator *= extent[dim]; } } - if( r.getDatatype() == Datatype::CHAR ) - r.loadChunk(shareRaw((char*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::UCHAR ) - r.loadChunk(shareRaw((unsigned char*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::SHORT ) - r.loadChunk(shareRaw((short*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::INT ) - r.loadChunk(shareRaw((int*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::LONG ) - r.loadChunk(shareRaw((long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::LONGLONG ) - r.loadChunk(shareRaw((long long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::USHORT ) - r.loadChunk(shareRaw((unsigned short*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::UINT ) - r.loadChunk(shareRaw((unsigned int*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::ULONG ) - r.loadChunk(shareRaw((unsigned long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::ULONGLONG ) - r.loadChunk(shareRaw((unsigned long long*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::LONG_DOUBLE ) - r.loadChunk(shareRaw((long double*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::DOUBLE ) - r.loadChunk(shareRaw((double*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::FLOAT ) - r.loadChunk(shareRaw((float*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::CLONG_DOUBLE ) - r.loadChunk>(shareRaw((std::complex*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::CDOUBLE ) - r.loadChunk>(shareRaw((std::complex*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::CFLOAT ) - r.loadChunk>(shareRaw((std::complex*) buffer_info.ptr), offset, extent); - else if( r.getDatatype() == Datatype::BOOL ) - r.loadChunk(shareRaw((bool*) buffer_info.ptr), offset, extent); + if (r.getDatatype() == Datatype::CHAR) + r.loadChunk(shareRaw((char *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::UCHAR) + r.loadChunk( + shareRaw((unsigned char *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::SHORT) + r.loadChunk(shareRaw((short *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::INT) + r.loadChunk(shareRaw((int *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::LONG) + r.loadChunk(shareRaw((long *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::LONGLONG) + r.loadChunk( + shareRaw((long long *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::USHORT) + r.loadChunk( + shareRaw((unsigned short *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::UINT) + r.loadChunk( + shareRaw((unsigned int *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::ULONG) + r.loadChunk( + shareRaw((unsigned long *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::ULONGLONG) + r.loadChunk( + shareRaw((unsigned long long *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::LONG_DOUBLE) + r.loadChunk( + shareRaw((long double *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::DOUBLE) + r.loadChunk( + shareRaw((double 
*)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::FLOAT) + r.loadChunk(shareRaw((float *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::CLONG_DOUBLE) + r.loadChunk>( + shareRaw((std::complex *)buffer_info.ptr), + offset, + extent); + else if (r.getDatatype() == Datatype::CDOUBLE) + r.loadChunk>( + shareRaw((std::complex *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::CFLOAT) + r.loadChunk>( + shareRaw((std::complex *)buffer_info.ptr), offset, extent); + else if (r.getDatatype() == Datatype::BOOL) + r.loadChunk(shareRaw((bool *)buffer_info.ptr), offset, extent); else - throw std::runtime_error(std::string("Datatype not known in 'loadChunk'!")); + throw std::runtime_error( + std::string("Datatype not known in 'loadChunk'!")); } /** Load Chunk @@ -577,19 +599,24 @@ load_chunk(RecordComponent & r, py::buffer & buffer, Offset const & offset, Exte * Size checks of the requested chunk (spanned data is in valid bounds) * will be performed at C++ API part in RecordComponent::loadChunk . */ -inline void -load_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent const & extent) +inline void load_chunk( + RecordComponent &r, + py::array &a, + Offset const &offset, + Extent const &extent) { // check array is large enough size_t s_load = 1u; size_t s_array = 1u; std::string str_extent_shape; std::string str_array_shape; - for( auto & si : extent ) { + for (auto &si : extent) + { s_load *= si; str_extent_shape.append(" ").append(std::to_string(si)); } - for( py::ssize_t d = 0; d < a.ndim(); ++d ) { + for (py::ssize_t d = 0; d < a.ndim(); ++d) + { s_array *= a.shape()[d]; str_array_shape.append(" ").append(std::to_string(a.shape()[d])); } @@ -605,63 +632,76 @@ load_chunk(RecordComponent & r, py::array & a, Offset const & offset, Extent con std::string("D)") ); */ - if( s_array < s_load ) { + if (s_array < s_load) + { throw py::index_error( - std::string("size of array (") + - std::to_string(s_array) + - std::string("; shape:") + - str_array_shape + + std::string("size of array (") + std::to_string(s_array) + + std::string("; shape:") + str_array_shape + std::string(") is smaller than size of selection " "in record component (") + - std::to_string(s_load) + - std::string("; shape:") + - str_extent_shape + - std::string(")") - ); + std::to_string(s_load) + std::string("; shape:") + + str_extent_shape + std::string(")")); } - check_buffer_is_contiguous( a ); + check_buffer_is_contiguous(a); // here, we increase a reference on the user-passed data so that // temporary and lost-scope variables stay alive until we flush // note: this does not yet prevent the user, as in C++, to build // a race condition by manipulating the data they passed - auto load_data = [ &r, &a, &offset, &extent ]( auto cxxtype ) { + auto load_data = [&r, &a, &offset, &extent](auto cxxtype) { using CXXType = decltype(cxxtype); a.inc_ref(); - void* data = a.mutable_data(); - std::shared_ptr< CXXType > shared( ( CXXType * )data, - [ a ]( CXXType * ) { a.dec_ref(); } ); - r.loadChunk( std::move( shared ), offset, extent ); + void *data = a.mutable_data(); + std::shared_ptr shared( + (CXXType *)data, [a](CXXType *) { a.dec_ref(); }); + r.loadChunk(std::move(shared), offset, extent); }; - if( r.getDatatype() == Datatype::CHAR ) load_data( char() ); - else if( r.getDatatype() == Datatype::UCHAR ) load_data( (unsigned char)0 ); - else if( r.getDatatype() == Datatype::SHORT ) load_data( short() ); - else if( r.getDatatype() == Datatype::INT ) 
load_data( int() ); - else if( r.getDatatype() == Datatype::LONG ) load_data( long() ); - else if( r.getDatatype() == Datatype::LONGLONG ) load_data( (long long)0 ); - else if( r.getDatatype() == Datatype::USHORT ) load_data( (unsigned short)0 ); - else if( r.getDatatype() == Datatype::UINT ) load_data( (unsigned int)0 ); - else if( r.getDatatype() == Datatype::ULONG ) load_data( (unsigned long)0 ); - else if( r.getDatatype() == Datatype::ULONGLONG ) load_data( (unsigned long long)0 ); - else if( r.getDatatype() == Datatype::LONG_DOUBLE ) load_data( (long double)0 ); - else if( r.getDatatype() == Datatype::DOUBLE ) load_data( double() ); - else if( r.getDatatype() == Datatype::FLOAT ) load_data( float() ); - else if( r.getDatatype() == Datatype::CLONG_DOUBLE ) load_data( std::complex() ); - else if( r.getDatatype() == Datatype::CDOUBLE ) load_data( std::complex() ); - else if( r.getDatatype() == Datatype::CFLOAT ) load_data( std::complex() ); - else if( r.getDatatype() == Datatype::BOOL ) load_data( bool() ); + if (r.getDatatype() == Datatype::CHAR) + load_data(char()); + else if (r.getDatatype() == Datatype::UCHAR) + load_data((unsigned char)0); + else if (r.getDatatype() == Datatype::SHORT) + load_data(short()); + else if (r.getDatatype() == Datatype::INT) + load_data(int()); + else if (r.getDatatype() == Datatype::LONG) + load_data(long()); + else if (r.getDatatype() == Datatype::LONGLONG) + load_data((long long)0); + else if (r.getDatatype() == Datatype::USHORT) + load_data((unsigned short)0); + else if (r.getDatatype() == Datatype::UINT) + load_data((unsigned int)0); + else if (r.getDatatype() == Datatype::ULONG) + load_data((unsigned long)0); + else if (r.getDatatype() == Datatype::ULONGLONG) + load_data((unsigned long long)0); + else if (r.getDatatype() == Datatype::LONG_DOUBLE) + load_data((long double)0); + else if (r.getDatatype() == Datatype::DOUBLE) + load_data(double()); + else if (r.getDatatype() == Datatype::FLOAT) + load_data(float()); + else if (r.getDatatype() == Datatype::CLONG_DOUBLE) + load_data(std::complex()); + else if (r.getDatatype() == Datatype::CDOUBLE) + load_data(std::complex()); + else if (r.getDatatype() == Datatype::CFLOAT) + load_data(std::complex()); + else if (r.getDatatype() == Datatype::BOOL) + load_data(bool()); else - throw std::runtime_error(std::string("Datatype not known in 'load_chunk'!")); + throw std::runtime_error( + std::string("Datatype not known in 'load_chunk'!")); } /** Load Chunk * * Called with a py::tuple of slices. 
*/ -inline py::array -load_chunk(RecordComponent & r, py::tuple const & slices) +inline py::array load_chunk(RecordComponent &r, py::tuple const &slices) { uint8_t ndim = r.getDimensionality(); auto const full_extent = r.getExtent(); @@ -669,52 +709,54 @@ load_chunk(RecordComponent & r, py::tuple const & slices) Offset offset; Extent extent; std::vector flatten; - std::tie(offset, extent, flatten) = parseTupleSlices(ndim, full_extent, slices); + std::tie(offset, extent, flatten) = + parseTupleSlices(ndim, full_extent, slices); - // some one-size dimensions might be flattended in our output due to selections by index - size_t const numFlattenDims = std::count(flatten.begin(), flatten.end(), true); - std::vector< ptrdiff_t > shape(extent.size() - numFlattenDims); + // some one-size dimensions might be flattended in our output due to + // selections by index + size_t const numFlattenDims = + std::count(flatten.begin(), flatten.end(), true); + std::vector shape(extent.size() - numFlattenDims); auto maskIt = flatten.begin(); std::copy_if( - std::begin(extent), - std::end(extent), - std::begin(shape), - [&maskIt](std::uint64_t){ - return !*(maskIt++); - } - ); + std::begin(extent), + std::end(extent), + std::begin(shape), + [&maskIt](std::uint64_t) { return !*(maskIt++); }); - auto const dtype = dtype_to_numpy( r.getDatatype() ); - auto a = py::array( dtype, shape ); + auto const dtype = dtype_to_numpy(r.getDatatype()); + auto a = py::array(dtype, shape); load_chunk(r, a, offset, extent); return a; } -void init_RecordComponent(py::module &m) { +void init_RecordComponent(py::module &m) +{ py::class_(m, "Dynamic_Memory_View") - .def("__repr__", - [](PythonDynamicMemoryView const & view) { - return "size()) + "'>"; - } - ) - .def("current_buffer", - [](PythonDynamicMemoryView const & view) { - return view.currentView(); - } - ); + .def( + "__repr__", + [](PythonDynamicMemoryView const &view) { + return "size()) + "'>"; + }) + .def("current_buffer", [](PythonDynamicMemoryView const &view) { + return view.currentView(); + }); py::class_ cl(m, "Record_Component"); - cl - .def("__repr__", - [](RecordComponent const & rc) { - return ""; - } - ) - - .def_property("unit_SI", &BaseRecordComponent::unitSI, &RecordComponent::setUnitSI) + cl.def( + "__repr__", + [](RecordComponent const &rc) { + return ""; + }) + + .def_property( + "unit_SI", + &BaseRecordComponent::unitSI, + &RecordComponent::setUnitSI) .def("reset_dataset", &RecordComponent::resetDataset) @@ -723,120 +765,144 @@ void init_RecordComponent(py::module &m) { .def_property_readonly("empty", &RecordComponent::empty) // buffer types - .def("make_constant", [](RecordComponent & rc, py::buffer & a) { - py::buffer_info buf = a.request(); - auto const dtype = dtype_from_bufferformat( buf.format ); + .def( + "make_constant", + [](RecordComponent &rc, py::buffer &a) { + py::buffer_info buf = a.request(); + auto const dtype = dtype_from_bufferformat(buf.format); - using DT = Datatype; + using DT = Datatype; - // allow one-element n-dimensional buffers as well - py::ssize_t numElements = 1; - if( buf.ndim > 0 ) { - for( auto d = 0; d < buf.ndim; ++d ) - numElements *= buf.shape.at(d); - } + // allow one-element n-dimensional buffers as well + py::ssize_t numElements = 1; + if (buf.ndim > 0) + { + for (auto d = 0; d < buf.ndim; ++d) + numElements *= buf.shape.at(d); + } - // Numpy: Handling of arrays and scalars - // work-around for https://github.com/pybind/pybind11/issues/1224 - // -> passing numpy scalars as buffers needs numpy 1.15+ - // 
https://github.com/numpy/numpy/issues/10265 - // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 - // scalars, see PEP 3118 - // requires Numpy 1.15+ - if( numElements == 1 ) { - // refs: - // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html - // https://docs.python.org/3/library/struct.html#format-characters - // std::cout << " scalar type '" << buf.format << "'" << std::endl; - // typestring: encoding + type + number of bytes - switch( dtype ) + // Numpy: Handling of arrays and scalars + // work-around for + // https://github.com/pybind/pybind11/issues/1224 + // -> passing numpy scalars as buffers needs numpy 1.15+ + // https://github.com/numpy/numpy/issues/10265 + // https://github.com/pybind/pybind11/issues/1224#issuecomment-354357392 + // scalars, see PEP 3118 + // requires Numpy 1.15+ + if (numElements == 1) { + // refs: + // https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.interface.html + // https://docs.python.org/3/library/struct.html#format-characters + // std::cout << " scalar type '" << buf.format << "'" << + // std::endl; typestring: encoding + type + number of bytes + switch (dtype) + { case DT::BOOL: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::CHAR: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::SHORT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::INT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::LONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::LONGLONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::UCHAR: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::USHORT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::UINT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::ULONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::ULONGLONG: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::FLOAT: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::DOUBLE: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant(*static_cast(buf.ptr)); break; case DT::LONG_DOUBLE: - return rc.makeConstant( *static_cast(buf.ptr) ); + return rc.makeConstant( + *static_cast(buf.ptr)); break; case DT::CFLOAT: - return rc.makeConstant( *static_cast*>(buf.ptr) ); + return rc.makeConstant( + *static_cast *>(buf.ptr)); break; case DT::CDOUBLE: - return rc.makeConstant( *static_cast*>(buf.ptr) ); + return rc.makeConstant( + *static_cast *>(buf.ptr)); break; case DT::CLONG_DOUBLE: - return rc.makeConstant( *static_cast*>(buf.ptr) ); + return rc.makeConstant( + *static_cast *>(buf.ptr)); break; default: - throw std::runtime_error("make_constant: " + throw std::runtime_error( + "make_constant: " "Unknown Datatype!"); + } } - } - else - { - throw std::runtime_error("make_constant: " - "Only scalar values supported!"); - 
} - - }, py::arg("value") - ) + else + { + throw std::runtime_error( + "make_constant: " + "Only scalar values supported!"); + } + }, + py::arg("value")) // allowed python intrinsics, after (!) buffer matching - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_constant", &RecordComponent::makeConstant, + .def( + "make_constant", + &RecordComponent::makeConstant, py::arg("value")) - .def("make_empty", - []( RecordComponent & rc, Datatype dt, uint8_t dimensionality ) - { - return rc.makeEmpty( dt, dimensionality ); + .def( + "make_empty", + [](RecordComponent &rc, Datatype dt, uint8_t dimensionality) { + return rc.makeEmpty(dt, dimensionality); }, - py::arg("datatype"), py::arg("dimensionality")) - .def("make_empty", - []( - RecordComponent & rc, - pybind11::dtype const dt, - uint8_t dimensionality ) - { - return rc.makeEmpty( dtype_from_numpy( dt ), dimensionality ); + py::arg("datatype"), + py::arg("dimensionality")) + .def( + "make_empty", + [](RecordComponent &rc, + pybind11::dtype const dt, + uint8_t dimensionality) { + return rc.makeEmpty(dtype_from_numpy(dt), dimensionality); }) // TODO if we also want to support scalar arrays, we have to switch @@ -844,173 +910,192 @@ void init_RecordComponent(py::module &m) { // https://github.com/pybind/pybind11/pull/1537 // slicing protocol - .def("__getitem__", [](RecordComponent & r, py::tuple const & slices) { - return load_chunk(r, slices); - }, - py::arg("tuple of index slices") - ) - .def("__getitem__", [](RecordComponent & r, py::slice const & slice_obj) { - auto const slices = py::make_tuple(slice_obj); - return load_chunk(r, slices); - }, - py::arg("slice") - ) - .def("__getitem__", [](RecordComponent & r, py::int_ const & slice_obj) { - auto const slices = py::make_tuple(slice_obj); - return load_chunk(r, slices); - }, - py::arg("axis index") - ) - - .def("__setitem__", [](RecordComponent & r, py::tuple const & slices, py::array & a ) { - store_chunk(r, a, slices); - }, + .def( + "__getitem__", + [](RecordComponent &r, py::tuple const &slices) { + return load_chunk(r, slices); + }, + py::arg("tuple of index slices")) + .def( + "__getitem__", + [](RecordComponent &r, py::slice const &slice_obj) { + auto const slices = py::make_tuple(slice_obj); + return load_chunk(r, slices); + }, + py::arg("slice")) + .def( + "__getitem__", + [](RecordComponent &r, py::int_ const &slice_obj) { + auto const slices = py::make_tuple(slice_obj); + return load_chunk(r, slices); + }, + py::arg("axis index")) + + .def( + "__setitem__", + [](RecordComponent &r, py::tuple const &slices, py::array &a) { + store_chunk(r, a, slices); + }, py::arg("tuple of index slices"), - py::arg("array with values to assign") - ) - .def("__setitem__", [](RecordComponent & r, py::slice const & slice_obj, py::array & a ) { - auto const slices = py::make_tuple(slice_obj); - store_chunk(r, a, slices); - }, + py::arg("array with values to assign")) + .def( + "__setitem__", + [](RecordComponent &r, py::slice const &slice_obj, py::array &a) { + auto const slices = py::make_tuple(slice_obj); + store_chunk(r, a, slices); + }, py::arg("slice"), - py::arg("array with values to assign") - ) - .def("__setitem__", [](RecordComponent 
& r, py::int_ const & slice_obj, py::array & a ) { - auto const slices = py::make_tuple(slice_obj); - store_chunk(r, a, slices); - }, + py::arg("array with values to assign")) + .def( + "__setitem__", + [](RecordComponent &r, py::int_ const &slice_obj, py::array &a) { + auto const slices = py::make_tuple(slice_obj); + store_chunk(r, a, slices); + }, py::arg("axis index"), - py::arg("array with values to assign") - ) + py::arg("array with values to assign")) // deprecated: pass-through C++ API - .def("load_chunk", [](RecordComponent & r, Offset const & offset_in, Extent const & extent_in) { - uint8_t ndim = r.getDimensionality(); - - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u ) - offset = Offset(ndim, 0u); - - // extent = {-1u}: take full size - Extent extent(ndim, 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - { - extent = r.getExtent(); - for( uint8_t i = 0u; i < ndim; ++i ) - extent[i] -= offset[i]; - } - else - extent = extent_in; - - std::vector< ptrdiff_t > shape(extent.size()); - std::copy(std::begin(extent), std::end(extent), std::begin(shape)); - auto const dtype = dtype_to_numpy( r.getDatatype() ); - auto a = py::array( dtype, shape ); - load_chunk(r, a, offset, extent); - - return a; - }, - py::arg_v("offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), - py::arg_v("extent", Extent(1, -1u), "Record_Component.shape") - ) - .def("load_chunk", []( - RecordComponent & r, - py::buffer buffer, - Offset const & offset_in, - Extent const & extent_in) - { - uint8_t ndim = r.getDimensionality(); + .def( + "load_chunk", + [](RecordComponent &r, + Offset const &offset_in, + Extent const &extent_in) { + uint8_t ndim = r.getDimensionality(); + + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u) + offset = Offset(ndim, 0u); + + // extent = {-1u}: take full size + Extent extent(ndim, 1u); + if (extent_in.size() == 1u && extent_in.at(0) == -1u) + { + extent = r.getExtent(); + for (uint8_t i = 0u; i < ndim; ++i) + extent[i] -= offset[i]; + } + else + extent = extent_in; - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u ) - offset = Offset(ndim, 0u); + std::vector shape(extent.size()); + std::copy( + std::begin(extent), std::end(extent), std::begin(shape)); + auto const dtype = dtype_to_numpy(r.getDatatype()); + auto a = py::array(dtype, shape); + load_chunk(r, a, offset, extent); - // extent = {-1u}: take full size - Extent extent(ndim, 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - { - extent = r.getExtent(); - for( uint8_t i = 0u; i < ndim; ++i ) - extent[i] -= offset[i]; - } - else - extent = extent_in; + return a; + }, + py::arg_v( + "offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), + py::arg_v("extent", Extent(1, -1u), "Record_Component.shape")) + .def( + "load_chunk", + [](RecordComponent &r, + py::buffer buffer, + Offset const &offset_in, + Extent const &extent_in) { + uint8_t ndim = r.getDimensionality(); + + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u) + offset = Offset(ndim, 0u); + + // extent = {-1u}: take full size + Extent extent(ndim, 1u); + if (extent_in.size() == 1u && extent_in.at(0) == 
-1u) + { + extent = r.getExtent(); + for (uint8_t i = 0u; i < ndim; ++i) + extent[i] -= offset[i]; + } + else + extent = extent_in; - std::vector flatten(ndim, false); - load_chunk(r, buffer, offset, extent); - }, + std::vector flatten(ndim, false); + load_chunk(r, buffer, offset, extent); + }, py::arg("pre-allocated buffer"), - py::arg_v("offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), - py::arg_v("extent", Extent(1, -1u), "Record_Component.shape") - ) + py::arg_v( + "offset", Offset(1, 0u), "np.zeros(Record_Component.shape)"), + py::arg_v("extent", Extent(1, -1u), "Record_Component.shape")) // deprecated: pass-through C++ API - .def("store_chunk", [](RecordComponent & r, py::array & a, Offset const & offset_in, Extent const & extent_in) { - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u && a.ndim() > 1u ) - offset = Offset(a.ndim(), 0u); - - // extent = {-1u}: take full size - Extent extent(a.ndim(), 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - for( auto d = 0; d < a.ndim(); ++d ) - extent.at(d) = a.shape()[d]; - else - extent = extent_in; - - std::vector flatten(r.getDimensionality(), false); - store_chunk(r, a, offset, extent, flatten); - }, + .def( + "store_chunk", + [](RecordComponent &r, + py::array &a, + Offset const &offset_in, + Extent const &extent_in) { + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u && + a.ndim() > 1u) + offset = Offset(a.ndim(), 0u); + + // extent = {-1u}: take full size + Extent extent(a.ndim(), 1u); + if (extent_in.size() == 1u && extent_in.at(0) == -1u) + for (auto d = 0; d < a.ndim(); ++d) + extent.at(d) = a.shape()[d]; + else + extent = extent_in; + + std::vector flatten(r.getDimensionality(), false); + store_chunk(r, a, offset, extent, flatten); + }, py::arg("array"), - py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), - py::arg_v("extent", Extent(1, -1u), "array.shape") - ) - .def("store_chunk", [](RecordComponent & r, Offset const & offset_in, Extent const & extent_in) { - // default arguments - // offset = {0u}: expand to right dim {0u, 0u, ...} - unsigned dimensionality = r.getDimensionality(); - Extent const & totalExtent = r.getExtent(); - Offset offset = offset_in; - if( offset_in.size() == 1u && offset_in.at(0) == 0u && dimensionality > 1u ) - offset = Offset(dimensionality, 0u); - - // extent = {-1u}: take full size - Extent extent(dimensionality, 1u); - if( extent_in.size() == 1u && extent_in.at(0) == -1u ) - for( unsigned d = 0; d < dimensionality; ++d ) - extent.at(d) = totalExtent[d]; - else - extent = extent_in; - - std::vector flatten(r.getDimensionality(), false); - return store_chunk_span(r, offset, extent, flatten); - }, - py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), - py::arg_v("extent", Extent(1, -1u), "array.shape") - ) + py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), + py::arg_v("extent", Extent(1, -1u), "array.shape")) + .def( + "store_chunk", + [](RecordComponent &r, + Offset const &offset_in, + Extent const &extent_in) { + // default arguments + // offset = {0u}: expand to right dim {0u, 0u, ...} + unsigned dimensionality = r.getDimensionality(); + Extent const &totalExtent = r.getExtent(); + Offset offset = offset_in; + if (offset_in.size() == 1u && offset_in.at(0) == 0u && + dimensionality > 1u) + offset = Offset(dimensionality, 0u); + + 
// extent = {-1u}: take full size + Extent extent(dimensionality, 1u); + if (extent_in.size() == 1u && extent_in.at(0) == -1u) + for (unsigned d = 0; d < dimensionality; ++d) + extent.at(d) = totalExtent[d]; + else + extent = extent_in; + + std::vector flatten(r.getDimensionality(), false); + return store_chunk_span(r, offset, extent, flatten); + }, + py::arg_v("offset", Offset(1, 0u), "np.zeros_like(array)"), + py::arg_v("extent", Extent(1, -1u), "array.shape")) - .def_property_readonly_static("SCALAR", [](py::object){ return RecordComponent::SCALAR; }) + .def_property_readonly_static( + "SCALAR", [](py::object) { return RecordComponent::SCALAR; }) // TODO remove in future versions (deprecated) .def("set_unit_SI", &RecordComponent::setUnitSI) // deprecated - ; + ; add_pickle( - cl, - [](openPMD::Series & series, std::vector< std::string > const & group ) { + cl, [](openPMD::Series &series, std::vector const &group) { uint64_t const n_it = std::stoull(group.at(1)); - return series.iterations[n_it].particles[group.at(3)][group.at(4)][group.at(5)]; - } - ); + return series.iterations[n_it] + .particles[group.at(3)][group.at(4)][group.at(5)]; + }); py::enum_(m, "Allocation") .value("USER", RecordComponent::Allocation::USER) .value("API", RecordComponent::Allocation::API) - .value("AUTO", RecordComponent::Allocation::AUTO) - ; + .value("AUTO", RecordComponent::Allocation::AUTO); } diff --git a/src/binding/python/Series.cpp b/src/binding/python/Series.cpp index b8bb2e8298..04edf7957b 100644 --- a/src/binding/python/Series.cpp +++ b/src/binding/python/Series.cpp @@ -22,13 +22,13 @@ #include #include -#include "openPMD/config.hpp" #include "openPMD/Series.hpp" +#include "openPMD/config.hpp" #if openPMD_HAVE_MPI // re-implemented signatures: // include -# include +#include #endif #include @@ -37,136 +37,165 @@ namespace py = pybind11; using namespace openPMD; #if openPMD_HAVE_MPI - /** mpi4py communicator wrapper - * - * refs: - * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/libmpi.pxd#L35-L36 - * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/MPI.pxd#L100-L105 - * - installed: include/mpi4py/mpi4py.MPI.h - */ - struct openPMD_PyMPICommObject - { - PyObject_HEAD - MPI_Comm ob_mpi; - unsigned int flags; - }; - using openPMD_PyMPIIntracommObject = openPMD_PyMPICommObject; +/** mpi4py communicator wrapper + * + * refs: + * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/libmpi.pxd#L35-L36 + * - https://github.com/mpi4py/mpi4py/blob/3.0.0/src/mpi4py/MPI.pxd#L100-L105 + * - installed: include/mpi4py/mpi4py.MPI.h + */ +struct openPMD_PyMPICommObject +{ + PyObject_HEAD MPI_Comm ob_mpi; + unsigned int flags; +}; +using openPMD_PyMPIIntracommObject = openPMD_PyMPICommObject; #endif - -void init_Series(py::module &m) { +void init_Series(py::module &m) +{ using iterations_key_t = decltype(Series::iterations)::key_type; py::class_(m, "WriteIterations") - .def("__getitem__", - [](WriteIterations writeIterations, iterations_key_t key){ + .def( + "__getitem__", + [](WriteIterations writeIterations, iterations_key_t key) { return writeIterations[key]; - }, - // keep container alive while iterator exists - py::keep_alive<0, 1>()) - ; + }, + // keep container alive while iterator exists + py::keep_alive<0, 1>()); py::class_(m, "IndexedIteration") - .def_readonly( - "iteration_index", &IndexedIteration::iterationIndex) - ; + .def_readonly("iteration_index", &IndexedIteration::iterationIndex); py::class_(m, "ReadIterations") - .def("__iter__", [](ReadIterations & readIterations) { - 
return py::make_iterator( - readIterations.begin(), readIterations.end()); - }, - // keep handle alive while iterator exists - py::keep_alive<0, 1>()) - ; + .def( + "__iter__", + [](ReadIterations &readIterations) { + return py::make_iterator( + readIterations.begin(), readIterations.end()); + }, + // keep handle alive while iterator exists + py::keep_alive<0, 1>()); py::class_(m, "Series") - .def(py::init(), + .def( + py::init(), py::arg("filepath"), py::arg("access"), py::arg("options") = "{}") #if openPMD_HAVE_MPI - .def(py::init([]( - std::string const& filepath, - Access at, - py::object &comm, - std::string const& options ){ - //! TODO perform mpi4py import test and check min-version - //! careful: double MPI_Init risk? only import mpi4py.MPI? - //! required C-API init? probably just checks: - //! refs: - //! - https://bitbucket.org/mpi4py/mpi4py/src/3.0.0/demo/wrap-c/helloworld.c - //! - installed: include/mpi4py/mpi4py.MPI_api.h - // if( import_mpi4py() < 0 ) { here be dragons } - - if( comm.ptr() == Py_None ) - throw std::runtime_error("Series: MPI communicator cannot be None."); - if( comm.ptr() == nullptr ) - throw std::runtime_error("Series: MPI communicator is a nullptr."); - - // check type string to see if this is mpi4py - // __str__ (pretty) - // __repr__ (unambiguous) - // mpi4py: - // pyMPI: ... (TODO) - py::str const comm_pystr = py::repr(comm); - std::string const comm_str = comm_pystr.cast(); - if( comm_str.substr(0, 12) != std::string(" >(comm.get_type()) ) - // TODO add mpi4py version from above import check to error message - throw std::runtime_error("Series: comm has unexpected type layout in " + - comm_str + - " (Mismatched MPI at compile vs. runtime? " - "Breaking mpi4py release?)"); - - // todo other possible implementations: - // - pyMPI (inactive since 2008?): import mpi; mpi.WORLD - - // reimplementation of mpi4py's: - // MPI_Comm* mpiCommPtr = PyMPIComm_Get(comm.ptr()); - MPI_Comm* mpiCommPtr = &((openPMD_PyMPIIntracommObject*)(comm.ptr()))->ob_mpi; - - if( PyErr_Occurred() ) - throw std::runtime_error("Series: MPI communicator access error."); - if( mpiCommPtr == nullptr ) { - throw std::runtime_error("Series: MPI communicator cast failed. " - "(Mismatched MPI at compile vs. runtime?)"); - } - - return new Series(filepath, at, *mpiCommPtr, options ); - }), + .def( + py::init([](std::string const &filepath, + Access at, + py::object &comm, + std::string const &options) { + //! TODO perform mpi4py import test and check min-version + //! careful: double MPI_Init risk? only import mpi4py.MPI? + //! required C-API init? probably just checks: + //! refs: + //! - + //! https://bitbucket.org/mpi4py/mpi4py/src/3.0.0/demo/wrap-c/helloworld.c + //! - installed: include/mpi4py/mpi4py.MPI_api.h + // if( import_mpi4py() < 0 ) { here be dragons } + + if (comm.ptr() == Py_None) + throw std::runtime_error( + "Series: MPI communicator cannot be None."); + if (comm.ptr() == nullptr) + throw std::runtime_error( + "Series: MPI communicator is a nullptr."); + + // check type string to see if this is mpi4py + // __str__ (pretty) + // __repr__ (unambiguous) + // mpi4py: + // pyMPI: ... (TODO) + py::str const comm_pystr = py::repr(comm); + std::string const comm_str = comm_pystr.cast(); + if (comm_str.substr(0, 12) != std::string(">( + comm.get_type())) + // TODO add mpi4py version from above import check to error + // message + throw std::runtime_error( + "Series: comm has unexpected type layout in " + + comm_str + + " (Mismatched MPI at compile vs. runtime? 
" + "Breaking mpi4py release?)"); + + // todo other possible implementations: + // - pyMPI (inactive since 2008?): import mpi; mpi.WORLD + + // reimplementation of mpi4py's: + // MPI_Comm* mpiCommPtr = PyMPIComm_Get(comm.ptr()); + MPI_Comm *mpiCommPtr = + &((openPMD_PyMPIIntracommObject *)(comm.ptr()))->ob_mpi; + + if (PyErr_Occurred()) + throw std::runtime_error( + "Series: MPI communicator access error."); + if (mpiCommPtr == nullptr) + { + throw std::runtime_error( + "Series: MPI communicator cast failed. " + "(Mismatched MPI at compile vs. runtime?)"); + } + + return new Series(filepath, at, *mpiCommPtr, options); + }), py::arg("filepath"), py::arg("access"), py::arg("mpi_communicator"), - py::arg("options") = "{}" - ) + py::arg("options") = "{}") #endif .def_property("openPMD", &Series::openPMD, &Series::setOpenPMD) - .def_property("openPMD_extension", &Series::openPMDextension, &Series::setOpenPMDextension) + .def_property( + "openPMD_extension", + &Series::openPMDextension, + &Series::setOpenPMDextension) .def_property("base_path", &Series::basePath, &Series::setBasePath) - .def_property("meshes_path", &Series::meshesPath, &Series::setMeshesPath) - .def_property("particles_path", &Series::particlesPath, &Series::setParticlesPath) + .def_property( + "meshes_path", &Series::meshesPath, &Series::setMeshesPath) + .def_property( + "particles_path", &Series::particlesPath, &Series::setParticlesPath) .def_property("author", &Series::author, &Series::setAuthor) - .def_property("machine", + .def_property( + "machine", &Series::machine, &Series::setMachine, "Indicate the machine or relevant hardware that created the file.") .def_property_readonly("software", &Series::software) - .def("set_software", &Series::setSoftware, - py::arg("name"), py::arg("version") = std::string("unspecified")) + .def( + "set_software", + &Series::setSoftware, + py::arg("name"), + py::arg("version") = std::string("unspecified")) .def_property_readonly("software_version", &Series::softwareVersion) - .def("set_software_version", [](Series & s, std::string const& softwareVersion) { - py::print("Series.set_software_version is deprecated. Set the version with the second argument of Series.set_software"); - s.setSoftware(s.software(), softwareVersion); - }) + .def( + "set_software_version", + [](Series &s, std::string const &softwareVersion) { + py::print( + "Series.set_software_version is deprecated. 
Set the " + "version with the second argument of Series.set_software"); + s.setSoftware(s.software(), softwareVersion); + }) // softwareDependencies // machine .def_property("date", &Series::date, &Series::setDate) - .def_property("iteration_encoding", &Series::iterationEncoding, &Series::setIterationEncoding) - .def_property("iteration_format", &Series::iterationFormat, &Series::setIterationFormat) + .def_property( + "iteration_encoding", + &Series::iterationEncoding, + &Series::setIterationEncoding) + .def_property( + "iteration_format", + &Series::iterationFormat, + &Series::setIterationFormat) .def_property("name", &Series::name, &Series::setName) .def("flush", &Series::flush) @@ -184,12 +213,15 @@ void init_Series(py::module &m) { .def("set_iteration_format", &Series::setIterationFormat) .def("set_name", &Series::setName) - .def_readwrite("iterations", &Series::iterations, + .def_readwrite( + "iterations", + &Series::iterations, py::return_value_policy::reference, // garbage collection: return value must be freed before Series py::keep_alive<1, 0>()) .def("read_iterations", &Series::readIterations, py::keep_alive<0, 1>()) - .def("write_iterations", - &Series::writeIterations, py::keep_alive<0, 1>()) - ; + .def( + "write_iterations", + &Series::writeIterations, + py::keep_alive<0, 1>()); } diff --git a/src/binding/python/UnitDimension.cpp b/src/binding/python/UnitDimension.cpp index e1477eca25..72074cde78 100644 --- a/src/binding/python/UnitDimension.cpp +++ b/src/binding/python/UnitDimension.cpp @@ -26,8 +26,8 @@ namespace py = pybind11; using namespace openPMD; - -void init_UnitDimension(py::module &m) { +void init_UnitDimension(py::module &m) +{ py::enum_(m, "Unit_Dimension") .value("L", UnitDimension::L) .value("M", UnitDimension::M) @@ -35,6 +35,5 @@ void init_UnitDimension(py::module &m) { .value("I", UnitDimension::I) .value("theta", UnitDimension::theta) .value("N", UnitDimension::N) - .value("J", UnitDimension::J) - ; + .value("J", UnitDimension::J); } diff --git a/src/binding/python/openPMD.cpp b/src/binding/python/openPMD.cpp index 555c885f27..5133c66d21 100644 --- a/src/binding/python/openPMD.cpp +++ b/src/binding/python/openPMD.cpp @@ -24,13 +24,12 @@ #include "openPMD/config.hpp" #include "openPMD/version.hpp" -#include #include #include +#include namespace py = pybind11; - // forward declarations of exposed classes void init_Access(py::module &); void init_Attributable(py::module &); @@ -55,8 +54,8 @@ void init_RecordComponent(py::module &); void init_Series(py::module &); void init_UnitDimension(py::module &); - -PYBIND11_MODULE(openpmd_api_cxx, m) { +PYBIND11_MODULE(openpmd_api_cxx, m) +{ m.doc() = R"pbdoc( openPMD-api ----------- diff --git a/src/cli/ls.cpp b/src/cli/ls.cpp index bea296a5a2..65bb226602 100644 --- a/src/cli/ls.cpp +++ b/src/cli/ls.cpp @@ -24,14 +24,11 @@ #include #include - -int main( - int argc, - char * argv[] -) +int main(int argc, char *argv[]) { - std::vector< std::string > str_argv; - for( int i = 0; i < argc; ++i ) str_argv.emplace_back(argv[i]); + std::vector str_argv; + for (int i = 0; i < argc; ++i) + str_argv.emplace_back(argv[i]); - return openPMD::cli::ls::run( str_argv ); + return openPMD::cli::ls::run(str_argv); } diff --git a/src/config.cpp b/src/config.cpp index 06b9c58640..d37d9cc88e 100644 --- a/src/config.cpp +++ b/src/config.cpp @@ -28,23 +28,19 @@ #include #include - -std::map< std::string, bool > -openPMD::getVariants( ) +std::map openPMD::getVariants() { - return std::map< std::string, bool >{ - {"mpi", bool(openPMD_HAVE_MPI)}, 
- {"json", true}, - {"hdf5", bool(openPMD_HAVE_HDF5)}, - {"adios1", bool(openPMD_HAVE_ADIOS1)}, - {"adios2", bool(openPMD_HAVE_ADIOS2)} - }; + return std::map{ + {"mpi", bool(openPMD_HAVE_MPI)}, + {"json", true}, + {"hdf5", bool(openPMD_HAVE_HDF5)}, + {"adios1", bool(openPMD_HAVE_ADIOS1)}, + {"adios2", bool(openPMD_HAVE_ADIOS2)}}; } -std::vector< std::string > -openPMD::getFileExtensions() +std::vector openPMD::getFileExtensions() { - std::vector< std::string > fext; + std::vector fext; fext.emplace_back("json"); #if openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2 fext.emplace_back("bp"); diff --git a/src/helper/list_series.cpp b/src/helper/list_series.cpp index da4b1f7c57..eeb7523bb4 100644 --- a/src/helper/list_series.cpp +++ b/src/helper/list_series.cpp @@ -25,99 +25,136 @@ #include "openPMD/Mesh.hpp" #include "openPMD/ParticleSpecies.hpp" -#include #include #include #include - +#include namespace openPMD::helper { - std::ostream & - listSeries( - Series & series, - bool const longer, - std::ostream & out - ) - { - out << "openPMD series: " << series.name() << "\n"; - out << "openPMD standard: " << series.openPMD() << "\n"; - out << "openPMD extensions: " << series.openPMDextension() << "\n\n"; // TODO improve listing of extensions +std::ostream &listSeries(Series &series, bool const longer, std::ostream &out) +{ + out << "openPMD series: " << series.name() << "\n"; + out << "openPMD standard: " << series.openPMD() << "\n"; + out << "openPMD extensions: " << series.openPMDextension() + << "\n\n"; // TODO improve listing of extensions - if( longer ) + if (longer) + { + out << "data author: "; + try { - out << "data author: "; - try{ out << series.author() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } - out << "data created: "; - try{ out << series.date() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } - out << "data backend: " << series.backend() << "\n"; - out << "generating machine: "; - try{ out << series.machine() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } - out << "generating software: "; - try{ out << series.software(); } catch( no_such_attribute_error const & ) { out << "unknown"; } - out << " (version: "; - try{ out << series.softwareVersion() << ")\n"; } catch( no_such_attribute_error const & ) { out << "unknown)\n"; } - out << "generating software dependencies: "; - try{ out << series.softwareDependencies() << "\n"; } catch( no_such_attribute_error const & ) { out << "unknown\n"; } - - out << "\n"; + out << series.author() << "\n"; } - - std::set< std::string > meshes; //! unique mesh names in all iterations - std::set< std::string > particles; //! 
unique particle species names in all iterations - - out << "number of iterations: " << series.iterations.size(); - if( longer ) - out << " (" << series.iterationEncoding() << ")"; - out << "\n"; - if( series.iterations.size() > 0u ) + catch (no_such_attribute_error const &) { - if( longer ) - out << " all iterations: "; - - for( auto const& i : series.readIterations() ) { - if( longer ) - out << i.iterationIndex << " "; - - // find unique record names - std::transform( - i.meshes.begin(), - i.meshes.end(), - std::inserter( meshes, meshes.end() ), - []( std::pair< std::string, Mesh > const & p ) - { return p.first; } - ); - std::transform( - i.particles.begin(), - i.particles.end(), - std::inserter( particles, particles.end() ), - []( std::pair< std::string, ParticleSpecies > const & p ) - { return p.first; } - ); - } - - if( longer ) - out << "\n"; + out << "unknown\n"; } - - out << "\n"; - out << "number of meshes: " << meshes.size() << "\n"; - if( longer && meshes.size() > 0u ) + out << "data created: "; + try + { + out << series.date() << "\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown\n"; + } + out << "data backend: " << series.backend() << "\n"; + out << "generating machine: "; + try + { + out << series.machine() << "\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown\n"; + } + out << "generating software: "; + try { - out << " all meshes:\n"; - for( auto const& m : meshes ) - out << " " << m << "\n"; + out << series.software(); + } + catch (no_such_attribute_error const &) + { + out << "unknown"; + } + out << " (version: "; + try + { + out << series.softwareVersion() << ")\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown)\n"; + } + out << "generating software dependencies: "; + try + { + out << series.softwareDependencies() << "\n"; + } + catch (no_such_attribute_error const &) + { + out << "unknown\n"; } out << "\n"; - out << "number of particle species: " << particles.size() << "\n"; - if( longer && particles.size() > 0u ) + } + + std::set meshes; //! unique mesh names in all iterations + std::set + particles; //! 
unique particle species names in all iterations + + out << "number of iterations: " << series.iterations.size(); + if (longer) + out << " (" << series.iterationEncoding() << ")"; + out << "\n"; + if (series.iterations.size() > 0u) + { + if (longer) + out << " all iterations: "; + + for (auto const &i : series.readIterations()) { - out << " all particle species:\n"; - for( auto const& p : particles ) - out << " " << p << "\n"; + if (longer) + out << i.iterationIndex << " "; + + // find unique record names + std::transform( + i.meshes.begin(), + i.meshes.end(), + std::inserter(meshes, meshes.end()), + [](std::pair const &p) { return p.first; }); + std::transform( + i.particles.begin(), + i.particles.end(), + std::inserter(particles, particles.end()), + [](std::pair const &p) { + return p.first; + }); } - return out; + if (longer) + out << "\n"; + } + + out << "\n"; + out << "number of meshes: " << meshes.size() << "\n"; + if (longer && meshes.size() > 0u) + { + out << " all meshes:\n"; + for (auto const &m : meshes) + out << " " << m << "\n"; + } + + out << "\n"; + out << "number of particle species: " << particles.size() << "\n"; + if (longer && particles.size() > 0u) + { + out << " all particle species:\n"; + for (auto const &p : particles) + out << " " << p << "\n"; } -} // openPMD + + return out; +} +} // namespace openPMD::helper diff --git a/src/version.cpp b/src/version.cpp index 7cba20f98d..6fa0a9ecfa 100644 --- a/src/version.cpp +++ b/src/version.cpp @@ -23,33 +23,27 @@ #include #include - -std::string -openPMD::getVersion( ) +std::string openPMD::getVersion() { std::stringstream api; - api << OPENPMDAPI_VERSION_MAJOR << "." - << OPENPMDAPI_VERSION_MINOR << "." + api << OPENPMDAPI_VERSION_MAJOR << "." << OPENPMDAPI_VERSION_MINOR << "." << OPENPMDAPI_VERSION_PATCH; - if( std::string( OPENPMDAPI_VERSION_LABEL ).size() > 0 ) + if (std::string(OPENPMDAPI_VERSION_LABEL).size() > 0) api << "-" << OPENPMDAPI_VERSION_LABEL; std::string const apistr = api.str(); return apistr; } -std::string -openPMD::getStandard( ) +std::string openPMD::getStandard() { std::stringstream standard; - standard << OPENPMD_STANDARD_MAJOR << "." - << OPENPMD_STANDARD_MINOR << "." + standard << OPENPMD_STANDARD_MAJOR << "." << OPENPMD_STANDARD_MINOR << "." << OPENPMD_STANDARD_PATCH; std::string const standardstr = standard.str(); return standardstr; } -std::string -openPMD::getStandardMinimum( ) +std::string openPMD::getStandardMinimum() { std::stringstream standardMin; standardMin << OPENPMD_STANDARD_MIN_MAJOR << "." 
diff --git a/test/AuxiliaryTest.cpp b/test/AuxiliaryTest.cpp index c56fbb84d8..4a67507cbd 100644 --- a/test/AuxiliaryTest.cpp +++ b/test/AuxiliaryTest.cpp @@ -1,19 +1,19 @@ // expose private and protected members for invasive testing #if openPMD_USE_INVASIVE_TESTS -# define OPENPMD_private public -# define OPENPMD_protected public +#define OPENPMD_private public: +#define OPENPMD_protected public: #endif -#include "openPMD/config.hpp" -#include "openPMD/backend/Writable.hpp" -#include "openPMD/backend/Attributable.hpp" -#include "openPMD/backend/Container.hpp" +#include "openPMD/Dataset.hpp" +#include "openPMD/IO/AbstractIOHandler.hpp" +#include "openPMD/IO/AbstractIOHandlerHelper.hpp" #include "openPMD/auxiliary/DerefDynamicCast.hpp" #include "openPMD/auxiliary/Filesystem.hpp" #include "openPMD/auxiliary/StringManip.hpp" #include "openPMD/auxiliary/Variant.hpp" -#include "openPMD/IO/AbstractIOHandler.hpp" -#include "openPMD/IO/AbstractIOHandlerHelper.hpp" -#include "openPMD/Dataset.hpp" +#include "openPMD/backend/Attributable.hpp" +#include "openPMD/backend/Container.hpp" +#include "openPMD/backend/Writable.hpp" +#include "openPMD/config.hpp" #include @@ -22,51 +22,65 @@ #include #include #include -#include #include +#include using namespace openPMD; - namespace openPMD::test { struct TestHelper : public Attributable { TestHelper() { - writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); + writable().IOHandler = + createIOHandler(".", Access::CREATE, Format::JSON); } }; -} // openPMD - +} // namespace openPMD::test -TEST_CASE( "deref_cast_test", "[auxiliary]" ) { +TEST_CASE("deref_cast_test", "[auxiliary]") +{ using namespace auxiliary; - struct A { double m_x; A(double x) : m_x(x){} virtual ~A() = default; }; - struct B : virtual A { B(double x) : A(x){}}; - struct C { float m_x; }; + struct A + { + double m_x; + A(double x) : m_x(x) + {} + virtual ~A() = default; + }; + struct B : virtual A + { + B(double x) : A(x) + {} + }; + struct C + { + float m_x; + }; B const value = {123.45}; - B const * const ptr = &value; + B const *const ptr = &value; auto const a = deref_dynamic_cast(ptr); - auto const& ra = deref_dynamic_cast(ptr); + auto const &ra = deref_dynamic_cast(ptr); (void)a; (void)ra; REQUIRE_THROWS_AS(deref_dynamic_cast(ptr), std::runtime_error); - A * const nptr = nullptr; + A *const nptr = nullptr; REQUIRE_THROWS_AS(deref_dynamic_cast(nptr), std::runtime_error); } -TEST_CASE( "string_test", "[auxiliary]" ) +TEST_CASE("string_test", "[auxiliary]") { using namespace auxiliary; - std::string s = "Man muss noch Chaos in sich haben, " - "um einen tanzenden Stern gebaeren zu koennen."; + std::string s = + "Man muss noch Chaos in sich haben, " + "um einen tanzenden Stern gebaeren zu koennen."; REQUIRE(starts_with(s, 'M')); REQUIRE(starts_with(s, "Man")); REQUIRE(starts_with(s, "Man muss noch")); @@ -84,24 +98,32 @@ TEST_CASE( "string_test", "[auxiliary]" ) REQUIRE("String" == replace_first("string", "s", "S")); REQUIRE("sTRING" == replace_first("string", "tring", "TRING")); REQUIRE("string" == replace_first("string", " ", "_")); - REQUIRE("strinGstringstring" == replace_first("stringstringstring", "g", "G")); - REQUIRE("#stringstring" == replace_first("stringstringstring", "string", "#")); + REQUIRE( + "strinGstringstring" == replace_first("stringstringstring", "g", "G")); + REQUIRE( + "#stringstring" == replace_first("stringstringstring", "string", "#")); - REQUIRE("stringstringstrinG" == replace_last("stringstringstring", "g", "G")); - REQUIRE("stringstring#" == 
replace_last("stringstringstring", "string", "#")); + REQUIRE( + "stringstringstrinG" == replace_last("stringstringstring", "g", "G")); + REQUIRE( + "stringstring#" == replace_last("stringstringstring", "string", "#")); REQUIRE("/normal/path" == replace_all("////normal//////path", "//", "/")); - std::vector< std::string > expected1{"0", "string", " ", "1234", "te st"}; - std::vector< std::string > expected2{"0_DELIM_", "string_DELIM_", " _DELIM_", "1234_DELIM_", "te st_DELIM_"}; - std::vector< std::string > expected3{"path", "to", "relevant", "data"}; - std::string s2 = "_DELIM_0_DELIM_string_DELIM_ _DELIM_1234_DELIM_te st_DELIM_"; + std::vector expected1{"0", "string", " ", "1234", "te st"}; + std::vector expected2{ + "0_DELIM_", "string_DELIM_", " _DELIM_", "1234_DELIM_", "te st_DELIM_"}; + std::vector expected3{"path", "to", "relevant", "data"}; + std::string s2 = + "_DELIM_0_DELIM_string_DELIM_ _DELIM_1234_DELIM_te st_DELIM_"; REQUIRE(expected1 == split(s2, "_DELIM_", false)); REQUIRE(expected2 == split(s2, "_DELIM_", true)); REQUIRE(expected3 == split("/path/to/relevant/data/", "/")); - REQUIRE("stringstringstring" == strip("\t string\tstring string\0", { '\0', '\t', ' '})); - REQUIRE("stringstringstring" == strip("stringstringstring", { })); + REQUIRE( + "stringstringstring" == + strip("\t string\tstring string\0", {'\0', '\t', ' '})); + REQUIRE("stringstringstring" == strip("stringstringstring", {})); REQUIRE("1,2,3,4" == join({"1", "2", "3", "4"}, ",")); REQUIRE("1234" == join({"1", "2", "3", "4"}, "")); @@ -115,16 +137,15 @@ namespace openPMD::test { struct S : public TestHelper { - S() - : TestHelper() - { } + S() : TestHelper() + {} }; -} // openPMD +} // namespace openPMD::test -TEST_CASE( "container_default_test", "[auxiliary]") +TEST_CASE("container_default_test", "[auxiliary]") { #if openPMD_USE_INVASIVE_TESTS - Container< openPMD::test::S > c = Container< openPMD::test::S >(); + Container c = Container(); c.writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); REQUIRE(c.empty()); @@ -138,28 +159,37 @@ namespace openPMD::test { struct structure : public TestHelper { - structure() - : TestHelper() - { } + structure() : TestHelper() + {} std::string string_ = "Hello, world!"; int int_ = 42; float float_ = 3.14f; - [[nodiscard]] std::string text() const { return std::get< std::string >(getAttribute("text").getResource()); } - structure& setText(std::string newText) { setAttribute("text", newText); return *this; } + [[nodiscard]] std::string text() const + { + return std::get(getAttribute("text").getResource()); + } + structure &setText(std::string newText) + { + setAttribute("text", newText); + return *this; + } }; -} // openPMD +} // namespace openPMD::test -TEST_CASE( "container_retrieve_test", "[auxiliary]" ) +TEST_CASE("container_retrieve_test", "[auxiliary]") { #if openPMD_USE_INVASIVE_TESTS using structure = openPMD::test::structure; - Container< structure > c = Container< structure >(); + Container c = Container(); c.writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); structure s; - std::string text = "The openPMD standard, short for open standard for particle-mesh data files is not a file format per se. It is a standard for meta data and naming schemes."; + std::string text = + "The openPMD standard, short for open standard for particle-mesh data " + "files is not a file format per se. 
It is a standard for meta data and " + "naming schemes."; s.setText(text); c["entry"] = s; REQUIRE(c["entry"].string_ == "Hello, world!"); @@ -168,7 +198,6 @@ TEST_CASE( "container_retrieve_test", "[auxiliary]" ) REQUIRE(c["entry"].text() == text); REQUIRE(s.text() == text); - structure s2 = c["entry"]; REQUIRE(s2.string_ == "Hello, world!"); REQUIRE(s2.int_ == 42); @@ -176,7 +205,6 @@ TEST_CASE( "container_retrieve_test", "[auxiliary]" ) REQUIRE(s2.text() == text); REQUIRE(c["entry"].text() == text); - s2.string_ = "New string"; s2.int_ = -1; s2.float_ = 0.0f; @@ -214,21 +242,19 @@ namespace openPMD::test { struct Widget : public TestHelper { - Widget() - : TestHelper() - { } + Widget() : TestHelper() + {} - Widget(int) - : TestHelper() - { } + Widget(int) : TestHelper() + {} }; -} // openPMD +} // namespace openPMD::test -TEST_CASE( "container_access_test", "[auxiliary]" ) +TEST_CASE("container_access_test", "[auxiliary]") { #if openPMD_USE_INVASIVE_TESTS using Widget = openPMD::test::Widget; - Container< Widget > c = Container< Widget >(); + Container c = Container(); c.writable().IOHandler = createIOHandler(".", Access::CREATE, Format::JSON); c["1firstWidget"] = Widget(0); @@ -261,7 +287,7 @@ TEST_CASE( "container_access_test", "[auxiliary]" ) #endif } -TEST_CASE( "attributable_default_test", "[auxiliary]" ) +TEST_CASE("attributable_default_test", "[auxiliary]") { Attributable a; @@ -272,35 +298,34 @@ namespace openPMD::test { struct AttributedWidget : public TestHelper { - AttributedWidget() - : TestHelper() - { } + AttributedWidget() : TestHelper() + {} Attribute::resource get(std::string key) { return getAttribute(key).getResource(); } }; -} // openPMD +} // namespace openPMD::test -TEST_CASE( "attributable_access_test", "[auxiliary]" ) +TEST_CASE("attributable_access_test", "[auxiliary]") { using AttributedWidget = openPMD::test::AttributedWidget; AttributedWidget a = AttributedWidget(); a.setAttribute("key", std::string("value")); REQUIRE(a.numAttributes() == 1); - REQUIRE(std::get< std::string >(a.get("key")) == "value"); + REQUIRE(std::get(a.get("key")) == "value"); a.setAttribute("key", std::string("newValue")); REQUIRE(a.numAttributes() == 1); - REQUIRE(std::get< std::string >(a.get("key")) == "newValue"); + REQUIRE(std::get(a.get("key")) == "newValue"); - using array_t = std::array< double, 7 >; + using array_t = std::array; array_t arr{{1, 2, 3, 4, 5, 6, 7}}; a.setAttribute("array", arr); REQUIRE(a.numAttributes() == 2); - REQUIRE(std::get< array_t >(a.get("array")) == arr); + REQUIRE(std::get(a.get("array")) == arr); REQUIRE(a.deleteAttribute("nonExistentKey") == false); REQUIRE(a.numAttributes() == 2); REQUIRE(a.deleteAttribute("key") == true); @@ -317,24 +342,44 @@ namespace openPMD::test { struct Dotty : public TestHelper { - Dotty() - : TestHelper() + Dotty() : TestHelper() { setAtt1(1); setAtt2(2); setAtt3("3"); } - [[nodiscard]] int att1() const { return std::get< int >(getAttribute("att1").getResource()); } - [[nodiscard]] double att2() const { return std::get< double >(getAttribute("att2").getResource()); } - [[nodiscard]] std::string att3() const { return std::get< std::string >(getAttribute("att3").getResource()); } - Dotty& setAtt1(int i) { setAttribute("att1", i); return *this; } - Dotty& setAtt2(double d) { setAttribute("att2", d); return *this; } - Dotty& setAtt3(std::string s) { setAttribute("att3", s); return *this; } + [[nodiscard]] int att1() const + { + return std::get(getAttribute("att1").getResource()); + } + [[nodiscard]] double att2() const + { + 
return std::get(getAttribute("att2").getResource()); + } + [[nodiscard]] std::string att3() const + { + return std::get(getAttribute("att3").getResource()); + } + Dotty &setAtt1(int i) + { + setAttribute("att1", i); + return *this; + } + Dotty &setAtt2(double d) + { + setAttribute("att2", d); + return *this; + } + Dotty &setAtt3(std::string s) + { + setAttribute("att3", s); + return *this; + } }; -} // openPMD +} // namespace openPMD::test -TEST_CASE( "dot_test", "[auxiliary]" ) +TEST_CASE("dot_test", "[auxiliary]") { openPMD::test::Dotty d; REQUIRE(d.att1() == 1); @@ -347,7 +392,7 @@ TEST_CASE( "dot_test", "[auxiliary]" ) REQUIRE(d.att3() == "30"); } -TEST_CASE( "filesystem_test", "[auxiliary]" ) +TEST_CASE("filesystem_test", "[auxiliary]") { using auxiliary::create_directories; using auxiliary::file_exists; @@ -356,27 +401,25 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) using auxiliary::remove_directory; using auxiliary::remove_file; - auto contains = - [](std::vector< std::string > const & entries, std::string const & path) -> bool - { return std::find(entries.cbegin(), entries.cend(), path) != entries.cend(); }; - - auto random_string = - [](std::string::size_type length) -> std::string - { - auto randchar = - []() -> char - { - char const charset[] = - "0123456789" - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz"; - size_t const max_index = (sizeof(charset) - 1); - return charset[rand() % max_index]; - }; - std::string str(length, 0); - std::generate_n(str.begin(), length, randchar); - return str; + auto contains = [](std::vector const &entries, + std::string const &path) -> bool { + return std::find(entries.cbegin(), entries.cend(), path) != + entries.cend(); + }; + + auto random_string = [](std::string::size_type length) -> std::string { + auto randchar = []() -> char { + char const charset[] = + "0123456789" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "abcdefghijklmnopqrstuvwxyz"; + size_t const max_index = (sizeof(charset) - 1); + return charset[rand() % max_index]; }; + std::string str(length, 0); + std::generate_n(str.begin(), length, randchar); + return str; + }; #ifdef _WIN32 REQUIRE(directory_exists("C:\\")); @@ -391,7 +434,7 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) REQUIRE(!contains(dir_entries, "nonexistent_folder_in_C_drive")); std::string new_directory = random_string(10); - while( directory_exists(new_directory) ) + while (directory_exists(new_directory)) new_directory = random_string(10); REQUIRE(create_directories(new_directory)); REQUIRE(create_directories(new_directory)); @@ -413,9 +456,9 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) REQUIRE(!remove_file(".\\nonexistent_file_in_cmake_bin_directory")); #else REQUIRE(directory_exists("/")); - //REQUIRE(directory_exists("/boot")); - //REQUIRE(directory_exists("/etc")); - //REQUIRE(directory_exists("/home")); + // REQUIRE(directory_exists("/boot")); + // REQUIRE(directory_exists("/etc")); + // REQUIRE(directory_exists("/home")); REQUIRE(!directory_exists("/nonexistent_folder_in_root_directory")); REQUIRE(directory_exists("../bin")); @@ -424,14 +467,14 @@ TEST_CASE( "filesystem_test", "[auxiliary]" ) auto dir_entries = list_directory("/"); REQUIRE(!dir_entries.empty()); - //REQUIRE(contains(dir_entries, "boot")); - //REQUIRE(contains(dir_entries, "etc")); - //REQUIRE(contains(dir_entries, "home")); - //REQUIRE(contains(dir_entries, "root")); + // REQUIRE(contains(dir_entries, "boot")); + // REQUIRE(contains(dir_entries, "etc")); + // REQUIRE(contains(dir_entries, "home")); + // 
REQUIRE(contains(dir_entries, "root")); REQUIRE(!contains(dir_entries, "nonexistent_folder_in_root_directory")); std::string new_directory = random_string(10); - while( directory_exists(new_directory) ) + while (directory_exists(new_directory)) new_directory = random_string(10); std::string new_sub_directory = new_directory + "/" + random_string(10); REQUIRE(create_directories(new_sub_directory)); diff --git a/test/CatchRunner.cpp b/test/CatchRunner.cpp index 3603e6ef70..d24a5b27e6 100644 --- a/test/CatchRunner.cpp +++ b/test/CatchRunner.cpp @@ -2,7 +2,7 @@ #include #if openPMD_HAVE_MPI -# include +#include int main(int argc, char *argv[]) { @@ -12,9 +12,9 @@ int main(int argc, char *argv[]) int result = 0; { // Indicates a command line parsing - result = session.applyCommandLine( argc, argv ); + result = session.applyCommandLine(argc, argv); // RT tests - if( result == 0 ) + if (result == 0) result = session.run(); } MPI_Finalize(); @@ -27,9 +27,9 @@ int main(int argc, char *argv[]) int result = 0; { // Indicates a command line parsing - result = session.applyCommandLine( argc, argv ); + result = session.applyCommandLine(argc, argv); // RT tests - if( result == 0 ) + if (result == 0) result = session.run(); } return result; diff --git a/test/CoreTest.cpp b/test/CoreTest.cpp index 9677aec2be..d2eb185714 100644 --- a/test/CoreTest.cpp +++ b/test/CoreTest.cpp @@ -1,7 +1,7 @@ // expose private and protected members for invasive testing #if openPMD_USE_INVASIVE_TESTS -# define OPENPMD_private public -# define OPENPMD_protected public +#define OPENPMD_private public: +#define OPENPMD_protected public: #endif #include "openPMD/openPMD.hpp" @@ -24,128 +24,128 @@ using namespace openPMD; -TEST_CASE( "versions_test", "[core]" ) +TEST_CASE("versions_test", "[core]") { - auto const apiVersion = getVersion( ); - auto const is_dot = []( char const c ){ return c == '.'; }; + auto const apiVersion = getVersion(); + auto const is_dot = [](char const c) { return c == '.'; }; REQUIRE(2u == std::count_if(apiVersion.begin(), apiVersion.end(), is_dot)); - auto const standard = getStandard( ); + auto const standard = getStandard(); REQUIRE(standard == "1.1.0"); - auto const standardMin = getStandardMinimum( ); + auto const standardMin = getStandardMinimum(); REQUIRE(standardMin == "1.0.0"); - auto const featureVariants = getVariants( ); + auto const featureVariants = getVariants(); REQUIRE(featureVariants.at("json") == true); } -TEST_CASE( "attribute_dtype_test", "[core]" ) +TEST_CASE("attribute_dtype_test", "[core]") { - Attribute a = Attribute(static_cast< char >(' ')); + Attribute a = Attribute(static_cast(' ')); REQUIRE(Datatype::CHAR == a.dtype); - a = Attribute(static_cast< unsigned char >(' ')); + a = Attribute(static_cast(' ')); REQUIRE(Datatype::UCHAR == a.dtype); - a = Attribute(static_cast< short >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::SHORT == a.dtype); - a = Attribute(static_cast< int >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::INT == a.dtype); - a = Attribute(static_cast< long >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::LONG == a.dtype); - a = Attribute(static_cast< long long >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::LONGLONG == a.dtype); - a = Attribute(static_cast< unsigned short >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::USHORT == a.dtype); - a = Attribute(static_cast< unsigned int >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::UINT == a.dtype); - a = Attribute(static_cast< unsigned long >(0)); + a = 
Attribute(static_cast(0)); REQUIRE(Datatype::ULONG == a.dtype); - a = Attribute(static_cast< unsigned long long >(0)); + a = Attribute(static_cast(0)); REQUIRE(Datatype::ULONGLONG == a.dtype); - a = Attribute(static_cast< float >(0.)); + a = Attribute(static_cast(0.)); REQUIRE(Datatype::FLOAT == a.dtype); - a = Attribute(static_cast< double >(0.)); + a = Attribute(static_cast(0.)); REQUIRE(Datatype::DOUBLE == a.dtype); - a = Attribute(static_cast< long double >(0.)); + a = Attribute(static_cast(0.)); REQUIRE(Datatype::LONG_DOUBLE == a.dtype); - a = Attribute(static_cast< std::complex< float > >(0.)); + a = Attribute(static_cast>(0.)); REQUIRE(Datatype::CFLOAT == a.dtype); - a = Attribute(static_cast< std::complex< double > >(0.)); + a = Attribute(static_cast>(0.)); REQUIRE(Datatype::CDOUBLE == a.dtype); - a = Attribute(static_cast< std::complex< long double > >(0.)); + a = Attribute(static_cast>(0.)); REQUIRE(Datatype::CLONG_DOUBLE == a.dtype); a = Attribute(std::string("")); REQUIRE(Datatype::STRING == a.dtype); - a = Attribute(std::vector< char >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_CHAR == a.dtype); - a = Attribute(std::vector< short >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_SHORT == a.dtype); - a = Attribute(std::vector< int >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_INT == a.dtype); - a = Attribute(std::vector< long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_LONG == a.dtype); - a = Attribute(std::vector< long long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_LONGLONG == a.dtype); - a = Attribute(std::vector< unsigned char >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_UCHAR == a.dtype); - a = Attribute(std::vector< unsigned short >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_USHORT == a.dtype); - a = Attribute(std::vector< unsigned int >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_UINT == a.dtype); - a = Attribute(std::vector< unsigned long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_ULONG == a.dtype); - a = Attribute(std::vector< unsigned long long >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_ULONGLONG == a.dtype); - a = Attribute(std::vector< float >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_FLOAT == a.dtype); - a = Attribute(std::vector< double >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_DOUBLE == a.dtype); - a = Attribute(std::vector< long double >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_LONG_DOUBLE == a.dtype); - a = Attribute(std::vector< std::string >()); + a = Attribute(std::vector()); REQUIRE(Datatype::VEC_STRING == a.dtype); - a = Attribute(std::array< double, 7 >()); + a = Attribute(std::array()); REQUIRE(Datatype::ARR_DBL_7 == a.dtype); - a = Attribute(static_cast< bool >(false)); + a = Attribute(static_cast(false)); REQUIRE(Datatype::BOOL == a.dtype); // fixed size integers - a = Attribute(static_cast< int16_t >(0)); - REQUIRE(determineDatatype< int16_t >() == a.dtype); - a = Attribute(static_cast< int32_t >(0)); - REQUIRE(determineDatatype< int32_t >() == a.dtype); - a = Attribute(static_cast< int64_t >(0)); - REQUIRE(determineDatatype< int64_t >() == a.dtype); - a = Attribute(static_cast< uint16_t >(0)); - REQUIRE(determineDatatype< uint16_t >() == a.dtype); - a = Attribute(static_cast< uint32_t >(0)); - REQUIRE(determineDatatype< uint32_t >() == a.dtype); - a = Attribute(static_cast< uint64_t >(0)); - REQUIRE(determineDatatype< uint64_t >() == a.dtype); + a = 
Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); + a = Attribute(static_cast(0)); + REQUIRE(determineDatatype() == a.dtype); // TODO fixed size floats // same implementation types (not necessary aliases) detection - if( sizeof(long) == sizeof(long long) ) + if (sizeof(long) == sizeof(long long)) { - a = Attribute(static_cast< long >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::LONGLONG, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::LONGLONG == a.dtype); #endif - a = Attribute(static_cast< long long >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::LONG, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::LONG == a.dtype); #endif } - if( sizeof(int) == sizeof(long) ) + if (sizeof(int) == sizeof(long)) { - a = Attribute(static_cast< long >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::INT, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::INT == a.dtype); #endif - a = Attribute(static_cast< int >(0)); + a = Attribute(static_cast(0)); REQUIRE(isSame(Datatype::LONG, a.dtype)); #if !defined(_MSC_VER) REQUIRE(Datatype::LONG == a.dtype); @@ -153,116 +153,111 @@ TEST_CASE( "attribute_dtype_test", "[core]" ) } } -TEST_CASE( "myPath", "[core]" ) +TEST_CASE("myPath", "[core]") { #if openPMD_USE_INVASIVE_TESTS - using vec_t = std::vector< std::string >; - auto pathOf = []( Attributable & attr ) - { + using vec_t = std::vector; + auto pathOf = [](Attributable &attr) { auto res = attr.myPath(); #if false std::cout << "Directory:\t" << res.directory << "\nSeries name:\t" << res.seriesName << "\nSeries ext:\t" << res.seriesExtension << std::endl; #endif - REQUIRE( res.directory == "../samples/" ); - REQUIRE( res.seriesName == "myPath" ); - REQUIRE( res.seriesExtension == ".json" ); - REQUIRE( res.filePath() == "../samples/myPath.json" ); + REQUIRE(res.directory == "../samples/"); + REQUIRE(res.seriesName == "myPath"); + REQUIRE(res.seriesExtension == ".json"); + REQUIRE(res.filePath() == "../samples/myPath.json"); return res.group; }; - Series series( "../samples/myPath.json", Access::CREATE ); - REQUIRE( pathOf( series ) == vec_t{} ); - auto iteration = series.iterations[ 1234 ]; - REQUIRE( pathOf( iteration ) == vec_t{ "iterations", "1234" } ); + Series series("../samples/myPath.json", Access::CREATE); + REQUIRE(pathOf(series) == vec_t{}); + auto iteration = series.iterations[1234]; + REQUIRE(pathOf(iteration) == vec_t{"iterations", "1234"}); - auto writeSomething = []( auto & recordComponent ) - { - recordComponent.resetDataset( { Datatype::INT, { 100 } } ); - recordComponent.template makeConstant< int >( 5678 ); + auto writeSomething = [](auto &recordComponent) { + recordComponent.resetDataset({Datatype::INT, {100}}); + recordComponent.template makeConstant(5678); }; - REQUIRE( - pathOf( iteration.meshes ) == vec_t{ "iterations", "1234", "meshes" } ); + REQUIRE(pathOf(iteration.meshes) == vec_t{"iterations", "1234", "meshes"}); - auto scalarMesh = iteration.meshes[ "e_chargeDensity" ]; + auto scalarMesh = iteration.meshes["e_chargeDensity"]; REQUIRE( - pathOf( scalarMesh ) == - vec_t{ "iterations", "1234", "meshes", "e_chargeDensity" } ); - auto scalarMeshComponent = scalarMesh[ RecordComponent::SCALAR ]; + pathOf(scalarMesh) 
== + vec_t{"iterations", "1234", "meshes", "e_chargeDensity"}); + auto scalarMeshComponent = scalarMesh[RecordComponent::SCALAR]; REQUIRE( - pathOf( scalarMeshComponent ) == + pathOf(scalarMeshComponent) == vec_t{ "iterations", "1234", "meshes", "e_chargeDensity", - RecordComponent::SCALAR } ); - writeSomething( scalarMeshComponent ); + RecordComponent::SCALAR}); + writeSomething(scalarMeshComponent); - auto vectorMesh = iteration.meshes[ "E" ]; - REQUIRE( - pathOf( vectorMesh ) == vec_t{ "iterations", "1234", "meshes", "E" } ); - auto vectorMeshComponent = vectorMesh[ "x" ]; + auto vectorMesh = iteration.meshes["E"]; + REQUIRE(pathOf(vectorMesh) == vec_t{"iterations", "1234", "meshes", "E"}); + auto vectorMeshComponent = vectorMesh["x"]; REQUIRE( - pathOf( vectorMeshComponent ) == - vec_t{ "iterations", "1234", "meshes", "E", "x" } ); + pathOf(vectorMeshComponent) == + vec_t{"iterations", "1234", "meshes", "E", "x"}); REQUIRE( - pathOf( iteration.particles ) == - vec_t{ "iterations", "1234", "particles" } ); + pathOf(iteration.particles) == + vec_t{"iterations", "1234", "particles"}); - auto speciesE = iteration.particles[ "e" ]; - REQUIRE( - pathOf( speciesE ) == vec_t{ "iterations", "1234", "particles", "e" } ); + auto speciesE = iteration.particles["e"]; + REQUIRE(pathOf(speciesE) == vec_t{"iterations", "1234", "particles", "e"}); - auto speciesPosition = speciesE[ "position" ]; + auto speciesPosition = speciesE["position"]; REQUIRE( - pathOf( speciesPosition ) == - vec_t{ "iterations", "1234", "particles", "e", "position" } ); + pathOf(speciesPosition) == + vec_t{"iterations", "1234", "particles", "e", "position"}); - auto speciesPositionX = speciesPosition[ "x" ]; + auto speciesPositionX = speciesPosition["x"]; REQUIRE( - pathOf( speciesPositionX ) == - vec_t{ "iterations", "1234", "particles", "e", "position", "x" } ); - writeSomething( speciesPositionX ); + pathOf(speciesPositionX) == + vec_t{"iterations", "1234", "particles", "e", "position", "x"}); + writeSomething(speciesPositionX); - auto speciesWeighting = speciesE[ "weighting" ]; + auto speciesWeighting = speciesE["weighting"]; REQUIRE( - pathOf( speciesWeighting ) == - vec_t{ "iterations", "1234", "particles", "e", "weighting" } ); + pathOf(speciesWeighting) == + vec_t{"iterations", "1234", "particles", "e", "weighting"}); - auto speciesWeightingX = speciesWeighting[ RecordComponent::SCALAR ]; + auto speciesWeightingX = speciesWeighting[RecordComponent::SCALAR]; REQUIRE( - pathOf( speciesWeightingX ) == + pathOf(speciesWeightingX) == vec_t{ "iterations", "1234", "particles", "e", "weighting", - RecordComponent::SCALAR } ); - writeSomething( speciesWeightingX ); + RecordComponent::SCALAR}); + writeSomething(speciesWeightingX); REQUIRE( - pathOf( speciesE.particlePatches ) == - vec_t{ "iterations", "1234", "particles", "e", "particlePatches" } ); + pathOf(speciesE.particlePatches) == + vec_t{"iterations", "1234", "particles", "e", "particlePatches"}); - auto patchExtent = speciesE.particlePatches[ "extent" ]; + auto patchExtent = speciesE.particlePatches["extent"]; REQUIRE( - pathOf( patchExtent ) == + pathOf(patchExtent) == vec_t{ "iterations", "1234", "particles", "e", "particlePatches", - "extent" } ); + "extent"}); - auto patchExtentX = patchExtent[ "x" ]; + auto patchExtentX = patchExtent["x"]; REQUIRE( - pathOf( patchExtentX ) == + pathOf(patchExtentX) == vec_t{ "iterations", "1234", @@ -270,23 +265,23 @@ TEST_CASE( "myPath", "[core]" ) "e", "particlePatches", "extent", - "x" } ); + "x"}); - auto patchNumParticles = 
speciesE.particlePatches[ "numParticles" ]; + auto patchNumParticles = speciesE.particlePatches["numParticles"]; REQUIRE( - pathOf( patchNumParticles ) == + pathOf(patchNumParticles) == vec_t{ "iterations", "1234", "particles", "e", "particlePatches", - "numParticles" } ); + "numParticles"}); auto patchNumParticlesComponent = - patchNumParticles[ RecordComponent::SCALAR ]; + patchNumParticles[RecordComponent::SCALAR]; REQUIRE( - pathOf( patchNumParticlesComponent ) == + pathOf(patchNumParticlesComponent) == vec_t{ "iterations", "1234", @@ -294,11 +289,11 @@ TEST_CASE( "myPath", "[core]" ) "e", "particlePatches", "numParticles", - RecordComponent::SCALAR } ); + RecordComponent::SCALAR}); #endif } -TEST_CASE( "output_default_test", "[core]" ) +TEST_CASE("output_default_test", "[core]") { using IE = IterationEncoding; Series o = Series("./new_openpmd_output_%T.json", Access::CREATE); @@ -309,20 +304,24 @@ TEST_CASE( "output_default_test", "[core]" ) REQUIRE(o.iterationEncoding() == IE::fileBased); REQUIRE(o.iterationFormat() == "new_openpmd_output_%T"); REQUIRE(o.iterations.empty()); - REQUIRE(o.numAttributes() == 8); /* openPMD, openPMDextension, basePath, iterationEncoding, iterationFormat, date, software, softwareVersion */ + REQUIRE( + o.numAttributes() == + 8); /* openPMD, openPMDextension, basePath, iterationEncoding, + iterationFormat, date, software, softwareVersion */ REQUIRE(o.name() == "new_openpmd_output_%T"); o.iterations[0]; } -TEST_CASE( "output_constructor_test", "[core]" ) +TEST_CASE("output_constructor_test", "[core]") { using IE = IterationEncoding; Series o = Series("./MyCustomOutput.json", Access::CREATE); o.setMeshesPath("customMeshesPath").setParticlesPath("customParticlesPath"); - o.iterations[1].meshes["foo"]["baz"].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[1].meshes["foo"]["baz"].resetDataset( + Dataset(Datatype::DOUBLE, {1})); auto species = o.iterations[1].particles["bar"]; auto dset = Dataset(Datatype::DOUBLE, {1}); species["position"][RecordComponent::SCALAR].resetDataset(dset); @@ -336,11 +335,15 @@ TEST_CASE( "output_constructor_test", "[core]" ) REQUIRE(o.iterationEncoding() == IE::groupBased); REQUIRE(o.iterationFormat() == "/data/%T/"); REQUIRE(o.iterations.size() == 1); - REQUIRE(o.numAttributes() == 10); /* openPMD, openPMDextension, basePath, meshesPath, particlesPath, iterationEncoding, iterationFormat, date, software, softwareVersion */ + REQUIRE( + o.numAttributes() == + 10); /* openPMD, openPMDextension, basePath, meshesPath, particlesPath, + iterationEncoding, iterationFormat, date, software, + softwareVersion */ REQUIRE(o.name() == "MyCustomOutput"); } -TEST_CASE( "output_modification_test", "[core]" ) +TEST_CASE("output_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); @@ -365,45 +368,45 @@ TEST_CASE( "output_modification_test", "[core]" ) o.iterations[0]; } -TEST_CASE( "iteration_default_test", "[core]" ) +TEST_CASE("iteration_default_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); - Iteration& i = o.iterations[42]; + Iteration &i = o.iterations[42]; - REQUIRE(i.time< double >() == static_cast(0)); - REQUIRE(i.dt< double >() == static_cast(1)); + REQUIRE(i.time() == static_cast(0)); + REQUIRE(i.dt() == static_cast(1)); REQUIRE(i.timeUnitSI() == static_cast(1)); REQUIRE(i.numAttributes() == 3); REQUIRE(i.meshes.empty()); REQUIRE(i.particles.empty()); } -TEST_CASE( "iteration_modification_test", "[core]" ) +TEST_CASE("iteration_modification_test", "[core]") { 
Series o = Series("./MyOutput_%T.json", Access::CREATE); - Iteration& i = o.iterations[42]; + Iteration &i = o.iterations[42]; float time = 0.314f; i.setTime(time); - REQUIRE(i.time< float >() == time); + REQUIRE(i.time() == time); double dt = 0.42; i.setDt(dt); - REQUIRE(i.dt< long double >() == static_cast< long double >(dt)); + REQUIRE(i.dt() == static_cast(dt)); i.setTimeUnitSI(0.000000000001); - REQUIRE(i.timeUnitSI() == static_cast< double >(0.000000000001)); + REQUIRE(i.timeUnitSI() == static_cast(0.000000000001)); } -TEST_CASE( "particleSpecies_modification_test", "[core]" ) +TEST_CASE("particleSpecies_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); - auto& particles = o.iterations[42].particles; + auto &particles = o.iterations[42].particles; REQUIRE(0 == particles.numAttributes()); - auto& species = particles["species"]; + auto &species = particles["species"]; REQUIRE(1 == particles.size()); REQUIRE(1 == particles.count("species")); REQUIRE(0 == species.numAttributes()); @@ -411,27 +414,26 @@ TEST_CASE( "particleSpecies_modification_test", "[core]" ) species["position"][RecordComponent::SCALAR].resetDataset(dset); species["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); REQUIRE(1 == species.count("positionOffset")); - auto& patches = species.particlePatches; + auto &patches = species.particlePatches; REQUIRE(2 == patches.size()); REQUIRE(0 == patches.numAttributes()); - auto& offset = patches["offset"]; + auto &offset = patches["offset"]; REQUIRE(0 == offset.size()); - REQUIRE(1 == offset.numAttributes()); //unitDimension - std::array< double, 7 > zeros{{0., 0., 0., 0., 0., 0., 0.}}; + REQUIRE(1 == offset.numAttributes()); // unitDimension + std::array zeros{{0., 0., 0., 0., 0., 0., 0.}}; REQUIRE(zeros == offset.unitDimension()); - auto& off_x = offset["x"]; + auto &off_x = offset["x"]; off_x.resetDataset(dset); REQUIRE(1 == off_x.unitSI()); } - -TEST_CASE( "record_constructor_test", "[core]" ) +TEST_CASE("record_constructor_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); ParticleSpecies ps = o.iterations[42].particles["species"]; - Record& r = ps["record"]; + Record &r = ps["record"]; auto dset = Dataset(Datatype::DOUBLE, {1}); ps["position"][RecordComponent::SCALAR].resetDataset(dset); ps["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); @@ -442,46 +444,43 @@ TEST_CASE( "record_constructor_test", "[core]" ) REQUIRE(r["y"].numAttributes() == 1); /* unitSI */ REQUIRE(r["z"].unitSI() == 1); REQUIRE(r["z"].numAttributes() == 1); /* unitSI */ - std::array< double, 7 > zeros{{0., 0., 0., 0., 0., 0., 0.}}; + std::array zeros{{0., 0., 0., 0., 0., 0., 0.}}; REQUIRE(r.unitDimension() == zeros); - REQUIRE(r.timeOffset< float >() == static_cast(0)); + REQUIRE(r.timeOffset() == static_cast(0)); REQUIRE(r.numAttributes() == 2); /* timeOffset, unitDimension */ } -TEST_CASE( "record_modification_test", "[core]" ) +TEST_CASE("record_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); auto species = o.iterations[42].particles["species"]; - Record& r = species["position"]; + Record &r = species["position"]; auto dset = Dataset(Datatype::DOUBLE, {1}); species["position"][RecordComponent::SCALAR].resetDataset(dset); species["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); using RUD = UnitDimension; - r.setUnitDimension({{RUD::L, 1.}, - {RUD::M, 1.}, - {RUD::T, -3.}, - {RUD::I, -1.}}); - std::array< double, 7 > e_field_unitDimension{{1., 1., -3., -1., 0., 
0., 0.}}; + r.setUnitDimension( + {{RUD::L, 1.}, {RUD::M, 1.}, {RUD::T, -3.}, {RUD::I, -1.}}); + std::array e_field_unitDimension{{1., 1., -3., -1., 0., 0., 0.}}; REQUIRE(r.unitDimension() == e_field_unitDimension); - r.setUnitDimension({{RUD::L, 0.}, - {RUD::T, -2.}}); - std::array< double, 7 > b_field_unitDimension{{0., 1., -2., -1., 0., 0., 0.}}; + r.setUnitDimension({{RUD::L, 0.}, {RUD::T, -2.}}); + std::array b_field_unitDimension{{0., 1., -2., -1., 0., 0., 0.}}; REQUIRE(r.unitDimension() == b_field_unitDimension); float timeOffset = 0.314f; r.setTimeOffset(timeOffset); - REQUIRE(r.timeOffset< float >() == timeOffset); + REQUIRE(r.timeOffset() == timeOffset); } -TEST_CASE( "recordComponent_modification_test", "[core]" ) +TEST_CASE("recordComponent_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); ParticleSpecies ps = o.iterations[42].particles["species"]; - Record& r = ps["record"]; + Record &r = ps["record"]; auto dset = Dataset(Datatype::DOUBLE, {1}); ps["position"][RecordComponent::SCALAR].resetDataset(dset); ps["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); @@ -498,35 +497,38 @@ TEST_CASE( "recordComponent_modification_test", "[core]" ) REQUIRE(r["z"].numAttributes() == 1); /* unitSI */ } -TEST_CASE( "mesh_constructor_test", "[core]" ) +TEST_CASE("mesh_constructor_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); Mesh &m = o.iterations[42].meshes["E"]; - std::vector< double > pos{0}; + std::vector pos{0}; REQUIRE(m["x"].unitSI() == 1); REQUIRE(m["x"].numAttributes() == 2); /* unitSI, position */ - REQUIRE(m["x"].position< double >() == pos); + REQUIRE(m["x"].position() == pos); REQUIRE(m["y"].unitSI() == 1); REQUIRE(m["y"].numAttributes() == 2); /* unitSI, position */ - REQUIRE(m["y"].position< double >() == pos); + REQUIRE(m["y"].position() == pos); REQUIRE(m["z"].unitSI() == 1); REQUIRE(m["z"].numAttributes() == 2); /* unitSI, position */ - REQUIRE(m["z"].position< double >() == pos); + REQUIRE(m["z"].position() == pos); REQUIRE(m.geometry() == Mesh::Geometry::cartesian); REQUIRE(m.dataOrder() == Mesh::DataOrder::C); - std::vector< std::string > al{"x"}; + std::vector al{"x"}; REQUIRE(m.axisLabels() == al); - std::vector< double > gs{1}; - REQUIRE(m.gridSpacing< double >() == gs); - std::vector< double > ggo{0}; + std::vector gs{1}; + REQUIRE(m.gridSpacing() == gs); + std::vector ggo{0}; REQUIRE(m.gridGlobalOffset() == ggo); REQUIRE(m.gridUnitSI() == static_cast(1)); - REQUIRE(m.numAttributes() == 8); /* axisLabels, dataOrder, geometry, gridGlobalOffset, gridSpacing, gridUnitSI, timeOffset, unitDimension */ + REQUIRE( + m.numAttributes() == + 8); /* axisLabels, dataOrder, geometry, gridGlobalOffset, gridSpacing, + gridUnitSI, timeOffset, unitDimension */ } -TEST_CASE( "mesh_modification_test", "[core]" ) +TEST_CASE("mesh_modification_test", "[core]") { Series o = Series("./MyOutput_%T.json", Access::CREATE); @@ -541,15 +543,15 @@ TEST_CASE( "mesh_modification_test", "[core]" ) m.setDataOrder(Mesh::DataOrder::F); REQUIRE(m.dataOrder() == Mesh::DataOrder::F); REQUIRE(m.numAttributes() == 8); - std::vector< std::string > al{"z_", "y_", "x_"}; + std::vector al{"z_", "y_", "x_"}; m.setAxisLabels({"z_", "y_", "x_"}); REQUIRE(m.axisLabels() == al); REQUIRE(m.numAttributes() == 8); - std::vector< double > gs{1e-5, 2e-5, 3e-5}; + std::vector gs{1e-5, 2e-5, 3e-5}; m.setGridSpacing(gs); - REQUIRE(m.gridSpacing< double >() == gs); + REQUIRE(m.gridSpacing() == gs); REQUIRE(m.numAttributes() == 8); - std::vector< 
double > ggo{1e-10, 2e-10, 3e-10}; + std::vector ggo{1e-10, 2e-10, 3e-10}; m.setGridGlobalOffset({1e-10, 2e-10, 3e-10}); REQUIRE(m.gridGlobalOffset() == ggo); REQUIRE(m.numAttributes() == 8); @@ -561,11 +563,11 @@ TEST_CASE( "mesh_modification_test", "[core]" ) REQUIRE(m.geometryParameters() == gp); REQUIRE(m.numAttributes() == 9); - m["x"].setPosition(std::vector< float >{0, 0, 0}); + m["x"].setPosition(std::vector{0, 0, 0}); REQUIRE(m.numAttributes() == 9); } -TEST_CASE( "structure_test", "[core]" ) +TEST_CASE("structure_test", "[core]") { #if openPMD_USE_INVASIVE_TESTS Series o = Series("./new_openpmd_output_%T.json", Access::CREATE); @@ -585,113 +587,205 @@ TEST_CASE( "structure_test", "[core]" ) REQUIRE(m.IOHandler()); REQUIRE(o.iterations[1].meshes["M"].IOHandler()); REQUIRE(m.parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M"].parent() == getWritable(&o.iterations[1].meshes)); + REQUIRE( + o.iterations[1].meshes["M"].parent() == + getWritable(&o.iterations[1].meshes)); MeshRecordComponent mrc = o.iterations[1].meshes["M"]["MRC"]; REQUIRE(mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M"]["MRC"].IOHandler()); REQUIRE(mrc.parent() == getWritable(&o.iterations[1].meshes["M"])); - REQUIRE(o.iterations[1].meshes["M"]["MRC"].parent() == getWritable(&o.iterations[1].meshes["M"])); + REQUIRE( + o.iterations[1].meshes["M"]["MRC"].parent() == + getWritable(&o.iterations[1].meshes["M"])); mrc = o.iterations[1].meshes["M"]["MRC"].makeConstant(1.0); REQUIRE(mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M"]["MRC"].IOHandler()); REQUIRE(mrc.parent() == getWritable(&o.iterations[1].meshes["M"])); - REQUIRE(o.iterations[1].meshes["M"]["MRC"].parent() == getWritable(&o.iterations[1].meshes["M"])); + REQUIRE( + o.iterations[1].meshes["M"]["MRC"].parent() == + getWritable(&o.iterations[1].meshes["M"])); - MeshRecordComponent scalar_mrc = o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR]; + MeshRecordComponent scalar_mrc = + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR]; REQUIRE(scalar_mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M2"].IOHandler()); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); REQUIRE(scalar_mrc.parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"].parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].meshes)); - scalar_mrc = o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].makeConstant(1.0); + REQUIRE( + o.iterations[1].meshes["M2"].parent() == + getWritable(&o.iterations[1].meshes)); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == + getWritable(&o.iterations[1].meshes)); + scalar_mrc = + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].makeConstant( + 1.0); REQUIRE(scalar_mrc.IOHandler()); REQUIRE(o.iterations[1].meshes["M2"].IOHandler()); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].IOHandler()); REQUIRE(scalar_mrc.parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"].parent() == getWritable(&o.iterations[1].meshes)); - REQUIRE(o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].meshes)); + REQUIRE( + 
o.iterations[1].meshes["M2"].parent() == + getWritable(&o.iterations[1].meshes)); + REQUIRE( + o.iterations[1].meshes["M2"][MeshRecordComponent::SCALAR].parent() == + getWritable(&o.iterations[1].meshes)); ParticleSpecies ps = o.iterations[1].particles["P"]; REQUIRE(ps.IOHandler()); REQUIRE(o.iterations[1].particles["P"].IOHandler()); REQUIRE(ps.parent() == getWritable(&o.iterations[1].particles)); - REQUIRE(o.iterations[1].particles["P"].parent() == getWritable(&o.iterations[1].particles)); + REQUIRE( + o.iterations[1].particles["P"].parent() == + getWritable(&o.iterations[1].particles)); REQUIRE(o.iterations[1].particles["P"].particlePatches.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches.parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1].particles["P"].particlePatches.parent() == + getWritable(&o.iterations[1].particles["P"])); auto dset = Dataset(Datatype::DOUBLE, {1}); - o.iterations[1].particles["P"]["position"][RecordComponent::SCALAR].resetDataset(dset); - o.iterations[1].particles["P"]["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); + o.iterations[1] + .particles["P"]["position"][RecordComponent::SCALAR] + .resetDataset(dset); + o.iterations[1] + .particles["P"]["positionOffset"][RecordComponent::SCALAR] + .resetDataset(dset); Record r = o.iterations[1].particles["P"]["PR"]; REQUIRE(r.IOHandler()); REQUIRE(o.iterations[1].particles["P"]["PR"].IOHandler()); REQUIRE(r.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"]["PR"].parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1].particles["P"]["PR"].parent() == + getWritable(&o.iterations[1].particles["P"])); RecordComponent rc = o.iterations[1].particles["P"]["PR"]["PRC"]; REQUIRE(rc.IOHandler()); REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].IOHandler()); REQUIRE(rc.parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); - REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); + REQUIRE( + o.iterations[1].particles["P"]["PR"]["PRC"].parent() == + getWritable(&o.iterations[1].particles["P"]["PR"])); rc = o.iterations[1].particles["P"]["PR"]["PRC"].makeConstant(1.0); REQUIRE(rc.IOHandler()); REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].IOHandler()); REQUIRE(rc.parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); - REQUIRE(o.iterations[1].particles["P"]["PR"]["PRC"].parent() == getWritable(&o.iterations[1].particles["P"]["PR"])); + REQUIRE( + o.iterations[1].particles["P"]["PR"]["PRC"].parent() == + getWritable(&o.iterations[1].particles["P"]["PR"])); - RecordComponent scalar_rc = o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR]; + RecordComponent scalar_rc = + o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR]; REQUIRE(scalar_rc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].IOHandler()); + REQUIRE(o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .IOHandler()); REQUIRE(scalar_rc.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].particles["P"])); - scalar_rc = o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].makeConstant(1.0); + REQUIRE( + o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .parent() == getWritable(&o.iterations[1].particles["P"])); + 
scalar_rc = o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .makeConstant(1.0); REQUIRE(scalar_rc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].IOHandler()); + REQUIRE(o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .IOHandler()); REQUIRE(scalar_rc.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"]["PR2"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1] + .particles["P"]["PR2"][RecordComponent::SCALAR] + .parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(1 == o.iterations[1].particles["P"].particlePatches.count("numParticles")); - REQUIRE(1 == o.iterations[1].particles["P"].particlePatches.count("numParticlesOffset")); + REQUIRE( + 1 == + o.iterations[1].particles["P"].particlePatches.count("numParticles")); + REQUIRE( + 1 == + o.iterations[1].particles["P"].particlePatches.count( + "numParticlesOffset")); ParticlePatches pp = o.iterations[1].particles["P"].particlePatches; REQUIRE(pp.IOHandler()); REQUIRE(o.iterations[1].particles["P"].particlePatches.IOHandler()); REQUIRE(pp.parent() == getWritable(&o.iterations[1].particles["P"])); - REQUIRE(o.iterations[1].particles["P"].particlePatches.parent() == getWritable(&o.iterations[1].particles["P"])); + REQUIRE( + o.iterations[1].particles["P"].particlePatches.parent() == + getWritable(&o.iterations[1].particles["P"])); - PatchRecord pr = o.iterations[1].particles["P"].particlePatches["numParticles"]; + PatchRecord pr = + o.iterations[1].particles["P"].particlePatches["numParticles"]; REQUIRE(pr.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"].IOHandler()); - REQUIRE(pr.parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE(o.iterations[1] + .particles["P"] + .particlePatches["numParticles"] + .IOHandler()); + REQUIRE( + pr.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1] + .particles["P"] + .particlePatches["numParticles"] + .parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); pr = o.iterations[1].particles["P"].particlePatches["extent"]; REQUIRE(pr.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"].IOHandler()); - REQUIRE(pr.parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1].particles["P"].particlePatches["extent"].IOHandler()); + REQUIRE( + pr.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1].particles["P"].particlePatches["extent"].parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); - PatchRecordComponent scalar_prc = o.iterations[1].particles["P"].particlePatches["numParticles"][RecordComponent::SCALAR]; + PatchRecordComponent scalar_prc = + o.iterations[1].particles["P"].particlePatches["numParticles"] + [RecordComponent::SCALAR]; REQUIRE(scalar_prc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"][RecordComponent::SCALAR].IOHandler()); - REQUIRE(scalar_prc.parent() == 
getWritable(&o.iterations[1].particles["P"].particlePatches)); - REQUIRE(o.iterations[1].particles["P"].particlePatches["numParticles"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches)); - - PatchRecordComponent prc = o.iterations[1].particles["P"].particlePatches["extent"]["x"]; + REQUIRE(o.iterations[1] + .particles["P"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .IOHandler()); + REQUIRE( + scalar_prc.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + REQUIRE( + o.iterations[1] + .particles["P"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches)); + + PatchRecordComponent prc = + o.iterations[1].particles["P"].particlePatches["extent"]["x"]; REQUIRE(prc.IOHandler()); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"]["x"].IOHandler()); - REQUIRE(prc.parent() == getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); - REQUIRE(o.iterations[1].particles["P"].particlePatches["extent"]["x"].parent() == getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); + REQUIRE(o.iterations[1] + .particles["P"] + .particlePatches["extent"]["x"] + .IOHandler()); + REQUIRE( + prc.parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); + REQUIRE( + o.iterations[1] + .particles["P"] + .particlePatches["extent"]["x"] + .parent() == + getWritable(&o.iterations[1].particles["P"].particlePatches["extent"])); prc.resetDataset(dset); #else std::cerr << "Invasive tests not enabled. Hierarchy is not visible.\n"; #endif } -TEST_CASE( "wrapper_test", "[core]" ) +TEST_CASE("wrapper_test", "[core]") { Series o = Series("./new_openpmd_output_%T.json", Access::CREATE); @@ -708,20 +802,24 @@ TEST_CASE( "wrapper_test", "[core]" ) REQUIRE(o.iterationEncoding() == IterationEncoding::groupBased); REQUIRE(o.name() == "other_name"); - o.iterations[1].meshes["E"]["x"].resetDataset(Dataset(Datatype::USHORT, {42})); + o.iterations[1].meshes["E"]["x"].resetDataset( + Dataset(Datatype::USHORT, {42})); MeshRecordComponent mrc = o.iterations[1].meshes["E"]["x"]; REQUIRE(mrc.getDatatype() == Datatype::USHORT); REQUIRE(mrc.getExtent() == Extent{42}); mrc.resetDataset(Dataset(Datatype::LONG_DOUBLE, {7})); - REQUIRE(o.iterations[1].meshes["E"]["x"].getDatatype() == Datatype::LONG_DOUBLE); + REQUIRE( + o.iterations[1].meshes["E"]["x"].getDatatype() == + Datatype::LONG_DOUBLE); REQUIRE(o.iterations[1].meshes["E"]["x"].getExtent() == Extent{7}); - Container< Iteration, uint64_t > its = o.iterations; + Container its = o.iterations; its[1].meshes["E"]["y"].resetDataset(Dataset(Datatype::CHAR, {2})); REQUIRE(o.iterations[1].meshes["E"].count("y") == 1); REQUIRE(o.iterations[1].meshes["E"]["y"].getDatatype() == Datatype::CHAR); REQUIRE(o.iterations[1].meshes["E"]["y"].getExtent() == Extent{2}); - o.iterations[1].meshes["E"]["z"].resetDataset(Dataset(Datatype::FLOAT, {1234})); + o.iterations[1].meshes["E"]["z"].resetDataset( + Dataset(Datatype::FLOAT, {1234})); REQUIRE(its[1].meshes["E"].count("z") == 1); REQUIRE(its[1].meshes["E"]["z"].getDatatype() == Datatype::FLOAT); REQUIRE(its[1].meshes["E"]["z"].getExtent() == Extent{1234}); @@ -732,7 +830,8 @@ TEST_CASE( "wrapper_test", "[core]" ) REQUIRE(o.iterations.count(3) == 1); double value = 42.; - o.iterations[4].meshes["E"]["y"].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[4].meshes["E"]["y"].resetDataset( + 
Dataset(Datatype::DOUBLE, {1})); o.iterations[4].meshes["E"]["y"].makeConstant(value); MeshRecordComponent mrc2 = o.iterations[4].meshes["E"]["y"]; REQUIRE(mrc2.constant()); @@ -740,12 +839,13 @@ TEST_CASE( "wrapper_test", "[core]" ) mrc2.loadChunk(shareRaw(&loadData), {0}, {1}); o.flush(); REQUIRE(loadData == value); - // TODO: do we want to be able to make data constant after already writing it once? - // value = 43.; - // mrc2.makeConstant(value); - REQUIRE_THROWS_WITH(mrc2.makeConstant(value), - Catch::Equals("A recordComponent can not (yet) be made constant after it has been written.")); - std::array< double, 1 > moreData = {{ 112233. }}; + // TODO: do we want to be able to make data constant after already writing + // it once? value = 43.; mrc2.makeConstant(value); + REQUIRE_THROWS_WITH( + mrc2.makeConstant(value), + Catch::Equals("A recordComponent can not (yet) be made constant after " + "it has been written.")); + std::array moreData = {{112233.}}; o.iterations[4].meshes["E"]["y"].loadChunk(shareRaw(moreData), {0}, {1}); o.flush(); REQUIRE(moreData[0] == value); @@ -758,11 +858,15 @@ TEST_CASE( "wrapper_test", "[core]" ) #endif MeshRecordComponent mrc3 = o.iterations[5].meshes["E"]["y"]; - o.iterations[5].meshes["E"]["y"].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[5].meshes["E"]["y"].resetDataset( + Dataset(Datatype::DOUBLE, {1})); int wrongData = 42; - REQUIRE_THROWS_WITH(o.iterations[5].meshes["E"]["y"].storeChunk(shareRaw(&wrongData), {0}, {1}), - Catch::Equals("Datatypes of chunk data (INT) and record component (DOUBLE) do not match.")); - std::shared_ptr< double > storeData = std::make_shared< double >(44); + REQUIRE_THROWS_WITH( + o.iterations[5].meshes["E"]["y"].storeChunk( + shareRaw(&wrongData), {0}, {1}), + Catch::Equals("Datatypes of chunk data (INT) and record component " + "(DOUBLE) do not match.")); + std::shared_ptr storeData = std::make_shared(44); o.iterations[5].meshes["E"]["y"].storeChunk(storeData, {0}, {1}); #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.iterations[5].meshes["E"]["y"].get().m_chunks.size() == 1); @@ -774,50 +878,97 @@ TEST_CASE( "wrapper_test", "[core]" ) REQUIRE(mrc3.get().m_chunks.empty()); #endif - o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].resetDataset(Dataset(determineDatatype< uint64_t >(), {4})); + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .resetDataset(Dataset(determineDatatype(), {4})); auto dset = Dataset(Datatype::DOUBLE, {1}); - o.iterations[6].particles["electrons"]["position"][RecordComponent::SCALAR].resetDataset(dset); - o.iterations[6].particles["electrons"]["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); + o.iterations[6] + .particles["electrons"]["position"][RecordComponent::SCALAR] + .resetDataset(dset); + o.iterations[6] + .particles["electrons"]["positionOffset"][RecordComponent::SCALAR] + .resetDataset(dset); ParticlePatches pp = o.iterations[6].particles["electrons"].particlePatches; - REQUIRE(pp["numParticles"][RecordComponent::SCALAR].getDatatype() == determineDatatype< uint64_t >()); - REQUIRE(pp["numParticles"][RecordComponent::SCALAR].getExtent() == Extent{4}); + REQUIRE( + pp["numParticles"][RecordComponent::SCALAR].getDatatype() == + determineDatatype()); + REQUIRE( + pp["numParticles"][RecordComponent::SCALAR].getExtent() == Extent{4}); pp["prop"]["x"].resetDataset(Dataset(Datatype::DOUBLE, {7})); - 
REQUIRE(o.iterations[6].particles["electrons"].particlePatches["prop"]["x"].getDatatype() == Datatype::DOUBLE); - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["prop"]["x"].getExtent() == Extent{7}); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["prop"]["x"] + .getDatatype() == Datatype::DOUBLE); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["prop"]["x"] + .getExtent() == Extent{7}); size_t idx = 0; uint64_t val = 10; #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].get().m_chunks.empty()); + REQUIRE(o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .get() + .m_chunks.empty()); REQUIRE(pp["numParticles"][RecordComponent::SCALAR].get().m_chunks.empty()); #endif pp["numParticles"][RecordComponent::SCALAR].store(idx, val); #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].get().m_chunks.size() == 1); - REQUIRE(pp["numParticles"][RecordComponent::SCALAR].get().m_chunks.size() == 1); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .get() + .m_chunks.size() == 1); + REQUIRE( + pp["numParticles"][RecordComponent::SCALAR].get().m_chunks.size() == 1); #endif std::stringstream u64str; u64str << determineDatatype(); - REQUIRE_THROWS_WITH(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].store(idx+1, 42.), - Catch::Equals("Datatypes of patch data (DOUBLE) and dataset (" + u64str.str() + ") do not match.")); - o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].store(idx+1, val+1); + REQUIRE_THROWS_WITH( + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .store(idx + 1, 42.), + Catch::Equals( + "Datatypes of patch data (DOUBLE) and dataset (" + u64str.str() + + ") do not match.")); + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .store(idx + 1, val + 1); #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].get().m_chunks.size() == 2); - REQUIRE(pp["numParticles"][RecordComponent::SCALAR].get().m_chunks.size() == 2); + REQUIRE( + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .get() + .m_chunks.size() == 2); + REQUIRE( + pp["numParticles"][RecordComponent::SCALAR].get().m_chunks.size() == 2); #endif o.flush(); #if openPMD_USE_INVASIVE_TESTS - REQUIRE(o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR].get().m_chunks.empty()); + REQUIRE(o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR] + .get() + .m_chunks.empty()); REQUIRE(pp["numParticles"][RecordComponent::SCALAR].get().m_chunks.empty()); #endif } -TEST_CASE( "use_count_test", "[core]" ) +TEST_CASE("use_count_test", "[core]") { Series o = Series("./new_openpmd_output.json", Access::CREATE); MeshRecordComponent mrc = o.iterations[1].meshes["E"]["x"]; mrc.resetDataset(Dataset(determineDatatype(), {42})); - std::shared_ptr< uint16_t > storeData = std::make_shared< uint16_t >(44); + std::shared_ptr storeData = std::make_shared(44); 
REQUIRE(storeData.use_count() == 1); mrc.storeChunk(storeData, {0}, {1}); REQUIRE(storeData.use_count() == 2); @@ -825,70 +976,84 @@ TEST_CASE( "use_count_test", "[core]" ) REQUIRE(storeData.use_count() == 1); #if openPMD_USE_INVASIVE_TESTS - PatchRecordComponent pprc = o.iterations[6].particles["electrons"].particlePatches["numParticles"][RecordComponent::SCALAR]; + PatchRecordComponent pprc = + o.iterations[6] + .particles["electrons"] + .particlePatches["numParticles"][RecordComponent::SCALAR]; auto dset = Dataset(Datatype::DOUBLE, {1}); - o.iterations[6].particles["electrons"]["position"][RecordComponent::SCALAR].resetDataset(dset); - o.iterations[6].particles["electrons"]["positionOffset"][RecordComponent::SCALAR].resetDataset(dset); + o.iterations[6] + .particles["electrons"]["position"][RecordComponent::SCALAR] + .resetDataset(dset); + o.iterations[6] + .particles["electrons"]["positionOffset"][RecordComponent::SCALAR] + .resetDataset(dset); pprc.resetDataset(Dataset(determineDatatype(), {4})); - pprc.store(0, static_cast< uint64_t >(1)); - REQUIRE(static_cast< Parameter< Operation::WRITE_DATASET >* >(pprc.get().m_chunks.front().parameter.get())->data.use_count() == 1); + pprc.store(0, static_cast(1)); + REQUIRE( + static_cast *>( + pprc.get().m_chunks.front().parameter.get()) + ->data.use_count() == 1); #endif } -TEST_CASE( "empty_record_test", "[core]" ) +TEST_CASE("empty_record_test", "[core]") { Series o = Series("./new_openpmd_output.json", Access::CREATE); - o.iterations[1].meshes["E"].setComment("No assumption about contained RecordComponents will be made"); - REQUIRE_THROWS_WITH(o.flush(), - Catch::Equals("A Record can not be written without any contained RecordComponents: E")); - o.iterations[1].meshes["E"][RecordComponent::SCALAR].resetDataset(Dataset(Datatype::DOUBLE, {1})); + o.iterations[1].meshes["E"].setComment( + "No assumption about contained RecordComponents will be made"); + REQUIRE_THROWS_WITH( + o.flush(), + Catch::Equals("A Record can not be written without any contained " + "RecordComponents: E")); + o.iterations[1].meshes["E"][RecordComponent::SCALAR].resetDataset( + Dataset(Datatype::DOUBLE, {1})); o.flush(); } -TEST_CASE( "zero_extent_component", "[core]" ) +TEST_CASE("zero_extent_component", "[core]") { Series o = Series("./new_openpmd_output.json", Access::CREATE); auto E_x = o.iterations[1].meshes["E"]["x"]; E_x.setComment("Datasets must contain dimensions."); - //REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::LONG, {})), + // REQUIRE_THROWS_WITH(E_x.resetDataset(Dataset(Datatype::LONG, {})), // Catch::Equals("Dataset extent must be at least 1D.")); - REQUIRE_THROWS_WITH(E_x.makeEmpty(0), - Catch::Equals("Dataset extent must be at least 1D.")); + REQUIRE_THROWS_WITH( + E_x.makeEmpty(0), + Catch::Equals("Dataset extent must be at least 1D.")); E_x.resetDataset(Dataset(Datatype::DOUBLE, {1})); } -TEST_CASE( "no_file_ending", "[core]" ) +TEST_CASE("no_file_ending", "[core]") { - REQUIRE_THROWS_WITH(Series("./new_openpmd_output", Access::CREATE), - Catch::Equals("Unknown file format! Did you specify a file ending?")); - REQUIRE_THROWS_WITH(Series("./new_openpmd_output_%T", Access::CREATE), - Catch::Equals("Unknown file format! Did you specify a file ending?")); - REQUIRE_THROWS_WITH(Series("./new_openpmd_output_%05T", Access::CREATE), - Catch::Equals("Unknown file format! Did you specify a file ending?")); + REQUIRE_THROWS_WITH( + Series("./new_openpmd_output", Access::CREATE), + Catch::Equals("Unknown file format! 
Did you specify a file ending?")); + REQUIRE_THROWS_WITH( + Series("./new_openpmd_output_%T", Access::CREATE), + Catch::Equals("Unknown file format! Did you specify a file ending?")); + REQUIRE_THROWS_WITH( + Series("./new_openpmd_output_%05T", Access::CREATE), + Catch::Equals("Unknown file format! Did you specify a file ending?")); { Series( "../samples/no_extension_specified", Access::CREATE, - R"({"backend": "json"})" ); + R"({"backend": "json"})"); } - REQUIRE( - auxiliary::file_exists( "../samples/no_extension_specified.json" ) ); + REQUIRE(auxiliary::file_exists("../samples/no_extension_specified.json")); } -TEST_CASE( "backend_via_json", "[core]" ) +TEST_CASE("backend_via_json", "[core]") { std::string encodingVariableBased = R"({"backend": "json", "iteration_encoding": "variable_based"})"; { Series series( - "../samples/optionsViaJson", - Access::CREATE, - encodingVariableBased ); - REQUIRE( series.backend() == "JSON" ); - REQUIRE( - series.iterationEncoding() == IterationEncoding::variableBased ); + "../samples/optionsViaJson", Access::CREATE, encodingVariableBased); + REQUIRE(series.backend() == "JSON"); + REQUIRE(series.iterationEncoding() == IterationEncoding::variableBased); } #if openPMD_HAVE_ADIOS2 { @@ -899,10 +1064,9 @@ TEST_CASE( "backend_via_json", "[core]" ) Series series( "../samples/optionsViaJson.bp", Access::CREATE, - encodingVariableBased ); - REQUIRE( series.backend() == "JSON" ); - REQUIRE( - series.iterationEncoding() == IterationEncoding::variableBased ); + encodingVariableBased); + REQUIRE(series.backend() == "JSON"); + REQUIRE(series.iterationEncoding() == IterationEncoding::variableBased); } { @@ -912,13 +1076,13 @@ TEST_CASE( "backend_via_json", "[core]" ) Series series( "../samples/optionsViaJsonOverwritesAutomaticDetection.sst", Access::CREATE, - R"({"adios2": {"engine": {"type": "bp4"}}})" ); + R"({"adios2": {"engine": {"type": "bp4"}}})"); } - REQUIRE( auxiliary::directory_exists( - "../samples/optionsViaJsonOverwritesAutomaticDetection.bp" ) ); + REQUIRE(auxiliary::directory_exists( + "../samples/optionsViaJsonOverwritesAutomaticDetection.bp")); #if openPMD_HAVE_ADIOS1 - setenv( "OPENPMD_BP_BACKEND", "ADIOS1", 1 ); + setenv("OPENPMD_BP_BACKEND", "ADIOS1", 1); { /* * ADIOS2 backend should be selected even if OPENPMD_BP_BACKEND is set @@ -928,13 +1092,13 @@ TEST_CASE( "backend_via_json", "[core]" ) Series series( "../samples/optionsPreferJsonOverEnvVar.bp", Access::CREATE, - R"({"backend": "ADIOS2"})" ); - REQUIRE( series.backend() == "ADIOS2" ); + R"({"backend": "ADIOS2"})"); + REQUIRE(series.backend() == "ADIOS2"); } // unset again - unsetenv( "OPENPMD_BP_BACKEND" ); - REQUIRE( auxiliary::directory_exists( - "../samples/optionsPreferJsonOverEnvVar.bp" ) ); + unsetenv("OPENPMD_BP_BACKEND"); + REQUIRE(auxiliary::directory_exists( + "../samples/optionsPreferJsonOverEnvVar.bp")); #endif #endif std::string encodingFileBased = @@ -945,22 +1109,21 @@ TEST_CASE( "backend_via_json", "[core]" ) * pattern is detected in the filename. */ REQUIRE_THROWS_AS( - [ & ]() { + [&]() { Series series( "../samples/optionsViaJson", Access::CREATE, - encodingFileBased ); + encodingFileBased); }(), - error::WrongAPIUsage ); + error::WrongAPIUsage); } { /* - * ... but specifying both the pattern and the option in JSON should work. + * ... but specifying both the pattern and the option in JSON should + * work. 
*/ Series series( - "../samples/optionsViaJson%06T", - Access::CREATE, - encodingFileBased ); + "../samples/optionsViaJson%06T", Access::CREATE, encodingFileBased); series.iterations[1456]; } std::string encodingGroupBased = @@ -974,166 +1137,162 @@ TEST_CASE( "backend_via_json", "[core]" ) Series series( "../samples/optionsViaJsonPseudoFilebased%T.json", Access::CREATE, - encodingGroupBased ); - REQUIRE( series.iterationEncoding() == IterationEncoding::groupBased ); + encodingGroupBased); + REQUIRE(series.iterationEncoding() == IterationEncoding::groupBased); } - REQUIRE( auxiliary::file_exists( - "../samples/optionsViaJsonPseudoFilebased%T.json" ) ); + REQUIRE(auxiliary::file_exists( + "../samples/optionsViaJsonPseudoFilebased%T.json")); } -TEST_CASE( "custom_geometries", "[core]" ) +TEST_CASE("custom_geometries", "[core]") { - std::vector< int > sampleData( 10, 0 ); + std::vector sampleData(10, 0); { - Series write( "../samples/custom_geometry.json", Access::CREATE ); - auto E = write.iterations[ 0 ].meshes[ "E" ]; - E.setAttribute( "geometry", "other:customGeometry" ); - auto E_x = E[ "x" ]; - E_x.resetDataset( { Datatype::INT, { 10 } } ); - E_x.storeChunk( sampleData, { 0 }, { 10 } ); - - auto B = write.iterations[ 0 ].meshes[ "B" ]; - B.setGeometry( "customGeometry" ); - auto B_x = B[ "x" ]; - B_x.resetDataset( { Datatype::INT, { 10 } } ); - B_x.storeChunk( sampleData, { 0 }, { 10 } ); - - auto e_energyDensity = - write.iterations[ 0 ].meshes[ "e_energyDensity" ]; - e_energyDensity.setGeometry( "other:customGeometry" ); - auto e_energyDensity_x = e_energyDensity[ RecordComponent::SCALAR ]; - e_energyDensity_x.resetDataset( { Datatype::INT, { 10 } } ); - e_energyDensity_x.storeChunk( sampleData, { 0 }, { 10 } ); - - auto e_chargeDensity = - write.iterations[ 0 ].meshes[ "e_chargeDensity" ]; - e_chargeDensity.setGeometry( Mesh::Geometry::other ); - auto e_chargeDensity_x = e_chargeDensity[ MeshRecordComponent::SCALAR ]; - e_chargeDensity_x.resetDataset( { Datatype::INT, { 10 } } ); - e_chargeDensity_x.storeChunk( sampleData, { 0 }, { 10 } ); + Series write("../samples/custom_geometry.json", Access::CREATE); + auto E = write.iterations[0].meshes["E"]; + E.setAttribute("geometry", "other:customGeometry"); + auto E_x = E["x"]; + E_x.resetDataset({Datatype::INT, {10}}); + E_x.storeChunk(sampleData, {0}, {10}); + + auto B = write.iterations[0].meshes["B"]; + B.setGeometry("customGeometry"); + auto B_x = B["x"]; + B_x.resetDataset({Datatype::INT, {10}}); + B_x.storeChunk(sampleData, {0}, {10}); + + auto e_energyDensity = write.iterations[0].meshes["e_energyDensity"]; + e_energyDensity.setGeometry("other:customGeometry"); + auto e_energyDensity_x = e_energyDensity[RecordComponent::SCALAR]; + e_energyDensity_x.resetDataset({Datatype::INT, {10}}); + e_energyDensity_x.storeChunk(sampleData, {0}, {10}); + + auto e_chargeDensity = write.iterations[0].meshes["e_chargeDensity"]; + e_chargeDensity.setGeometry(Mesh::Geometry::other); + auto e_chargeDensity_x = e_chargeDensity[MeshRecordComponent::SCALAR]; + e_chargeDensity_x.resetDataset({Datatype::INT, {10}}); + e_chargeDensity_x.storeChunk(sampleData, {0}, {10}); } { - Series read( "../samples/custom_geometry.json", Access::READ_ONLY ); - auto E = read.iterations[ 0 ].meshes[ "E" ]; + Series read("../samples/custom_geometry.json", Access::READ_ONLY); + auto E = read.iterations[0].meshes["E"]; REQUIRE( - E.getAttribute( "geometry" ).get< std::string >() == - "other:customGeometry" ); - REQUIRE( E.geometry() == Mesh::Geometry::other ); - REQUIRE( 
E.geometryString() == "other:customGeometry" ); + E.getAttribute("geometry").get() == + "other:customGeometry"); + REQUIRE(E.geometry() == Mesh::Geometry::other); + REQUIRE(E.geometryString() == "other:customGeometry"); - auto B = read.iterations[ 0 ].meshes[ "B" ]; + auto B = read.iterations[0].meshes["B"]; REQUIRE( - B.getAttribute( "geometry" ).get< std::string >() == - "other:customGeometry" ); - REQUIRE( B.geometry() == Mesh::Geometry::other ); - REQUIRE( B.geometryString() == "other:customGeometry" ); + B.getAttribute("geometry").get() == + "other:customGeometry"); + REQUIRE(B.geometry() == Mesh::Geometry::other); + REQUIRE(B.geometryString() == "other:customGeometry"); - auto e_energyDensity = read.iterations[ 0 ].meshes[ "e_energyDensity" ]; + auto e_energyDensity = read.iterations[0].meshes["e_energyDensity"]; REQUIRE( - e_energyDensity.getAttribute( "geometry" ).get< std::string >() == - "other:customGeometry" ); - REQUIRE( e_energyDensity.geometry() == Mesh::Geometry::other ); - REQUIRE( e_energyDensity.geometryString() == "other:customGeometry" ); + e_energyDensity.getAttribute("geometry").get() == + "other:customGeometry"); + REQUIRE(e_energyDensity.geometry() == Mesh::Geometry::other); + REQUIRE(e_energyDensity.geometryString() == "other:customGeometry"); - auto e_chargeDensity = read.iterations[ 0 ].meshes[ "e_chargeDensity" ]; + auto e_chargeDensity = read.iterations[0].meshes["e_chargeDensity"]; REQUIRE( - e_chargeDensity.getAttribute( "geometry" ).get< std::string >() == - "other" ); - REQUIRE( e_chargeDensity.geometry() == Mesh::Geometry::other ); - REQUIRE( e_chargeDensity.geometryString() == "other" ); + e_chargeDensity.getAttribute("geometry").get() == + "other"); + REQUIRE(e_chargeDensity.geometry() == Mesh::Geometry::other); + REQUIRE(e_chargeDensity.geometryString() == "other"); } } -TEST_CASE( "load_chunk_wrong_datatype", "[core]" ) +TEST_CASE("load_chunk_wrong_datatype", "[core]") { { - Series write( "../samples/some_float_value.json", Access::CREATE ); - Dataset ds{ Datatype::FLOAT, { 10 } }; - std::vector< float > sampleData( 10, 1234.5 ); - auto rc = - write.iterations[ 0 ].meshes[ "rho" ][ RecordComponent::SCALAR ]; - rc.resetDataset( ds ); - rc.storeChunk( sampleData, { 0 }, { 10 } ); + Series write("../samples/some_float_value.json", Access::CREATE); + Dataset ds{Datatype::FLOAT, {10}}; + std::vector sampleData(10, 1234.5); + auto rc = write.iterations[0].meshes["rho"][RecordComponent::SCALAR]; + rc.resetDataset(ds); + rc.storeChunk(sampleData, {0}, {10}); write.flush(); } { - Series read( "../samples/some_float_value.json", Access::READ_ONLY ); + Series read("../samples/some_float_value.json", Access::READ_ONLY); REQUIRE_THROWS_WITH( - read.iterations[ 0 ] - .meshes[ "rho" ][ RecordComponent::SCALAR ] - .loadChunk< double >( { 0 }, { 10 } ), + read.iterations[0] + .meshes["rho"][RecordComponent::SCALAR] + .loadChunk({0}, {10}), Catch::Equals( - "Type conversion during chunk loading not yet implemented" ) ); + "Type conversion during chunk loading not yet implemented")); } } -TEST_CASE( "DoConvert_single_value_to_vector", "[core]" ) +TEST_CASE("DoConvert_single_value_to_vector", "[core]") { #if openPMD_HAVE_ADIOS2 { - Series write( "../samples/writeSingleMesh.bp", Access::CREATE ); - auto E_x = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 10 } } ); - E_x.makeConstant( 10 ); + Series write("../samples/writeSingleMesh.bp", Access::CREATE); + auto E_x = write.iterations[0].meshes["E"]["x"]; + 
E_x.resetDataset({Datatype::INT, {10}});
+        E_x.makeConstant(10);
     }
     {
-        Series read( "../samples/writeSingleMesh.bp", Access::READ_ONLY );
-        auto E = read.iterations[ 0 ].meshes[ "E" ];
-        REQUIRE( E.axisLabels() == std::vector< std::string >{ "x" } );
+        Series read("../samples/writeSingleMesh.bp", Access::READ_ONLY);
+        auto E = read.iterations[0].meshes["E"];
+        REQUIRE(E.axisLabels() == std::vector<std::string>{"x"});
     }
 #endif
     {
         char val = 'x';
-        Attribute attr{ val };
+        Attribute attr{val};
         // the following conversions should be possible
-        REQUIRE( attr.get< char >() == 'x' ); // no conversion
-        REQUIRE( attr.get< unsigned char >() == 'x' );
-        REQUIRE( attr.get< signed char >() == 'x' );
+        REQUIRE(attr.get<char>() == 'x'); // no conversion
+        REQUIRE(attr.get<unsigned char>() == 'x');
+        REQUIRE(attr.get<signed char>() == 'x');
         // all the previous ones, but make them single-element vectors now
+        REQUIRE(attr.get<std::vector<char>>() == std::vector<char>{'x'});
         REQUIRE(
-            attr.get< std::vector< char > >() == std::vector< char >{ 'x' } );
-        REQUIRE(
-            attr.get< std::vector< unsigned char > >() ==
-            std::vector< unsigned char >{ 'x' } );
+            attr.get<std::vector<unsigned char>>() ==
+            std::vector<unsigned char>{'x'});
         REQUIRE(
-            attr.get< std::vector< signed char > >() ==
-            std::vector< signed char >{ 'x' } );
+            attr.get<std::vector<signed char>>() ==
+            std::vector<signed char>{'x'});
     }
     {
-        std::array< double, 7 > array{{ 0, 1, 2, 3, 4, 5, 6 }};
-        Attribute attr{ array };
+        std::array<double, 7> array{{0, 1, 2, 3, 4, 5, 6}};
+        Attribute attr{array};
         // the following conversions should be possible
-        REQUIRE( attr.get< std::array< double, 7 > >() == array );
+        REQUIRE(attr.get<std::array<double, 7>>() == array);
         // we don't need array-to-array conversions,
         // so array< int, 7 > cannot be loaded here
         REQUIRE(
-            attr.get< std::vector< double > >() ==
-            std::vector< double >{ 0, 1, 2, 3, 4, 5, 6 } );
+            attr.get<std::vector<double>>() ==
+            std::vector<double>{0, 1, 2, 3, 4, 5, 6});
         REQUIRE(
-            attr.get< std::vector< int > >() ==
-            std::vector< int >{ 0, 1, 2, 3, 4, 5, 6 } );
+            attr.get<std::vector<int>>() ==
+            std::vector<int>{0, 1, 2, 3, 4, 5, 6});
     }
     {
-        std::vector< double > vector{ 0, 1, 2, 3, 4, 5, 6 };
-        std::array< double, 7 > arraydouble{{ 0, 1, 2, 3, 4, 5, 6 }};
-        std::array< int, 7 > arrayint{{ 0, 1, 2, 3, 4, 5, 6 }};
-        Attribute attr{ vector };
+        std::vector<double> vector{0, 1, 2, 3, 4, 5, 6};
+        std::array<double, 7> arraydouble{{0, 1, 2, 3, 4, 5, 6}};
+        std::array<int, 7> arrayint{{0, 1, 2, 3, 4, 5, 6}};
+        Attribute attr{vector};
         // the following conversions should be possible
-        REQUIRE( attr.get< std::array< double, 7 > >() == arraydouble );
-        REQUIRE( attr.get< std::array< int, 7 > >() == arrayint );
+        REQUIRE(attr.get<std::array<double, 7>>() == arraydouble);
+        REQUIRE(attr.get<std::array<int, 7>>() == arrayint);
         REQUIRE_THROWS_WITH(
-            ( attr.get< std::array< int, 8 > >() ),
-            Catch::Equals( "getCast: no vector to array conversion possible " "(wrong requested array size)."
) ); + (attr.get>()), + Catch::Equals("getCast: no vector to array conversion possible " + "(wrong requested array size).")); REQUIRE( - attr.get< std::vector< double > >() == - std::vector< double >{ 0, 1, 2, 3, 4, 5, 6 } ); + attr.get>() == + std::vector{0, 1, 2, 3, 4, 5, 6}); REQUIRE( - attr.get< std::vector< int > >() == - std::vector< int >{ 0, 1, 2, 3, 4, 5, 6 } ); + attr.get>() == + std::vector{0, 1, 2, 3, 4, 5, 6}); } } diff --git a/test/JSONTest.cpp b/test/JSONTest.cpp index 268c92368f..686306700e 100644 --- a/test/JSONTest.cpp +++ b/test/JSONTest.cpp @@ -5,10 +5,9 @@ #include - using namespace openPMD; -TEST_CASE( "json_parsing", "[auxiliary]" ) +TEST_CASE("json_parsing", "[auxiliary]") { std::string wrongValue = R"END( { @@ -18,10 +17,10 @@ TEST_CASE( "json_parsing", "[auxiliary]" ) } })END"; REQUIRE_THROWS_WITH( - json::parseOptions( wrongValue, false ), + json::parseOptions(wrongValue, false), error::BackendConfigSchema( - { "adios2", "duplicate key" }, "JSON config: duplicate keys." ) - .what() ); + {"adios2", "duplicate key"}, "JSON config: duplicate keys.") + .what()); std::string same1 = R"( { "ADIOS2": { @@ -53,12 +52,12 @@ TEST_CASE( "json_parsing", "[auxiliary]" ) } })"; REQUIRE( - json::parseOptions( same1, false ).config.dump() == - json::parseOptions( same2, false ).config.dump() ); + json::parseOptions(same1, false).config.dump() == + json::parseOptions(same2, false).config.dump()); // Only keys should be transformed to lower case, values must stay the same REQUIRE( - json::parseOptions( same1, false ).config.dump() != - json::parseOptions( different, false ).config.dump() ); + json::parseOptions(same1, false).config.dump() != + json::parseOptions(different, false).config.dump()); // Keys forwarded to ADIOS2 should remain untouched std::string upper = R"END( @@ -113,14 +112,14 @@ TEST_CASE( "json_parsing", "[auxiliary]" ) } } )END"; - nlohmann::json jsonUpper = nlohmann::json::parse( upper ); - nlohmann::json jsonLower = nlohmann::json::parse( lower ); - REQUIRE( jsonUpper.dump() != jsonLower.dump() ); - json::lowerCase( jsonUpper ); - REQUIRE( jsonUpper.dump() == jsonLower.dump() ); + nlohmann::json jsonUpper = nlohmann::json::parse(upper); + nlohmann::json jsonLower = nlohmann::json::parse(lower); + REQUIRE(jsonUpper.dump() != jsonLower.dump()); + json::lowerCase(jsonUpper); + REQUIRE(jsonUpper.dump() == jsonLower.dump()); } -TEST_CASE( "json_merging", "auxiliary" ) +TEST_CASE("json_merging", "auxiliary") { std::string defaultVal = R"END( { @@ -170,6 +169,6 @@ TEST_CASE( "json_merging", "auxiliary" ) ] })END"; REQUIRE( - json::merge( defaultVal, overwrite ) == - json::parseOptions( expect, false ).config.dump() ); + json::merge(defaultVal, overwrite) == + json::parseOptions(expect, false).config.dump()); } diff --git a/test/ParallelIOTest.cpp b/test/ParallelIOTest.cpp index f33fbdc4e8..4f9c2acb3c 100644 --- a/test/ParallelIOTest.cpp +++ b/test/ParallelIOTest.cpp @@ -7,24 +7,25 @@ #include #if openPMD_HAVE_MPI -# include - -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include using namespace openPMD; -std::vector getBackends() { +std::vector getBackends() +{ // first component: backend file ending // second component: whether to test 128 bit values std::vector res; @@ -41,61 +42,72 @@ auto const backends = getBackends(); #else -TEST_CASE( "none", "[parallel]" 
)
-{ }
+TEST_CASE("none", "[parallel]")
+{}
 #endif
 #if openPMD_HAVE_MPI
-TEST_CASE( "parallel_multi_series_test", "[parallel]" )
+TEST_CASE("parallel_multi_series_test", "[parallel]")
 {
-    std::list< Series > allSeries;
+    std::list<Series> allSeries;
     auto myBackends = getBackends();
-    // this test demonstrates an ADIOS1 (upstream) bug, comment this section to trigger it
-    auto const rmEnd = std::remove_if( myBackends.begin(), myBackends.end(), [](std::string const & beit) {
-        return beit == "bp" &&
-            determineFormat("test.bp") == Format::ADIOS1;
-    });
+    // this test demonstrates an ADIOS1 (upstream) bug, comment this section to
+    // trigger it
+    auto const rmEnd = std::remove_if(
+        myBackends.begin(), myBackends.end(), [](std::string const &beit) {
+            return beit == "bp" && determineFormat("test.bp") == Format::ADIOS1;
+        });
     myBackends.erase(rmEnd, myBackends.end());
     // have multiple serial series alive at the same time
-    for (auto const sn : {1, 2, 3}) {
-        for (auto const & t: myBackends)
+    for (auto const sn : {1, 2, 3})
+    {
+        for (auto const &t : myBackends)
         {
             auto const file_ending = t;
             std::cout << file_ending << std::endl;
             allSeries.emplace_back(
-                std::string("../samples/parallel_multi_open_test_").
-                append(std::to_string(sn)).append(".").append(file_ending),
-                Access::CREATE,
-                MPI_COMM_WORLD
-            );
+                std::string("../samples/parallel_multi_open_test_")
+                    .append(std::to_string(sn))
+                    .append(".")
+                    .append(file_ending),
+                Access::CREATE,
+                MPI_COMM_WORLD);
             allSeries.back().iterations[sn].setAttribute("wululu", sn);
             allSeries.back().flush();
         }
     }
     // skip some series: sn=1
     auto it = allSeries.begin();
-    std::for_each( myBackends.begin(), myBackends.end(), [&it](std::string const &){
-        it++;
-    });
+    std::for_each(
+        myBackends.begin(), myBackends.end(), [&it](std::string const &) {
+            it++;
+        });
     // remove some series: sn=2
-    std::for_each( myBackends.begin(), myBackends.end(), [&it, &allSeries](std::string const &){
-        it = allSeries.erase(it);
-    });
+    std::for_each(
+        myBackends.begin(),
+        myBackends.end(),
+        [&it, &allSeries](std::string const &) { it = allSeries.erase(it); });
     // write from last series: sn=3
-    std::for_each( myBackends.begin(), myBackends.end(), [&it](std::string const &){
-        it->iterations[10].setAttribute("wululu", 10);
-        it->flush();
-        it++;
-    });
+    std::for_each(
+        myBackends.begin(), myBackends.end(), [&it](std::string const &) {
+            it->iterations[10].setAttribute("wululu", 10);
+            it->flush();
+            it++;
+        });
     // remove all leftover series
     allSeries.clear();
 }
-void write_test_zero_extent( bool fileBased, std::string file_ending, bool writeAllChunks, bool declareFromAll ) {
+void write_test_zero_extent(
+    bool fileBased,
+    std::string file_ending,
+    bool writeAllChunks,
+    bool declareFromAll)
+{
     int mpi_s{-1};
     int mpi_r{-1};
     MPI_Comm_size(MPI_COMM_WORLD, &mpi_s);
@@ -104,17 +116,22 @@ void write_test_zero_extent( bool fileBased, std::string file_ending, bool write
     auto rank = static_cast<uint64_t>(mpi_r);
     std::string filePath = "../samples/parallel_write_zero_extent";
-    if( fileBased )
+    if (fileBased)
         filePath += "_%07T";
-    Series o = Series(filePath.append(".").append(file_ending), Access::CREATE, MPI_COMM_WORLD);
+    Series o = Series(
+        filePath.append(".").append(file_ending),
+        Access::CREATE,
+        MPI_COMM_WORLD);
     int const max_step = 100;
-    for( int step=0; step<=max_step; step+=20 ) {
+    for (int step = 0; step <= max_step; step += 20)
+    {
         Iteration it = o.iterations[step];
         it.setAttribute("yolo", "yo");
-        if( rank != 0 || declareFromAll ) {
+        if (rank != 0 || declareFromAll)
+        {
ParticleSpecies e = it.particles["e"]; /* every rank n writes n consecutive cells, increasing values @@ -122,102 +139,146 @@ void write_test_zero_extent( bool fileBased, std::string file_ending, bool write * two ranks will result in {1} * three ranks will result in {1, 2, 3} * four ranks will result in {1, 2, 3, 4, 5, 6} */ - uint64_t num_cells = ((size - 1) * (size - 1) + (size - 1)) / 2; /* (n^2 + n) / 2 */ - if (num_cells == 0u) { - std::cerr << "Test can only be run with at least two ranks" << std::endl; + uint64_t num_cells = + ((size - 1) * (size - 1) + (size - 1)) / 2; /* (n^2 + n) / 2 */ + if (num_cells == 0u) + { + std::cerr << "Test can only be run with at least two ranks" + << std::endl; return; } std::vector position_global(num_cells); double pos{1.}; - std::generate(position_global.begin(), position_global.end(), [&pos] { return pos++; }); - std::shared_ptr position_local(new double[rank], [](double const *p) { delete[] p; }); + std::generate( + position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local( + new double[rank], [](double const *p) { delete[] p; }); uint64_t offset; if (rank != 0) offset = ((rank - 1) * (rank - 1) + (rank - 1)) / 2; else offset = 0; - e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {num_cells})); + e["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local), {num_cells})); std::vector positionOffset_global(num_cells); uint64_t posOff{1}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { return posOff++; }); - std::shared_ptr positionOffset_local(new uint64_t[rank], [](uint64_t const *p) { delete[] p; }); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local( + new uint64_t[rank], [](uint64_t const *p) { delete[] p; }); - e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {num_cells})); + e["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local), {num_cells})); - for (uint64_t i = 0; i < rank; ++i) { + for (uint64_t i = 0; i < rank; ++i) + { position_local.get()[i] = position_global[offset + i]; - positionOffset_local.get()[i] = positionOffset_global[offset + i]; + positionOffset_local.get()[i] = + positionOffset_global[offset + i]; } - if (rank != 0 || writeAllChunks) { + if (rank != 0 || writeAllChunks) + { e["position"]["x"].storeChunk(position_local, {offset}, {rank}); - e["positionOffset"]["x"].storeChunk(positionOffset_local, {offset}, {rank}); + e["positionOffset"]["x"].storeChunk( + positionOffset_local, {offset}, {rank}); } } o.flush(); } - //TODO read back, verify + // TODO read back, verify } #endif #if openPMD_HAVE_HDF5 && openPMD_HAVE_MPI -TEST_CASE( "git_hdf5_sample_content_test", "[parallel][hdf5]" ) +TEST_CASE("git_hdf5_sample_content_test", "[parallel][hdf5]") { int mpi_rank{-1}; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* only a 3x3x3 chunk of the actual data is hardcoded. every worker reads 1/3 */ + /* only a 3x3x3 chunk of the actual data is hardcoded. 
every worker reads + * 1/3 */ uint64_t rank = mpi_rank % 3; try { - Series o = Series("../samples/git-sample/data00000%T.h5", Access::READ_ONLY, MPI_COMM_WORLD); + Series o = Series( + "../samples/git-sample/data00000%T.h5", + Access::READ_ONLY, + MPI_COMM_WORLD); { - double actual[3][3][3] = {{{-1.9080703683727052e-09, -1.5632650729457964e-10, 1.1497536256399599e-09}, - {-1.9979540244463578e-09, -2.5512036927466397e-10, 1.0402234629225404e-09}, - {-1.7353589676361025e-09, -8.0899198451334087e-10, -1.6443779671249104e-10}}, - - {{-2.0029988778702545e-09, -1.9543477947081556e-10, 1.0916454407094989e-09}, - {-2.3890367462087170e-09, -4.7158010829662089e-10, 9.0026075483251589e-10}, - {-1.9033881137886510e-09, -7.5192119197708962e-10, 5.0038861942880430e-10}}, - - {{-1.3271805876513554e-09, -5.9243276950837753e-10, -2.2445734160214670e-10}, - {-7.4578609954301101e-10, -1.1995737736469891e-10, 2.5611823772919706e-10}, - {-9.4806251738077663e-10, -1.5472800818372434e-10, -3.6461900165818406e-10}}}; - MeshRecordComponent& rho = o.iterations[100].meshes["rho"][MeshRecordComponent::SCALAR]; + double actual[3][3][3] = { + {{-1.9080703683727052e-09, + -1.5632650729457964e-10, + 1.1497536256399599e-09}, + {-1.9979540244463578e-09, + -2.5512036927466397e-10, + 1.0402234629225404e-09}, + {-1.7353589676361025e-09, + -8.0899198451334087e-10, + -1.6443779671249104e-10}}, + + {{-2.0029988778702545e-09, + -1.9543477947081556e-10, + 1.0916454407094989e-09}, + {-2.3890367462087170e-09, + -4.7158010829662089e-10, + 9.0026075483251589e-10}, + {-1.9033881137886510e-09, + -7.5192119197708962e-10, + 5.0038861942880430e-10}}, + + {{-1.3271805876513554e-09, + -5.9243276950837753e-10, + -2.2445734160214670e-10}, + {-7.4578609954301101e-10, + -1.1995737736469891e-10, + 2.5611823772919706e-10}, + {-9.4806251738077663e-10, + -1.5472800818372434e-10, + -3.6461900165818406e-10}}}; + MeshRecordComponent &rho = + o.iterations[100].meshes["rho"][MeshRecordComponent::SCALAR]; Offset offset{20 + rank, 20, 190}; Extent extent{1, 3, 3}; auto data = rho.loadChunk(offset, extent); o.flush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int j = 0; j < 3; ++j ) - for( int k = 0; k < 3; ++k ) - REQUIRE(raw_ptr[j*3 + k] == actual[rank][j][k]); + for (int j = 0; j < 3; ++j) + for (int k = 0; k < 3; ++k) + REQUIRE(raw_ptr[j * 3 + k] == actual[rank][j][k]); } { double constant_value = 9.1093829099999999e-31; - RecordComponent& electrons_mass = o.iterations[100].particles["electrons"]["mass"][RecordComponent::SCALAR]; - Offset offset{(rank+1) * 5}; + RecordComponent &electrons_mass = + o.iterations[100] + .particles["electrons"]["mass"][RecordComponent::SCALAR]; + Offset offset{(rank + 1) * 5}; Extent extent{3}; auto data = electrons_mass.loadChunk(offset, extent); o.flush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int i = 0; i < 3; ++i ) + for (int i = 0; i < 3; ++i) REQUIRE(raw_ptr[i] == constant_value); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "hdf5_write_test", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test", "[parallel][hdf5]") { int mpi_s{-1}; int mpi_r{-1}; @@ -225,60 +286,70 @@ TEST_CASE( "hdf5_write_test", "[parallel][hdf5]" ) MPI_Comm_rank(MPI_COMM_WORLD, &mpi_r); auto mpi_size = static_cast(mpi_s); auto mpi_rank = static_cast(mpi_r); - Series o = Series("../samples/parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); + Series o = + Series("../samples/parallel_write.h5", Access::CREATE, MPI_COMM_WORLD); REQUIRE_THROWS_AS(o.setAuthor(""), std::runtime_error); o.setAuthor("Parallel HDF5"); - ParticleSpecies& e = o.iterations[1].particles["e"]; + ParticleSpecies &e = o.iterations[1].particles["e"]; - std::vector< double > position_global(mpi_size); + std::vector position_global(mpi_size); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local(new double); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local(new double); *position_local = position_global[mpi_rank]; - e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {mpi_size})); + e["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local), {mpi_size})); e["position"]["x"].storeChunk(position_local, {mpi_rank}, {1}); - std::vector< uint64_t > positionOffset_global(mpi_size); + std::vector positionOffset_global(mpi_size); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local(new uint64_t); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local(new uint64_t); *positionOffset_local = positionOffset_global[mpi_rank]; - e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {mpi_size})); + e["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local), {mpi_size})); e["positionOffset"]["x"].storeChunk(positionOffset_local, {mpi_rank}, {1}); o.flush(); } -TEST_CASE( "hdf5_write_test_zero_extent", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test_zero_extent", "[parallel][hdf5]") { - write_test_zero_extent( false, "h5", true, true ); - write_test_zero_extent( true, "h5", true, true ); + write_test_zero_extent(false, "h5", true, true); + write_test_zero_extent(true, "h5", true, true); } -TEST_CASE( "hdf5_write_test_skip_chunk", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test_skip_chunk", "[parallel][hdf5]") { //! @todo add via JSON option instead of environment read - auto const hdf5_collective = auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "ON" ); - if( hdf5_collective == "ON" ) + auto const hdf5_collective = + auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "ON"); + if (hdf5_collective == "ON") { - write_test_zero_extent( false, "h5", false, true ); - write_test_zero_extent( true, "h5", false, true ); + write_test_zero_extent(false, "h5", false, true); + write_test_zero_extent(true, "h5", false, true); } else REQUIRE(true); } -TEST_CASE( "hdf5_write_test_skip_declare", "[parallel][hdf5]" ) +TEST_CASE("hdf5_write_test_skip_declare", "[parallel][hdf5]") { //! 
@todo add via JSON option instead of environment read - auto const hdf5_collective = auxiliary::getEnvString( "OPENPMD_HDF5_INDEPENDENT", "OFF" ); - if( hdf5_collective == "ON" ) + auto const hdf5_collective = + auxiliary::getEnvString("OPENPMD_HDF5_INDEPENDENT", "OFF"); + if (hdf5_collective == "ON") { - write_test_zero_extent( false, "h5", false, false ); - write_test_zero_extent( true, "h5", false, false ); + write_test_zero_extent(false, "h5", false, false); + write_test_zero_extent(true, "h5", false, false); } else REQUIRE(true); @@ -286,7 +357,7 @@ TEST_CASE( "hdf5_write_test_skip_declare", "[parallel][hdf5]" ) #else -TEST_CASE( "no_parallel_hdf5", "[parallel][hdf5]" ) +TEST_CASE("no_parallel_hdf5", "[parallel][hdf5]") { REQUIRE(true); } @@ -295,14 +366,13 @@ TEST_CASE( "no_parallel_hdf5", "[parallel][hdf5]" ) // this one works for both ADIOS1 and ADIOS2 #if (openPMD_HAVE_ADIOS1 || openPMD_HAVE_ADIOS2) && openPMD_HAVE_MPI -void -available_chunks_test( std::string file_ending ) +void available_chunks_test(std::string file_ending) { - int r_mpi_rank{ -1 }, r_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &r_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &r_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( r_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( r_mpi_size ) }; + int r_mpi_rank{-1}, r_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &r_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &r_mpi_size); + unsigned mpi_rank{static_cast(r_mpi_rank)}, + mpi_size{static_cast(r_mpi_size)}; std::string name = "../samples/available_chunks." + file_ending; /* @@ -320,126 +390,125 @@ available_chunks_test( std::string file_ending ) "parameters": { "NumAggregators":)END" - << "\"" << std::to_string(mpi_size) << "\"" << R"END( + << "\"" << std::to_string(mpi_size) << "\"" + << R"END( } } } } )END"; - std::vector< int > data{ 2, 4, 6, 8 }; + std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE, MPI_COMM_WORLD, parameters.str() ); - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { mpi_size, 4 } } ); - E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } ); + Series write(name, Access::CREATE, MPI_COMM_WORLD, parameters.str()); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {mpi_size, 4}}); + E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); it0.close(); } { - Series read( name, Access::READ_ONLY, MPI_COMM_WORLD ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; + Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); + Iteration it0 = read.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); std::sort( - table.begin(), - table.end(), - []( auto const & lhs, auto const & rhs ) { - return lhs.offset[ 0 ] < rhs.offset[ 0 ]; - } ); - std::vector< int > ranks; - ranks.reserve( table.size() ); - for( size_t i = 0; i < ranks.size(); ++i ) + table.begin(), table.end(), [](auto const &lhs, auto const &rhs) { + return lhs.offset[0] < rhs.offset[0]; + }); + std::vector ranks; + ranks.reserve(table.size()); + for (size_t i = 0; i < ranks.size(); ++i) { - WrittenChunkInfo const & chunk = table[ i ]; - REQUIRE( chunk.offset == Offset{ i, 0 } ); - REQUIRE( chunk.extent == Extent{ 1, 4 } ); - ranks.emplace_back( chunk.sourceID ); + WrittenChunkInfo const &chunk = table[i]; + REQUIRE(chunk.offset == Offset{i, 0}); + REQUIRE(chunk.extent == Extent{1, 4}); + ranks.emplace_back(chunk.sourceID); } /* * In the BP4 
engine, sourceID corresponds with the BP subfile. * Since those are in a nondeterministic order, simply check that * they are all present. */ - std::sort( ranks.begin(), ranks.end() ); - for( int i = 0; i < int(ranks.size()); ++i ) + std::sort(ranks.begin(), ranks.end()); + for (int i = 0; i < int(ranks.size()); ++i) { - REQUIRE( ranks[ i ] == i ); + REQUIRE(ranks[i] == i); } } } -TEST_CASE( "available_chunks_test", "[parallel][adios]" ) +TEST_CASE("available_chunks_test", "[parallel][adios]") { - available_chunks_test( "bp" ); + available_chunks_test("bp"); } #endif #if openPMD_HAVE_ADIOS2 && openPMD_HAVE_MPI -void -extendDataset( std::string const & ext, std::string const & jsonConfig ) +void extendDataset(std::string const &ext, std::string const &jsonConfig) { std::string filename = "../samples/parallelExtendDataset." + ext; - int r_mpi_rank{ -1 }, r_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &r_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &r_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( r_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( r_mpi_size ) }; - std::vector< int > data1( 25 ); - std::vector< int > data2( 25 ); - std::iota( data1.begin(), data1.end(), 0 ); - std::iota( data2.begin(), data2.end(), 25 ); + int r_mpi_rank{-1}, r_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &r_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &r_mpi_size); + unsigned mpi_rank{static_cast(r_mpi_rank)}, + mpi_size{static_cast(r_mpi_size)}; + std::vector data1(25); + std::vector data2(25); + std::iota(data1.begin(), data1.end(), 0); + std::iota(data2.begin(), data2.end(), 25); { - Series write( filename, Access::CREATE, MPI_COMM_WORLD, jsonConfig ); - if( ext == "bp" && write.backend() != "ADIOS2" ) + Series write(filename, Access::CREATE, MPI_COMM_WORLD, jsonConfig); + if (ext == "bp" && write.backend() != "ADIOS2") { // dataset resizing unsupported in ADIOS1 return; } - Dataset ds1{ Datatype::INT, { mpi_size, 25 } }; - Dataset ds2{ { mpi_size, 50 } }; + Dataset ds1{Datatype::INT, {mpi_size, 25}}; + Dataset ds2{{mpi_size, 50}}; // array record component -> array record component // should work - auto E_x = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - E_x.resetDataset( ds1 ); - E_x.storeChunk( data1, { mpi_rank, 0 }, { 1, 25 } ); + auto E_x = write.iterations[0].meshes["E"]["x"]; + E_x.resetDataset(ds1); + E_x.storeChunk(data1, {mpi_rank, 0}, {1, 25}); write.flush(); - E_x.resetDataset( ds2 ); - E_x.storeChunk( data2, { mpi_rank, 25 }, { 1, 25 } ); + E_x.resetDataset(ds2); + E_x.storeChunk(data2, {mpi_rank, 25}, {1, 25}); write.flush(); } - MPI_Barrier( MPI_COMM_WORLD ); + MPI_Barrier(MPI_COMM_WORLD); { - Series read( filename, Access::READ_ONLY, jsonConfig ); - auto E_x = read.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getExtent() == Extent{ mpi_size, 50 } ); - auto chunk = E_x.loadChunk< int >( { 0, 0 }, { mpi_size, 50 } ); + Series read(filename, Access::READ_ONLY, jsonConfig); + auto E_x = read.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getExtent() == Extent{mpi_size, 50}); + auto chunk = E_x.loadChunk({0, 0}, {mpi_size, 50}); read.flush(); - for( size_t rank = 0; rank < mpi_size; ++rank ) + for (size_t rank = 0; rank < mpi_size; ++rank) { - for( size_t i = 0; i < 50; ++i ) + for (size_t i = 0; i < 50; ++i) { - REQUIRE( chunk.get()[ i ] == int( i ) ); + REQUIRE(chunk.get()[i] == int(i)); } } } } -TEST_CASE( "extend_dataset", "[parallel]" ) +TEST_CASE("extend_dataset", "[parallel]") { - extendDataset( "bp", R"({"backend": "adios2"})" ); + extendDataset("bp", 
R"({"backend": "adios2"})"); } #endif #if openPMD_HAVE_ADIOS1 && openPMD_HAVE_MPI -TEST_CASE( "adios_write_test", "[parallel][adios]" ) +TEST_CASE("adios_write_test", "[parallel][adios]") { - Series o = Series("../samples/parallel_write.bp", Access::CREATE, MPI_COMM_WORLD); + Series o = + Series("../samples/parallel_write.bp", Access::CREATE, MPI_COMM_WORLD); int size{-1}; int rank{-1}; @@ -449,83 +518,96 @@ TEST_CASE( "adios_write_test", "[parallel][adios]" ) auto mpi_rank = static_cast(rank); o.setAuthor("Parallel ADIOS1"); - ParticleSpecies& e = o.iterations[1].particles["e"]; + ParticleSpecies &e = o.iterations[1].particles["e"]; - std::vector< double > position_global(mpi_size); + std::vector position_global(mpi_size); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local(new double); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local(new double); *position_local = position_global[mpi_rank]; - e["position"]["x"].resetDataset(Dataset(determineDatatype(position_local), {mpi_size})); + e["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local), {mpi_size})); e["position"]["x"].storeChunk(position_local, {mpi_rank}, {1}); - std::vector< uint64_t > positionOffset_global(mpi_size); + std::vector positionOffset_global(mpi_size); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local(new uint64_t); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local(new uint64_t); *positionOffset_local = positionOffset_global[mpi_rank]; - e["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local), {mpi_size})); + e["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local), {mpi_size})); e["positionOffset"]["x"].storeChunk(positionOffset_local, {mpi_rank}, {1}); o.flush(); } -TEST_CASE( "adios_write_test_zero_extent", "[parallel][adios]" ) +TEST_CASE("adios_write_test_zero_extent", "[parallel][adios]") { - write_test_zero_extent( false, "bp", true, true ); - write_test_zero_extent( true, "bp", true, true ); + write_test_zero_extent(false, "bp", true, true); + write_test_zero_extent(true, "bp", true, true); } -TEST_CASE( "adios_write_test_skip_chunk", "[parallel][adios]" ) +TEST_CASE("adios_write_test_skip_chunk", "[parallel][adios]") { - write_test_zero_extent( false, "bp", false, true ); - write_test_zero_extent( true, "bp", false, true ); + write_test_zero_extent(false, "bp", false, true); + write_test_zero_extent(true, "bp", false, true); } -TEST_CASE( "adios_write_test_skip_declare", "[parallel][adios]" ) +TEST_CASE("adios_write_test_skip_declare", "[parallel][adios]") { - write_test_zero_extent( false, "bp", false, false ); - write_test_zero_extent( true, "bp", false, false ); + write_test_zero_extent(false, "bp", false, false); + write_test_zero_extent(true, "bp", false, false); } -TEST_CASE( "hzdr_adios_sample_content_test", "[parallel][adios1]" ) +TEST_CASE("hzdr_adios_sample_content_test", "[parallel][adios1]") { int mpi_rank{-1}; MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank); - /* only a 3x3x3 chunk of the actual data is hardcoded. every worker reads 1/3 */ + /* only a 3x3x3 chunk of the actual data is hardcoded. 
every worker reads + * 1/3 */ uint64_t rank = mpi_rank % 3; try { /* development/huebl/lwfa-bgfield-001 */ - Series o = Series("../samples/hzdr-sample/bp/checkpoint_%T.bp", Access::READ_ONLY, MPI_COMM_WORLD); + Series o = Series( + "../samples/hzdr-sample/bp/checkpoint_%T.bp", + Access::READ_ONLY, + MPI_COMM_WORLD); - if( o.iterations.count(0) == 1) + if (o.iterations.count(0) == 1) { - float actual[3][3][3] = {{{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; - - MeshRecordComponent& B_z = o.iterations[0].meshes["B"]["z"]; + float actual[3][3][3] = { + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; + + MeshRecordComponent &B_z = o.iterations[0].meshes["B"]["z"]; Offset offset{20 + rank, 20, 150}; Extent extent{1, 3, 3}; auto data = B_z.loadChunk(offset, extent); o.flush(); - float* raw_ptr = data.get(); + float *raw_ptr = data.get(); - for( int j = 0; j < 3; ++j ) - for( int k = 0; k < 3; ++k ) - REQUIRE(raw_ptr[j*3 + k] == actual[rank][j][k]); + for (int j = 0; j < 3; ++j) + for (int k = 0; k < 3; ++k) + REQUIRE(raw_ptr[j * 3 + k] == actual[rank][j][k]); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; @@ -534,8 +616,7 @@ TEST_CASE( "hzdr_adios_sample_content_test", "[parallel][adios1]" ) #endif #if openPMD_HAVE_MPI -void -write_4D_test( std::string file_ending ) +void write_4D_test(std::string file_ending) { int mpi_s{-1}; int mpi_r{-1}; @@ -547,30 +628,29 @@ write_4D_test( std::string file_ending ) Series o = Series(name, Access::CREATE, MPI_COMM_WORLD); auto it = o.iterations[1]; - auto E_x = it.meshes[ "E" ][ "x" ]; + auto E_x = it.meshes["E"]["x"]; // every rank out of mpi_size MPI ranks contributes two writes: // - sliced in first dimension (partioned by rank) // - last dimension: every rank has two chunks to contribute - std::vector< double > data( 2 * 10 * 6 * 4, mpi_rank); + std::vector data(2 * 10 * 6 * 4, mpi_rank); - E_x.resetDataset( { Datatype::DOUBLE, { mpi_size * 2, 10, 6, 8 } } ); - E_x.storeChunk( data, { mpi_rank * 2, 0, 0, 0 }, { 2, 10, 6, 4 } ); - E_x.storeChunk( data, { mpi_rank * 2, 0, 0, 4 }, { 2, 10, 6, 4 } ); + E_x.resetDataset({Datatype::DOUBLE, {mpi_size * 2, 10, 6, 8}}); + E_x.storeChunk(data, {mpi_rank * 2, 0, 0, 0}, {2, 10, 6, 4}); + E_x.storeChunk(data, {mpi_rank * 2, 0, 0, 4}, {2, 10, 6, 4}); o.flush(); } -TEST_CASE( "write_4D_test", "[parallel]" ) +TEST_CASE("write_4D_test", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - write_4D_test( t ); + write_4D_test(t); } } -void -write_makeconst_some( std::string file_ending ) +void write_makeconst_some(std::string file_ending) { int mpi_s{-1}; int mpi_r{-1}; @@ -585,140 +665,142 @@ write_makeconst_some( std::string file_ending ) auto it = o.iterations[1]; // I would have expected we need this, since the first call that writes // data below (makeConstant) is not executed in MPI collective manner - //it.open(); - auto E_x = it.meshes[ "E" ][ "x" ]; + // it.open(); + auto E_x = it.meshes["E"]["x"]; - E_x.resetDataset( { Datatype::DOUBLE, { mpi_size * 2, 10, 6, 8 } } ); + E_x.resetDataset({Datatype::DOUBLE, {mpi_size * 2, 10, 6, 8}}); // HDF5 Attribute writes are unfortunately collective - if( mpi_rank != 0u && file_ending != "h5" ) - E_x.makeConstant( 42 ); + if (mpi_rank != 0u && file_ending != "h5") + E_x.makeConstant(42); } -TEST_CASE( "write_makeconst_some", "[parallel]" ) +TEST_CASE("write_makeconst_some", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - write_makeconst_some( t ); + write_makeconst_some(t); } } -void -close_iteration_test( std::string file_ending ) +void close_iteration_test(std::string file_ending) { - int i_mpi_rank{ -1 }, i_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &i_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( i_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( i_mpi_size ) }; + int i_mpi_rank{-1}, i_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &i_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &i_mpi_size); + unsigned mpi_rank{static_cast(i_mpi_rank)}, + mpi_size{static_cast(i_mpi_size)}; std::string name = "../samples/close_iterations_parallel_%T." 
+ file_ending; - std::vector< int > data{ 2, 4, 6, 8 }; + std::vector data{2, 4, 6, 8}; // { // we do *not* need these parentheses - Series write( name, Access::CREATE, MPI_COMM_WORLD ); + Series write(name, Access::CREATE, MPI_COMM_WORLD); bool isAdios1 = write.backend() == "MPI_ADIOS1"; { - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { mpi_size, 4 } } ); - E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } ); - it0.close( /* flush = */ false ); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {mpi_size, 4}}); + E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); + it0.close(/* flush = */ false); } write.flush(); // } - if( isAdios1 ) + if (isAdios1) { // run a simplified test for Adios1 since Adios1 has issues opening // twice in the same process - REQUIRE( auxiliary::file_exists( - "../samples/close_iterations_parallel_0.bp" ) ); + REQUIRE(auxiliary::file_exists( + "../samples/close_iterations_parallel_0.bp")); } else { - Series read( name, Access::READ_ONLY, MPI_COMM_WORLD ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x_read = it0.meshes[ "E" ][ "x" ]; - auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { mpi_size, 4 } ); - it0.close( /* flush = */ false ); + Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); + Iteration it0 = read.iterations[0]; + auto E_x_read = it0.meshes["E"]["x"]; + auto chunk = E_x_read.loadChunk({0, 0}, {mpi_size, 4}); + it0.close(/* flush = */ false); read.flush(); - for( size_t i = 0; i < 4 * mpi_size; ++i ) + for (size_t i = 0; i < 4 * mpi_size; ++i) { - REQUIRE( data[ i % 4 ] == chunk.get()[ i ] ); + REQUIRE(data[i % 4] == chunk.get()[i]); } } { - Iteration it1 = write.iterations[ 1 ]; - auto E_x = it1.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { mpi_size, 4 } } ); - E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } ); - it1.close( /* flush = */ true ); + Iteration it1 = write.iterations[1]; + auto E_x = it1.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {mpi_size, 4}}); + E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); + it1.close(/* flush = */ true); // illegally access iteration after closing - E_x.storeChunk( data, { mpi_rank, 0 }, { 1, 4 } ); - REQUIRE_THROWS( write.flush() ); + E_x.storeChunk(data, {mpi_rank, 0}, {1, 4}); + REQUIRE_THROWS(write.flush()); } - if( isAdios1 ) + if (isAdios1) { // run a simplified test for Adios1 since Adios1 has issues opening // twice in the same process - REQUIRE( auxiliary::file_exists( - "../samples/close_iterations_parallel_1.bp" ) ); + REQUIRE(auxiliary::file_exists( + "../samples/close_iterations_parallel_1.bp")); } else { - Series read( name, Access::READ_ONLY, MPI_COMM_WORLD ); - Iteration it1 = read.iterations[ 1 ]; - auto E_x_read = it1.meshes[ "E" ][ "x" ]; - auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { mpi_size, 4 } ); - it1.close( /* flush = */ true ); - for( size_t i = 0; i < 4 * mpi_size; ++i ) + Series read(name, Access::READ_ONLY, MPI_COMM_WORLD); + Iteration it1 = read.iterations[1]; + auto E_x_read = it1.meshes["E"]["x"]; + auto chunk = E_x_read.loadChunk({0, 0}, {mpi_size, 4}); + it1.close(/* flush = */ true); + for (size_t i = 0; i < 4 * mpi_size; ++i) { - REQUIRE( data[ i % 4 ] == chunk.get()[ i ] ); + REQUIRE(data[i % 4] == chunk.get()[i]); } - auto read_again = - E_x_read.loadChunk< int >( { 0, 0 }, { mpi_size, 4 } ); - REQUIRE_THROWS( read.flush() ); + auto read_again = E_x_read.loadChunk({0, 0}, {mpi_size, 4}); + REQUIRE_THROWS(read.flush()); } 
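
The parallel close_iteration_test above boils down to a short write/close/re-read cycle. Below is a minimal standalone sketch of that cycle, distilled from the test; it assumes an MPI-enabled openPMD-api build and an already valid MPI environment, and the file name, the ".bp" ending and the use of iteration 0 are placeholders, not part of the patch.

```cpp
// Minimal sketch, not part of the patch: write one chunk per rank, close
// the iteration without flushing, flush the Series, then re-open the file
// read-only and load the data back. File name / backend are placeholders.
#include <openPMD/openPMD.hpp>

#include <mpi.h>

#include <vector>

int main(int argc, char **argv)
{
    using namespace openPMD;
    MPI_Init(&argc, &argv);
    int rank{-1}, size{-1};
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    std::vector<int> data{2, 4, 6, 8};
    {
        Series write("close_sketch_%T.bp", Access::CREATE, MPI_COMM_WORLD);
        Iteration it0 = write.iterations[0];
        auto E_x = it0.meshes["E"]["x"];
        E_x.resetDataset({Datatype::INT, {unsigned(size), 4}});
        E_x.storeChunk(data, {unsigned(rank), 0}, {1, 4});
        it0.close(/* flush = */ false); // defers the I/O to the next flush()
        write.flush();
    }
    {
        Series read("close_sketch_%T.bp", Access::READ_ONLY, MPI_COMM_WORLD);
        Iteration it0 = read.iterations[0];
        auto E_x = it0.meshes["E"]["x"];
        auto chunk = E_x.loadChunk<int>({0, 0}, {unsigned(size), 4});
        it0.close(/* flush = */ true); // performs the pending read
        // chunk.get()[0 .. size * 4 - 1] now holds the data written above
    }
    MPI_Finalize();
    return 0;
}
```
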
} -TEST_CASE( "close_iteration_test", "[parallel]" ) +TEST_CASE("close_iteration_test", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - close_iteration_test( t ); + close_iteration_test(t); } } -void -file_based_write_read( std::string file_ending ) +void file_based_write_read(std::string file_ending) { namespace io = openPMD; // the iterations we want to write - std::vector< int > iterations = { 10, 30, 50, 70 }; + std::vector iterations = {10, 30, 50, 70}; // MPI communicator meta-data and file name - int i_mpi_rank{ -1 }, i_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &i_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( i_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( i_mpi_size ) }; + int i_mpi_rank{-1}, i_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &i_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &i_mpi_size); + unsigned mpi_rank{static_cast(i_mpi_rank)}, + mpi_size{static_cast(i_mpi_size)}; std::string name = "../samples/file_based_write_read_%05T." + file_ending; // data (we just use the same data for each step for demonstration) // we assign 10 longitudinal cells & 300 transversal cells per rank here - unsigned const local_Nz = 10u; + unsigned const local_Nz = 10u; unsigned const global_Nz = local_Nz * mpi_size; unsigned const global_Nx = 300u; using precision = double; - std::vector< precision > E_x_data( global_Nx * local_Nz ); + std::vector E_x_data(global_Nx * local_Nz); // filling some values: 0, 1, ... - std::iota( E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 20. ); }); + std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 20.); + }); { // open a parallel series @@ -726,18 +808,21 @@ file_based_write_read( std::string file_ending ) series.setIterationEncoding(IterationEncoding::fileBased); int const last_step = 100; - for (int step = 0; step < last_step; ++step) { + for (int step = 0; step < last_step; ++step) + { MPI_Barrier(MPI_COMM_WORLD); // is this an output step? bool const rank_in_output_step = - std::find(iterations.begin(), iterations.end(), step) != iterations.end(); - if (!rank_in_output_step) continue; + std::find(iterations.begin(), iterations.end(), step) != + iterations.end(); + if (!rank_in_output_step) + continue; // now we write (parallel, independent I/O) auto it = series.iterations[step]; auto E = it.meshes["E"]; // record - auto E_x = E["x"]; // record component + auto E_x = E["x"]; // record component // some meta-data E.setAxisLabels({"z", "x"}); @@ -747,21 +832,21 @@ file_based_write_read( std::string file_ending ) // update values std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [&step](precision d) -> precision { - return std::sin(d * 2.0 * 3.1415 / 100. + step); - }); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [&step](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 100. 
+ step); + }); auto dataset = io::Dataset( - io::determineDatatype(), - {global_Nx, global_Nz}); + io::determineDatatype(), {global_Nx, global_Nz}); E_x.resetDataset(dataset); Offset chunk_offset = {0, local_Nz * mpi_rank}; Extent chunk_extent = {global_Nx, local_Nz}; - E_x.storeChunk( - io::shareRaw(E_x_data), - chunk_offset, chunk_extent); + E_x.storeChunk(io::shareRaw(E_x_data), chunk_offset, chunk_extent); series.flush(); } } @@ -772,40 +857,39 @@ file_based_write_read( std::string file_ending ) name, Access::READ_ONLY, MPI_COMM_WORLD, - "{\"defer_iteration_parsing\": true}" ); - Iteration it = read.iterations[ 30 ]; + "{\"defer_iteration_parsing\": true}"); + Iteration it = read.iterations[30]; it.open(); // collective - if( mpi_rank == 0 ) // non-collective branch + if (mpi_rank == 0) // non-collective branch { auto E_x = it.meshes["E"]["x"]; - auto data = E_x.loadChunk< double >(); + auto data = E_x.loadChunk(); read.flush(); } } } -TEST_CASE( "file_based_write_read", "[parallel]" ) +TEST_CASE("file_based_write_read", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - file_based_write_read( t ); + file_based_write_read(t); } } -void -hipace_like_write( std::string file_ending ) +void hipace_like_write(std::string file_ending) { namespace io = openPMD; bool const verbose = false; // print statements // the iterations we want to write - std::vector< int > iterations = { 10, 30, 50, 70 }; + std::vector iterations = {10, 30, 50, 70}; // Parallel HDF5 + chunking does not work with independent IO pattern bool const isHDF5 = file_ending == "h5"; std::string options = "{}"; - if( isHDF5 ) + if (isHDF5) /* * some keys and values capitalized randomly to check whether * capitalization-insensitivity is working. @@ -820,28 +904,33 @@ hipace_like_write( std::string file_ending ) })"; // MPI communicator meta-data and file name - int i_mpi_rank{ -1 }, i_mpi_size{ -1 }; - MPI_Comm_rank( MPI_COMM_WORLD, &i_mpi_rank ); - MPI_Comm_size( MPI_COMM_WORLD, &i_mpi_size ); - unsigned mpi_rank{ static_cast< unsigned >( i_mpi_rank ) }, - mpi_size{ static_cast< unsigned >( i_mpi_size ) }; + int i_mpi_rank{-1}, i_mpi_size{-1}; + MPI_Comm_rank(MPI_COMM_WORLD, &i_mpi_rank); + MPI_Comm_size(MPI_COMM_WORLD, &i_mpi_size); + unsigned mpi_rank{static_cast(i_mpi_rank)}, + mpi_size{static_cast(i_mpi_size)}; std::string name = "../samples/hipace_like_write." + file_ending; // data (we just use the same data for each step for demonstration) // we assign 10 longitudinal cells & 300 transversal cells per rank here - unsigned const local_Nz = 10u; + unsigned const local_Nz = 10u; unsigned const global_Nz = local_Nz * mpi_size; unsigned const global_Nx = 300u; using precision = double; - std::vector< precision > E_x_data( global_Nx * local_Nz ); + std::vector E_x_data(global_Nx * local_Nz); // filling some values: 0, 1, ... - std::iota( E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 20. 
); }); + std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 20.); + }); // open a parallel series - Series series( name, Access::CREATE, MPI_COMM_WORLD, options ); - series.setIterationEncoding( IterationEncoding::groupBased ); + Series series(name, Access::CREATE, MPI_COMM_WORLD, options); + series.setIterationEncoding(IterationEncoding::groupBased); series.flush(); // in HiPACE, ranks write one-by-one to a "swiped" step, overlapping @@ -849,7 +938,8 @@ hipace_like_write( std::string file_ending ) int const last_step = 100; int const my_first_step = i_mpi_rank * int(local_Nz); int const all_last_step = last_step + (i_mpi_size - 1) * int(local_Nz); - for( int first_rank_step = 0; first_rank_step < all_last_step; ++first_rank_step ) + for (int first_rank_step = 0; first_rank_step < all_last_step; + ++first_rank_step) { MPI_Barrier(MPI_COMM_WORLD); @@ -857,44 +947,54 @@ hipace_like_write( std::string file_ending ) // step on the local rank int const step = first_rank_step - my_first_step; - if( verbose ) - std::cout << "[" << i_mpi_rank << "] " << - "step: " << step << " | first_ranks_step: " << first_rank_step << std::endl; + if (verbose) + std::cout << "[" << i_mpi_rank << "] " + << "step: " << step + << " | first_ranks_step: " << first_rank_step + << std::endl; // do we start writing to a new step? bool const start_new_output_step = - std::find(iterations.begin(), iterations.end(), first_rank_step) != iterations.end(); + std::find(iterations.begin(), iterations.end(), first_rank_step) != + iterations.end(); // are we just about to finish writing to a step? - // TODO; if we detect this, we can collectively call `it.close()` after storeChunk/flush() + // TODO; if we detect this, we can collectively call `it.close()` after + // storeChunk/flush() - // collectively: create a new iteration and declare records we want to write - if( verbose ) - std::cout << "[" << i_mpi_rank << "] " << - "start_new_output_step: " << start_new_output_step << std::endl; - if( start_new_output_step && false ) // looks like we don't even need that :) + // collectively: create a new iteration and declare records we want to + // write + if (verbose) + std::cout << "[" << i_mpi_rank << "] " + << "start_new_output_step: " << start_new_output_step + << std::endl; + if (start_new_output_step && + false) // looks like we don't even need that :) { auto it = series.iterations[first_rank_step]; auto E = it.meshes["E"]; // record auto E_x = E["x"]; // record component auto dataset = io::Dataset( - io::determineDatatype< precision >( ), - {global_Nx, global_Nz}); + io::determineDatatype(), {global_Nx, global_Nz}); E_x.resetDataset(dataset); - //series.flush(); + // series.flush(); } // has this ranks started computations yet? - if( step < 0 ) continue; + if (step < 0) + continue; // has this ranks stopped computations? - if( step > last_step ) continue; + if (step > last_step) + continue; // does this rank contribute to with output currently? 
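
The "swiped step" bookkeeping in hipace_like_write is easy to lose inside the formatting churn: each rank starts writing local_Nz global steps after its predecessor, so at a given global counter the ranks work on different local steps. The following self-contained illustration reproduces only that arithmetic; the values for local_Nz, last_step and the rank count are made up for the example, and no MPI or openPMD is involved.

```cpp
// Aside, not part of the patch: the step arithmetic of the HiPACE-like
// write pattern, with small illustrative numbers.
#include <cstdio>

int main()
{
    int const local_Nz = 10;  // per-rank slab size; also the step offset
    int const last_step = 30; // last local step each rank computes
    int const n_ranks = 3;
    int const all_last_step = last_step + (n_ranks - 1) * local_Nz;

    // global counter that all ranks advance together (cf. first_rank_step)
    for (int first_rank_step = 0; first_rank_step < all_last_step;
         ++first_rank_step)
    {
        for (int rank = 0; rank < n_ranks; ++rank)
        {
            int const my_first_step = rank * local_Nz;
            int const step = first_rank_step - my_first_step; // local step
            bool const active = step >= 0 && step <= last_step;
            if (active && first_rank_step % 10 == 0)
                std::printf(
                    "counter=%d rank=%d local step=%d\n",
                    first_rank_step,
                    rank,
                    step);
        }
    }
    return 0;
}
```
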
bool const rank_in_output_step = - std::find(iterations.begin(), iterations.end(), step) != iterations.end(); - if( !rank_in_output_step ) continue; + std::find(iterations.begin(), iterations.end(), step) != + iterations.end(); + if (!rank_in_output_step) + continue; // now we write (parallel, independent I/O) auto it = series.iterations[step]; auto E = it.meshes["E"]; // record - auto E_x = E["x"]; // record component + auto E_x = E["x"]; // record component // some meta-data E.setAxisLabels({"z", "x"}); @@ -903,18 +1003,22 @@ hipace_like_write( std::string file_ending ) E_x.setPosition({0.0, 0.0}); // update values - std::iota( E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); - std::transform(E_x_data.begin(), E_x_data.end(), E_x_data.begin(), - [&step](precision d) -> precision { return std::sin( d * 2.0 * 3.1415 / 100. + step ); }); + std::iota(E_x_data.begin(), E_x_data.end(), local_Nz * mpi_rank); + std::transform( + E_x_data.begin(), + E_x_data.end(), + E_x_data.begin(), + [&step](precision d) -> precision { + return std::sin(d * 2.0 * 3.1415 / 100. + step); + }); auto dataset = io::Dataset( - io::determineDatatype< precision >( ), - {global_Nx, global_Nz}); + io::determineDatatype(), {global_Nx, global_Nz}); E_x.resetDataset(dataset); Offset chunk_offset = {0, local_Nz * mpi_rank}; Extent chunk_extent = {global_Nx, local_Nz}; - auto const copyToShared = []( std::vector< precision > const & data ) { + auto const copyToShared = [](std::vector const &data) { auto d = std::shared_ptr( new precision[data.size()], std::default_delete()); std::copy(data.begin(), data.end(), d.get()); @@ -922,65 +1026,63 @@ hipace_like_write( std::string file_ending ) }; E_x.storeChunk( copyToShared(E_x_data), - //io::shareRaw(E_x_data), - chunk_offset, chunk_extent); + // io::shareRaw(E_x_data), + chunk_offset, + chunk_extent); series.flush(); } } -TEST_CASE( "hipace_like_write", "[parallel]" ) +TEST_CASE("hipace_like_write", "[parallel]") { - for( auto const & t : getBackends() ) + for (auto const &t : getBackends()) { - hipace_like_write( t ); + hipace_like_write(t); } } #endif #if openPMD_HAVE_ADIOS2 && openPMD_HAVE_MPI -void -adios2_streaming( bool variableBasedLayout ) +void adios2_streaming(bool variableBasedLayout) { - int size{ -1 }; - int rank{ -1 }; - MPI_Comm_size( MPI_COMM_WORLD, &size ); - MPI_Comm_rank( MPI_COMM_WORLD, &rank ); - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + int size{-1}; + int rank{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; } - if( size < 2 || rank > 1 ) + if (size < 2 || rank > 1) { return; } constexpr size_t extent = 100; - if( rank == 0 ) + if (rank == 0) { // write - Series writeSeries( - "../samples/adios2_stream.sst", Access::CREATE ); - if( variableBasedLayout ) + Series writeSeries("../samples/adios2_stream.sst", Access::CREATE); + if (variableBasedLayout) { - writeSeries.setIterationEncoding( - IterationEncoding::variableBased ); + writeSeries.setIterationEncoding(IterationEncoding::variableBased); } auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { extent } ) ); - std::vector< int > 
data( extent, i ); - E_x.storeChunk( data, { 0 }, { extent } ); + openPMD::Dataset(openPMD::Datatype::INT, {extent})); + std::vector data(extent, i); + E_x.storeChunk(data, {0}, {extent}); // we encourage manually closing iterations, but it should // not matter so let's do the switcharoo for this test - if( i % 2 == 0 ) + if (i % 2 == 0) { writeSeries.flush(); } @@ -990,7 +1092,7 @@ adios2_streaming( bool variableBasedLayout ) } } } - else if( rank == 1 ) + else if (rank == 1) { // read // it should be possible to select the sst engine via file ending or @@ -1002,23 +1104,23 @@ adios2_streaming( bool variableBasedLayout ) * this avoids that the reader sees that file. */ using namespace std::chrono_literals; - std::this_thread::sleep_for( 1s ); + std::this_thread::sleep_for(1s); Series readSeries( "../samples/adios2_stream.sst", Access::READ_ONLY, - "defer_iteration_parsing = true" ); // inline TOML + "defer_iteration_parsing = true"); // inline TOML size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0 }, { extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == extent); + auto chunk = E_x.loadChunk({0}, {extent}); // we encourage manually closing iterations, but it should // not matter so let's do the switcharoo for this test - if( last_iteration_index % 2 == 0 ) + if (last_iteration_index % 2 == 0) { readSeries.flush(); } @@ -1026,33 +1128,33 @@ adios2_streaming( bool variableBasedLayout ) { iteration.close(); } - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } } -TEST_CASE( "adios2_streaming", "[pseudoserial][adios2]" ) +TEST_CASE("adios2_streaming", "[pseudoserial][adios2]") { - adios2_streaming( true ); - adios2_streaming( false ); + adios2_streaming(true); + adios2_streaming(false); } -TEST_CASE( "parallel_adios2_json_config", "[parallel][adios2]" ) +TEST_CASE("parallel_adios2_json_config", "[parallel][adios2]") { - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; } - int size{ -1 }; - int rank{ -1 }; - MPI_Comm_size( MPI_COMM_WORLD, &size ); - MPI_Comm_rank( MPI_COMM_WORLD, &rank ); + int size{-1}; + int rank{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &size); + MPI_Comm_rank(MPI_COMM_WORLD, &rank); std::string writeConfigBP3 = R"END( [adios2] @@ -1093,40 +1195,37 @@ type = "blosc" clevel = 1 doshuffle = "BLOSC_BITSHUFFLE" )END"; - auto const write = [ size, rank ]( - std::string const & filename, - std::string const & config ) { - if( rank == 0 ) - { - std::fstream file; - file.open( - "../samples/write_config.toml", - std::ios_base::out | std::ios_base::binary ); - file << config; - file.flush(); - } - MPI_Barrier( MPI_COMM_WORLD ); - openPMD::Series series( - filename, - openPMD::Access::CREATE, - MPI_COMM_WORLD, - "@../samples/write_config.toml" ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - openPMD::Dataset ds( - 
openPMD::Datatype::INT, { unsigned( size ), 1000 } ); - E_x.resetDataset( ds ); - std::vector< int > data( 1000, 0 ); - E_x.storeChunk( data, { unsigned( rank ), 0 }, { 1, 1000 } ); - series.flush(); - }; - write( "../samples/jsonConfiguredBP4Parallel.bp", writeConfigBP4 ); - write( "../samples/jsonConfiguredBP3Parallel.bp", writeConfigBP3 ); + auto const write = + [size, rank](std::string const &filename, std::string const &config) { + if (rank == 0) + { + std::fstream file; + file.open( + "../samples/write_config.toml", + std::ios_base::out | std::ios_base::binary); + file << config; + file.flush(); + } + MPI_Barrier(MPI_COMM_WORLD); + openPMD::Series series( + filename, + openPMD::Access::CREATE, + MPI_COMM_WORLD, + "@../samples/write_config.toml"); + auto E_x = series.iterations[0].meshes["E"]["x"]; + openPMD::Dataset ds(openPMD::Datatype::INT, {unsigned(size), 1000}); + E_x.resetDataset(ds); + std::vector data(1000, 0); + E_x.storeChunk(data, {unsigned(rank), 0}, {1, 1000}); + series.flush(); + }; + write("../samples/jsonConfiguredBP4Parallel.bp", writeConfigBP4); + write("../samples/jsonConfiguredBP3Parallel.bp", writeConfigBP3); // BP3 engine writes files, BP4 writes directories - REQUIRE( - openPMD::auxiliary::file_exists( "../samples/jsonConfiguredBP3.bp" ) ); - REQUIRE( openPMD::auxiliary::directory_exists( - "../samples/jsonConfiguredBP4.bp" ) ); + REQUIRE(openPMD::auxiliary::file_exists("../samples/jsonConfiguredBP3.bp")); + REQUIRE(openPMD::auxiliary::directory_exists( + "../samples/jsonConfiguredBP4.bp")); std::string readConfigBP3 = R"END( { @@ -1149,124 +1248,116 @@ doshuffle = "BLOSC_BITSHUFFLE" } )END"; auto const read = - [ size, rank ] - ( std::string const & filename, std::string const & config ) { + [size, rank](std::string const &filename, std::string const &config) { // let's write the config to a file and read it from there - if( rank == 0 ) + if (rank == 0) { std::fstream file; - file.open( "../samples/read_config.json", std::ios_base::out ); + file.open("../samples/read_config.json", std::ios_base::out); file << config; file.flush(); } - MPI_Barrier( MPI_COMM_WORLD ); + MPI_Barrier(MPI_COMM_WORLD); openPMD::Series series( filename, openPMD::Access::READ_ONLY, MPI_COMM_WORLD, - " @ ../samples/read_config.json " ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 2 ); - REQUIRE( E_x.getExtent()[ 0 ] == unsigned( size ) ); - REQUIRE( E_x.getExtent()[ 1 ] == 1000 ); - auto chunk = - E_x.loadChunk< int >( { unsigned( rank ), 0 }, { 1, 1000 } ); + " @ ../samples/read_config.json "); + auto E_x = series.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 2); + REQUIRE(E_x.getExtent()[0] == unsigned(size)); + REQUIRE(E_x.getExtent()[1] == 1000); + auto chunk = E_x.loadChunk({unsigned(rank), 0}, {1, 1000}); series.flush(); - for( size_t i = 0; i < 1000; ++i ) + for (size_t i = 0; i < 1000; ++i) { - REQUIRE( chunk.get()[ i ] == 0 ); + REQUIRE(chunk.get()[i] == 0); } }; - read( "../samples/jsonConfiguredBP3Parallel.bp", readConfigBP3 ); - read( "../samples/jsonConfiguredBP4Parallel.bp", readConfigBP4 ); + read("../samples/jsonConfiguredBP3Parallel.bp", readConfigBP3); + read("../samples/jsonConfiguredBP4Parallel.bp", readConfigBP4); } -void -adios2_ssc() +void adios2_ssc() { auto const extensions = openPMD::getFileExtensions(); - if( std::find( extensions.begin(), extensions.end(), "ssc" ) == - extensions.end() ) + if (std::find(extensions.begin(), extensions.end(), "ssc") == + extensions.end()) { // SSC engine 
not available in ADIOS2 return; } - int global_size{ -1 }; - int global_rank{ -1 }; - MPI_Comm_size( MPI_COMM_WORLD, &global_size ); - MPI_Comm_rank( MPI_COMM_WORLD, &global_rank ); - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + int global_size{-1}; + int global_rank{-1}; + MPI_Comm_size(MPI_COMM_WORLD, &global_size); + MPI_Comm_rank(MPI_COMM_WORLD, &global_rank); + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; } - if( global_size < 2 ) + if (global_size < 2) { return; } int color = global_rank % 2; MPI_Comm local_comm; - MPI_Comm_split( MPI_COMM_WORLD, color, global_rank, &local_comm ); - int local_size{ -1 }; - int local_rank{ -1 }; - MPI_Comm_size( local_comm, &local_size ); - MPI_Comm_rank( local_comm, &local_rank ); + MPI_Comm_split(MPI_COMM_WORLD, color, global_rank, &local_comm); + int local_size{-1}; + int local_rank{-1}; + MPI_Comm_size(local_comm, &local_size); + MPI_Comm_rank(local_comm, &local_rank); constexpr size_t extent = 10; - if( color == 0 ) + if (color == 0) { // write Series writeSeries( - "../samples/adios2_stream.ssc", - Access::CREATE, - local_comm ); + "../samples/adios2_stream.ssc", Access::CREATE, local_comm); auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; - E_x.resetDataset( openPMD::Dataset( - openPMD::Datatype::INT, { unsigned( local_size ), extent } ) ); - std::vector< int > data( extent, i ); - E_x.storeChunk( - data, { unsigned( local_rank ), 0 }, { 1, extent } ); + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; + E_x.resetDataset(openPMD::Dataset( + openPMD::Datatype::INT, {unsigned(local_size), extent})); + std::vector data(extent, i); + E_x.storeChunk(data, {unsigned(local_rank), 0}, {1, extent}); iteration.close(); } } - else if( color == 1 ) + else if (color == 1) { // read Series readSeries( - "../samples/adios2_stream.ssc", - Access::READ_ONLY, - local_comm ); + "../samples/adios2_stream.ssc", Access::READ_ONLY, local_comm); size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 2 ); - REQUIRE( E_x.getExtent()[ 1 ] == extent ); - auto chunk = E_x.loadChunk< int >( - { unsigned( local_rank ), 0 }, { 1, extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 2); + REQUIRE(E_x.getExtent()[1] == extent); + auto chunk = + E_x.loadChunk({unsigned(local_rank), 0}, {1, extent}); iteration.close(); - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } } -TEST_CASE( "adios2_ssc", "[parallel][adios2]" ) +TEST_CASE("adios2_ssc", "[parallel][adios2]") { adios2_ssc(); } diff --git a/test/SerialIOTest.cpp b/test/SerialIOTest.cpp index 5b7a3b1c84..2aa77305fc 100644 --- a/test/SerialIOTest.cpp +++ b/test/SerialIOTest.cpp @@ -1,7 +1,7 @@ // expose private and protected members for invasive testing #if openPMD_USE_INVASIVE_TESTS -# define OPENPMD_private public -# define OPENPMD_protected public +#define 
OPENPMD_private public: +#define OPENPMD_protected public: #endif #include "openPMD/auxiliary/Environment.hpp" @@ -24,8 +24,8 @@ #include #include #include -#include #include +#include #include #include #include @@ -43,50 +43,46 @@ struct BackendSelection } }; -std::vector< BackendSelection > testedBackends() +std::vector testedBackends() { auto variants = getVariants(); - std::map< std::string, std::string > extensions{ - { "json", "json" }, - { "adios1", "bp1" }, - { "adios2", "bp" }, - { "hdf5", "h5" } }; - std::vector< BackendSelection > res; - for( auto const & pair : variants ) - { - if( pair.second ) + std::map extensions{ + {"json", "json"}, {"adios1", "bp1"}, {"adios2", "bp"}, {"hdf5", "h5"}}; + std::vector res; + for (auto const &pair : variants) + { + if (pair.second) { - auto lookup = extensions.find( pair.first ); - if( lookup != extensions.end() ) + auto lookup = extensions.find(pair.first); + if (lookup != extensions.end()) { std::string extension = lookup->second; - res.push_back( - { std::move( pair.first ), std::move( extension ) } ); + res.push_back({std::move(pair.first), std::move(extension)}); } } } return res; } -std::vector< std::string > testedFileExtensions() +std::vector testedFileExtensions() { auto allExtensions = getFileExtensions(); auto newEnd = std::remove_if( - allExtensions.begin(), - allExtensions.end(), - []( std::string const & ext ) - { return ext == "sst" || ext == "ssc"; } ); - return { allExtensions.begin(), newEnd }; + allExtensions.begin(), allExtensions.end(), [](std::string const &ext) { + return ext == "sst" || ext == "ssc"; + }); + return {allExtensions.begin(), newEnd}; } #if openPMD_HAVE_ADIOS2 -TEST_CASE( "adios2_char_portability", "[serial][adios2]" ) +TEST_CASE("adios2_char_portability", "[serial][adios2]") { /* * This tests portability of char attributes in ADIOS2 in schema 20210209. 
*/ - if( auxiliary::getEnvString("OPENPMD_NEW_ATTRIBUTE_LAYOUT", "NOT_SET") == "NOT_SET") + if (auxiliary::getEnvString("OPENPMD_NEW_ATTRIBUTE_LAYOUT", "NOT_SET") == + "NOT_SET") { /* * @todo As soon as we have added automatic detection for the new @@ -107,109 +103,105 @@ TEST_CASE( "adios2_char_portability", "[serial][adios2]" ) })END"; { adios2::ADIOS adios; - auto IO = adios.DeclareIO( "IO" ); + auto IO = adios.DeclareIO("IO"); auto engine = IO.Open( - "../samples/adios2_char_portability.bp", adios2::Mode::Write ); + "../samples/adios2_char_portability.bp", adios2::Mode::Write); engine.BeginStep(); // write default openPMD attributes - auto writeAttribute = - [ &engine, &IO ]( std::string const & name, auto value ) - { - using variable_type = decltype( value ); - engine.Put( IO.DefineVariable< variable_type >( name ), value ); + auto writeAttribute = [&engine, + &IO](std::string const &name, auto value) { + using variable_type = decltype(value); + engine.Put(IO.DefineVariable(name), value); }; - writeAttribute( "/basePath", std::string( "/data/%T/" ) ); - writeAttribute( "/date", std::string( "2021-02-22 11:14:00 +0000" ) ); - writeAttribute( "/iterationEncoding", std::string( "groupBased" ) ); - writeAttribute( "/iterationFormat", std::string( "/data/%T/" ) ); - writeAttribute( "/openPMD", std::string( "1.1.0" ) ); - writeAttribute( "/openPMDextension", uint32_t( 0 ) ); - writeAttribute( "/software", std::string( "openPMD-api" ) ); - writeAttribute( "/softwareVersion", std::string( "0.14.0-dev" ) ); - - IO.DefineAttribute< uint64_t >( - "__openPMD_internal/openPMD2_adios2_schema", 20210209 ); - IO.DefineAttribute< unsigned char >( "__openPMD_internal/useSteps", 1 ); + writeAttribute("/basePath", std::string("/data/%T/")); + writeAttribute("/date", std::string("2021-02-22 11:14:00 +0000")); + writeAttribute("/iterationEncoding", std::string("groupBased")); + writeAttribute("/iterationFormat", std::string("/data/%T/")); + writeAttribute("/openPMD", std::string("1.1.0")); + writeAttribute("/openPMDextension", uint32_t(0)); + writeAttribute("/software", std::string("openPMD-api")); + writeAttribute("/softwareVersion", std::string("0.14.0-dev")); + + IO.DefineAttribute( + "__openPMD_internal/openPMD2_adios2_schema", 20210209); + IO.DefineAttribute("__openPMD_internal/useSteps", 1); // write char things that should be read back properly std::string baseString = "abcdefghi"; // null termination not necessary, ADIOS knows the size of its variables - std::vector< signed char > signedVector( 9 ); - std::vector< unsigned char > unsignedVector( 9 ); - for( unsigned i = 0; i < 9; ++i ) + std::vector signedVector(9); + std::vector unsignedVector(9); + for (unsigned i = 0; i < 9; ++i) { - signedVector[ i ] = baseString[ i ]; - unsignedVector[ i ] = baseString[ i ]; + signedVector[i] = baseString[i]; + unsignedVector[i] = baseString[i]; } engine.Put( - IO.DefineVariable< signed char >( - "/signedVector", { 3, 3 }, { 0, 0 }, { 3, 3 } ), - signedVector.data() ); + IO.DefineVariable( + "/signedVector", {3, 3}, {0, 0}, {3, 3}), + signedVector.data()); engine.Put( - IO.DefineVariable< unsigned char >( - "/unsignedVector", { 3, 3 }, { 0, 0 }, { 3, 3 } ), - unsignedVector.data() ); + IO.DefineVariable( + "/unsignedVector", {3, 3}, {0, 0}, {3, 3}), + unsignedVector.data()); engine.Put( - IO.DefineVariable< char >( - "/unspecifiedVector", { 3, 3 }, { 0, 0 }, { 3, 3 } ), - baseString.c_str() ); + IO.DefineVariable( + "/unspecifiedVector", {3, 3}, {0, 0}, {3, 3}), + baseString.c_str()); - writeAttribute( 
"/signedChar", ( signed char )'a' ); - writeAttribute( "/unsignedChar", ( unsigned char )'a' ); - writeAttribute( "/char", ( char )'a' ); + writeAttribute("/signedChar", (signed char)'a'); + writeAttribute("/unsignedChar", (unsigned char)'a'); + writeAttribute("/char", (char)'a'); engine.EndStep(); engine.Close(); } { - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "ADIOS2" ) != - "ADIOS2" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "ADIOS2") != "ADIOS2") { return; } Series read( - "../samples/adios2_char_portability.bp", - Access::READ_ONLY, - config ); - auto signedVectorAttribute = read.getAttribute( "signedVector" ); - REQUIRE( signedVectorAttribute.dtype == Datatype::VEC_STRING ); - auto unsignedVectorAttribute = read.getAttribute( "unsignedVector" ); - REQUIRE( unsignedVectorAttribute.dtype == Datatype::VEC_STRING ); + "../samples/adios2_char_portability.bp", Access::READ_ONLY, config); + auto signedVectorAttribute = read.getAttribute("signedVector"); + REQUIRE(signedVectorAttribute.dtype == Datatype::VEC_STRING); + auto unsignedVectorAttribute = read.getAttribute("unsignedVector"); + REQUIRE(unsignedVectorAttribute.dtype == Datatype::VEC_STRING); auto unspecifiedVectorAttribute = - read.getAttribute( "unspecifiedVector" ); - REQUIRE( unspecifiedVectorAttribute.dtype == Datatype::VEC_STRING ); - std::vector< std::string > desiredVector{ "abc", "def", "ghi" }; + read.getAttribute("unspecifiedVector"); + REQUIRE(unspecifiedVectorAttribute.dtype == Datatype::VEC_STRING); + std::vector desiredVector{"abc", "def", "ghi"}; REQUIRE( - signedVectorAttribute.get< std::vector< std::string > >() == - desiredVector ); + signedVectorAttribute.get>() == + desiredVector); REQUIRE( - unsignedVectorAttribute.get< std::vector< std::string > >() == - desiredVector ); + unsignedVectorAttribute.get>() == + desiredVector); REQUIRE( - unspecifiedVectorAttribute.get< std::vector< std::string > >() == - desiredVector ); + unspecifiedVectorAttribute.get>() == + desiredVector); - auto signedCharAttribute = read.getAttribute( "signedChar" ); + auto signedCharAttribute = read.getAttribute("signedChar"); // we don't have that datatype yet // REQUIRE(unsignedCharAttribute.dtype == Datatype::SCHAR); - auto unsignedCharAttribute = read.getAttribute( "unsignedChar" ); - REQUIRE( unsignedCharAttribute.dtype == Datatype::UCHAR ); - auto charAttribute = read.getAttribute( "char" ); + auto unsignedCharAttribute = read.getAttribute("unsignedChar"); + REQUIRE(unsignedCharAttribute.dtype == Datatype::UCHAR); + auto charAttribute = read.getAttribute("char"); // might currently report Datatype::UCHAR on some platforms // REQUIRE(unsignedCharAttribute.dtype == Datatype::CHAR); - REQUIRE( signedCharAttribute.get< char >() == char( 'a' ) ); - REQUIRE( unsignedCharAttribute.get< char >() == char( 'a' ) ); - REQUIRE( charAttribute.get< char >() == char( 'a' ) ); + REQUIRE(signedCharAttribute.get() == char('a')); + REQUIRE(unsignedCharAttribute.get() == char('a')); + REQUIRE(charAttribute.get() == char('a')); } } #endif void write_and_read_many_iterations( - std::string const & ext, bool intermittentFlushes ) + std::string const &ext, bool intermittentFlushes) { // the idea here is to trigger the maximum allowed number of file handles, // e.g., the upper limit in "ulimit -n" (default: often 1024). 
Once this @@ -219,8 +211,10 @@ void write_and_read_many_iterations( // iteration is not dirty before closing // Our flushing logic must not forget to close even if the iteration is // otherwise untouched and needs not be flushed. - unsigned int nIterations = auxiliary::getEnvNum( "OPENPMD_TEST_NFILES_MAX", 1030 ); - std::string filename = "../samples/many_iterations/many_iterations_%T." + ext; + unsigned int nIterations = + auxiliary::getEnvNum("OPENPMD_TEST_NFILES_MAX", 1030); + std::string filename = + "../samples/many_iterations/many_iterations_%T." + ext; std::vector data(10); std::iota(data.begin(), data.end(), 0.); @@ -228,13 +222,14 @@ void write_and_read_many_iterations( { Series write(filename, Access::CREATE); - for (unsigned int i = 0; i < nIterations; ++i) { + for (unsigned int i = 0; i < nIterations; ++i) + { // std::cout << "Putting iteration " << i << std::endl; Iteration it = write.iterations[i]; auto E_x = it.meshes["E"]["x"]; - E_x.resetDataset( ds ); - E_x.storeChunk( data, { 0 }, { 10 } ); - if( intermittentFlushes ) + E_x.resetDataset(ds); + E_x.storeChunk(data, {0}, {10}); + if (intermittentFlushes) { write.flush(); } @@ -242,92 +237,104 @@ void write_and_read_many_iterations( } // ~Series intentionally not yet called - Series read( filename, Access::READ_ONLY, "{\"defer_iteration_parsing\": true}" ); - for( auto iteration : read.iterations ) + Series read( + filename, Access::READ_ONLY, "{\"defer_iteration_parsing\": true}"); + for (auto iteration : read.iterations) { iteration.second.open(); // std::cout << "Reading iteration " << iteration.first << // std::endl; - auto E_x = iteration.second.meshes[ "E" ][ "x" ]; - auto chunk = E_x.loadChunk< float >( { 0 }, { 10 } ); - if( intermittentFlushes ) + auto E_x = iteration.second.meshes["E"]["x"]; + auto chunk = E_x.loadChunk({0}, {10}); + if (intermittentFlushes) { read.flush(); } iteration.second.close(); auto array = chunk.get(); - for (size_t i = 0; i < 10; ++i) { + for (size_t i = 0; i < 10; ++i) + { REQUIRE(array[i] == float(i)); } } } - Series list( filename, Access::READ_ONLY ); - helper::listSeries( list ); + Series list(filename, Access::READ_ONLY); + helper::listSeries(list); } -TEST_CASE( "write_and_read_many_iterations", "[serial]" ) +TEST_CASE("write_and_read_many_iterations", "[serial]") { bool intermittentFlushes = false; - if( auxiliary::directory_exists( "../samples/many_iterations" ) ) - auxiliary::remove_directory( "../samples/many_iterations" ); - for( auto const & t : testedFileExtensions() ) + if (auxiliary::directory_exists("../samples/many_iterations")) + auxiliary::remove_directory("../samples/many_iterations"); + for (auto const &t : testedFileExtensions()) { - write_and_read_many_iterations( t, intermittentFlushes ); + write_and_read_many_iterations(t, intermittentFlushes); intermittentFlushes = !intermittentFlushes; } } -TEST_CASE( "multi_series_test", "[serial]" ) +TEST_CASE("multi_series_test", "[serial]") { - std::list< Series > allSeries; + std::list allSeries; auto myfileExtensions = testedFileExtensions(); - // this test demonstrates an ADIOS1 (upstream) bug, comment this section to trigger it - auto const rmEnd = std::remove_if( myfileExtensions.begin(), myfileExtensions.end(), [](std::string const & beit) { - return beit == "bp" && - determineFormat("test.bp") == Format::ADIOS1; - }); + // this test demonstrates an ADIOS1 (upstream) bug, comment this section to + // trigger it + auto const rmEnd = std::remove_if( + myfileExtensions.begin(), + myfileExtensions.end(), + [](std::string 
const &beit) { + return beit == "bp" && determineFormat("test.bp") == Format::ADIOS1; + }); myfileExtensions.erase(rmEnd, myfileExtensions.end()); // have multiple serial series alive at the same time - for (auto const sn : {1, 2, 3}) { - for (auto const & t: myfileExtensions) + for (auto const sn : {1, 2, 3}) + { + for (auto const &t : myfileExtensions) { auto const file_ending = t; std::cout << file_ending << std::endl; allSeries.emplace_back( - std::string("../samples/multi_open_test_"). - append(std::to_string(sn)).append(".").append(file_ending), - Access::CREATE - ); + std::string("../samples/multi_open_test_") + .append(std::to_string(sn)) + .append(".") + .append(file_ending), + Access::CREATE); allSeries.back().iterations[sn].setAttribute("wululu", sn); allSeries.back().flush(); } } // skip some series: sn=1 auto it = allSeries.begin(); - std::for_each( myfileExtensions.begin(), myfileExtensions.end(), [&it](std::string const &){ - it++; - }); + std::for_each( + myfileExtensions.begin(), + myfileExtensions.end(), + [&it](std::string const &) { it++; }); // remove some series: sn=2 - std::for_each( myfileExtensions.begin(), myfileExtensions.end(), [&it, &allSeries](std::string const &){ - it = allSeries.erase(it); - }); + std::for_each( + myfileExtensions.begin(), + myfileExtensions.end(), + [&it, &allSeries](std::string const &) { it = allSeries.erase(it); }); // write from last series: sn=3 - std::for_each( myfileExtensions.begin(), myfileExtensions.end(), [&it](std::string const &){ - it->iterations[10].setAttribute("wululu", 10); - it->flush(); - it++; - }); + std::for_each( + myfileExtensions.begin(), + myfileExtensions.end(), + [&it](std::string const &) { + it->iterations[10].setAttribute("wululu", 10); + it->flush(); + it++; + }); // remove all leftover series allSeries.clear(); } -TEST_CASE( "available_chunks_test_json", "[serial][json]" ) +TEST_CASE("available_chunks_test_json", "[serial][json]") { /* * This test is JSON specific @@ -361,63 +368,62 @@ TEST_CASE( "available_chunks_test_json", "[serial][json]" ) constexpr unsigned height = 10; std::string name = "../samples/available_chunks.json"; - std::vector< int > data{ 2, 4, 6, 8 }; + std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE ); - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { height, 4 } } ); - for( unsigned line = 2; line < 7; ++line ) + Series write(name, Access::CREATE); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {height, 4}}); + for (unsigned line = 2; line < 7; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 4 } ); + E_x.storeChunk(data, {line, 0}, {1, 4}); } - for( unsigned line = 7; line < 9; ++line ) + for (unsigned line = 7; line < 9; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 2 } ); + E_x.storeChunk(data, {line, 0}, {1, 2}); } - E_x.storeChunk( data, { 8, 3 }, { 2, 1 } ); + E_x.storeChunk(data, {8, 3}, {2, 1}); - auto E_y = it0.meshes[ "E" ][ "y" ]; - E_y.resetDataset( { Datatype::INT, { height, 4 } } ); - E_y.makeConstant( 1234 ); + auto E_y = it0.meshes["E"]["y"]; + E_y.resetDataset({Datatype::INT, {height, 4}}); + E_y.makeConstant(1234); it0.close(); } { - Series read( name, Access::READ_ONLY ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; + Series read(name, Access::READ_ONLY); + Iteration it0 = read.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); - 
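
availableChunks() is what the surrounding test probes: it reports which sub-blocks of a dataset were actually written, merging adjacent blocks where the backend can. A small sketch of the same query against a throwaway JSON file follows; the file name and the chunk layout are illustrative only and not taken from the patch.

```cpp
// Sketch, not part of the patch: query which chunks were written.
#include <openPMD/openPMD.hpp>

#include <iostream>
#include <vector>

int main()
{
    using namespace openPMD;
    std::vector<int> data{2, 4, 6, 8};
    {
        Series write("../samples/chunk_sketch.json", Access::CREATE);
        Iteration it0 = write.iterations[0];
        auto E_x = it0.meshes["E"]["x"];
        E_x.resetDataset({Datatype::INT, {10, 4}});
        // two adjacent lines; the backend may merge them into one chunk
        E_x.storeChunk(data, {2, 0}, {1, 4});
        E_x.storeChunk(data, {3, 0}, {1, 4});
        it0.close();
    }
    Series read("../samples/chunk_sketch.json", Access::READ_ONLY);
    Iteration it0 = read.iterations[0];
    auto E_x = it0.meshes["E"]["x"];
    for (WrittenChunkInfo const &chunk : E_x.availableChunks())
    {
        std::cout << "offset: " << chunk.offset[0] << ", " << chunk.offset[1]
                  << "  extent: " << chunk.extent[0] << ", " << chunk.extent[1]
                  << '\n';
    }
    return 0;
}
```
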
REQUIRE( table.size() == 3 ); + REQUIRE(table.size() == 3); /* * Explicitly convert things to bool, so Catch doesn't get the splendid * idea to print the Chunk struct. */ - REQUIRE( bool( table[ 0 ] == WrittenChunkInfo( { 2, 0 }, { 5, 4 } ) ) ); - REQUIRE( bool( table[ 1 ] == WrittenChunkInfo( { 7, 0 }, { 2, 2 } ) ) ); - REQUIRE( bool( table[ 2 ] == WrittenChunkInfo( { 8, 3 }, { 2, 1 } ) ) ); + REQUIRE(bool(table[0] == WrittenChunkInfo({2, 0}, {5, 4}))); + REQUIRE(bool(table[1] == WrittenChunkInfo({7, 0}, {2, 2}))); + REQUIRE(bool(table[2] == WrittenChunkInfo({8, 3}, {2, 1}))); - auto E_y = it0.meshes[ "E" ][ "y" ]; + auto E_y = it0.meshes["E"]["y"]; table = E_y.availableChunks(); - REQUIRE( table.size() == 1 ); - REQUIRE( - bool( table[ 0 ] == WrittenChunkInfo( { 0, 0 }, { height, 4 } ) ) ); + REQUIRE(table.size() == 1); + REQUIRE(bool(table[0] == WrittenChunkInfo({0, 0}, {height, 4}))); } } -TEST_CASE( "multiple_series_handles_test", "[serial]" ) +TEST_CASE("multiple_series_handles_test", "[serial]") { /* * First test: No premature flushes through destructor when another copy * is still around */ { - std::unique_ptr< openPMD::Series > series_ptr; + std::unique_ptr series_ptr; { openPMD::Series series( - "sample%T.json", openPMD::AccessType::CREATE ); - series_ptr = std::make_unique< openPMD::Series >( series ); + "sample%T.json", openPMD::AccessType::CREATE); + series_ptr = std::make_unique(series); /* * we have two handles for the same Series instance now: * series and series_ptr @@ -427,20 +433,19 @@ TEST_CASE( "multiple_series_handles_test", "[serial]" ) * since no iteration has been written yet, an error will be thrown */ } - series_ptr->iterations[ 0 ].meshes[ "E" ][ "x" ].makeEmpty< int >( 1 ); + series_ptr->iterations[0].meshes["E"]["x"].makeEmpty(1); } /* * Second test: A Series handle should remain accessible even if the * original handle is destroyed */ { - std::unique_ptr< openPMD::Series > series_ptr; + std::unique_ptr series_ptr; { openPMD::Series series( - "sample%T.json", openPMD::AccessType::CREATE ); - series_ptr = std::make_unique< openPMD::Series >( series ); - series_ptr->iterations[ 0 ].meshes[ "E" ][ "x" ].makeEmpty< int >( - 1 ); + "sample%T.json", openPMD::AccessType::CREATE); + series_ptr = std::make_unique(series); + series_ptr->iterations[0].meshes["E"]["x"].makeEmpty(1); } /* * series_ptr is still in scope, but the original Series instance @@ -453,8 +458,7 @@ TEST_CASE( "multiple_series_handles_test", "[serial]" ) } } -void -close_iteration_test( std::string file_ending ) +void close_iteration_test(std::string file_ending) { std::string name = "../samples/close_iterations_%T." 
+ file_ending; @@ -463,11 +467,11 @@ close_iteration_test( std::string file_ending ) Series write(name, Access::CREATE); bool isAdios1 = write.backend() == "ADIOS1"; { - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 2, 2 } ); - it0.close( /* flush = */ false ); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {2, 2}); + it0.close(/* flush = */ false); } write.flush(); // } @@ -476,198 +480,193 @@ close_iteration_test( std::string file_ending ) { // run a simplified test for Adios1 since Adios1 has issues opening // twice in the same process - REQUIRE( auxiliary::file_exists( "../samples/close_iterations_0.bp" ) ); + REQUIRE(auxiliary::file_exists("../samples/close_iterations_0.bp")); } else { - Series read( name, Access::READ_ONLY ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x_read = it0.meshes[ "E" ][ "x" ]; - auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { 2, 2 } ); - it0.close( /* flush = */ false ); + Series read(name, Access::READ_ONLY); + Iteration it0 = read.iterations[0]; + auto E_x_read = it0.meshes["E"]["x"]; + auto chunk = E_x_read.loadChunk({0, 0}, {2, 2}); + it0.close(/* flush = */ false); read.flush(); - for( size_t i = 0; i < data.size(); ++i ) + for (size_t i = 0; i < data.size(); ++i) { - REQUIRE( data[ i ] == chunk.get()[ i ] ); + REQUIRE(data[i] == chunk.get()[i]); } } { Iteration it1 = write.iterations[1]; - auto E_x = it1.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 2, 2 } ); - it1.close( /* flush = */ true ); + auto E_x = it1.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {2, 2}); + it1.close(/* flush = */ true); // illegally access iteration after closing - E_x.storeChunk( data, { 0, 0 }, { 2, 2 } ); - REQUIRE_THROWS( write.flush() ); + E_x.storeChunk(data, {0, 0}, {2, 2}); + REQUIRE_THROWS(write.flush()); } if (isAdios1) { // run a simplified test for Adios1 since Adios1 has issues opening // twice in the same process - REQUIRE( auxiliary::file_exists( "../samples/close_iterations_1.bp" ) ); + REQUIRE(auxiliary::file_exists("../samples/close_iterations_1.bp")); } else { - Series read( name, Access::READ_ONLY ); - Iteration it1 = read.iterations[ 1 ]; - auto E_x_read = it1.meshes[ "E" ][ "x" ]; - auto chunk = E_x_read.loadChunk< int >( { 0, 0 }, { 2, 2 } ); - it1.close( /* flush = */ true ); - for( size_t i = 0; i < data.size(); ++i ) + Series read(name, Access::READ_ONLY); + Iteration it1 = read.iterations[1]; + auto E_x_read = it1.meshes["E"]["x"]; + auto chunk = E_x_read.loadChunk({0, 0}, {2, 2}); + it1.close(/* flush = */ true); + for (size_t i = 0; i < data.size(); ++i) { - REQUIRE( data[ i ] == chunk.get()[ i ] ); + REQUIRE(data[i] == chunk.get()[i]); } - auto read_again = E_x_read.loadChunk< int >( { 0, 0 }, { 2, 2 } ); - REQUIRE_THROWS( read.flush() ); + auto read_again = E_x_read.loadChunk({0, 0}, {2, 2}); + REQUIRE_THROWS(read.flush()); } { - Series list{ name, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{name, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "close_iteration_test", "[serial]" ) +TEST_CASE("close_iteration_test", "[serial]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - close_iteration_test( t ); + 
close_iteration_test(t); } } -void -close_iteration_interleaved_test( std::string const file_ending, - IterationEncoding const it_encoding ) +void close_iteration_interleaved_test( + std::string const file_ending, IterationEncoding const it_encoding) { std::string name = "../samples/close_iterations_interleaved_"; - if( it_encoding == IterationEncoding::fileBased ) - name.append( "f_%T" ); - else if( it_encoding == IterationEncoding::groupBased ) - name.append( "g" ); - else if( it_encoding == IterationEncoding::variableBased ) - name.append( "v" ); - name.append( "." ).append( file_ending ); + if (it_encoding == IterationEncoding::fileBased) + name.append("f_%T"); + else if (it_encoding == IterationEncoding::groupBased) + name.append("g"); + else if (it_encoding == IterationEncoding::variableBased) + name.append("v"); + name.append(".").append(file_ending); std::cout << name << std::endl; std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE ); - write.setIterationEncoding( it_encoding ); + Series write(name, Access::CREATE); + write.setIterationEncoding(it_encoding); // interleaved write pattern - Iteration it1 = write.iterations[ 1 ]; - auto E_x = it1.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 1, 2 } ); + Iteration it1 = write.iterations[1]; + auto E_x = it1.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {1, 2}); E_x.seriesFlush(); - Iteration it2 = write.iterations[ 2 ]; - E_x = it2.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 1, 2 } ); + Iteration it2 = write.iterations[2]; + E_x = it2.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {1, 2}); E_x.seriesFlush(); - E_x = it1.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 0 }, { 1, 1 } ); + E_x = it1.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 0}, {1, 1}); E_x.seriesFlush(); - E_x = it2.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 0 }, { 1, 1 } ); + E_x = it2.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 0}, {1, 1}); E_x.seriesFlush(); // now we start a third iteration - Iteration it3 = write.iterations[ 3 ]; - E_x = it3.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 2, 2 } } ); - E_x.storeChunk( data, { 0, 0 }, { 1, 2 } ); + Iteration it3 = write.iterations[3]; + E_x = it3.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {2, 2}}); + E_x.storeChunk(data, {0, 0}, {1, 2}); E_x.seriesFlush(); // let's finish writing to 1 and 2 - E_x = it1.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 1 }, { 1, 1 } ); + E_x = it1.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 1}, {1, 1}); E_x.seriesFlush(); it1.close(); - E_x = it2.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 1 }, { 1, 1 } ); + E_x = it2.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 1}, {1, 1}); E_x.seriesFlush(); it2.close(); - E_x = it3.meshes[ "E" ][ "x" ]; - E_x.storeChunk( data, { 1, 0 }, { 1, 2 } ); + E_x = it3.meshes["E"]["x"]; + E_x.storeChunk(data, {1, 0}, {1, 2}); E_x.seriesFlush(); it3.close(); } } -TEST_CASE( "close_iteration_interleaved_test", "[serial]" ) +TEST_CASE("close_iteration_interleaved_test", "[serial]") { bool const bp_prefer_adios1 = - ( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ); + (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1"); - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - 
close_iteration_interleaved_test( t, IterationEncoding::fileBased ); - close_iteration_interleaved_test( t, IterationEncoding::groupBased ); + close_iteration_interleaved_test(t, IterationEncoding::fileBased); + close_iteration_interleaved_test(t, IterationEncoding::groupBased); // run this test for ADIOS2 & JSON only - if( t == "h5" ) + if (t == "h5") continue; - if( t == "bp" && bp_prefer_adios1 ) + if (t == "bp" && bp_prefer_adios1) continue; - close_iteration_interleaved_test( t, IterationEncoding::variableBased ); + close_iteration_interleaved_test(t, IterationEncoding::variableBased); } } -void -close_and_copy_attributable_test( std::string file_ending ) +void close_and_copy_attributable_test(std::string file_ending) { using position_t = double; // open file for writing - Series series( "electrons." + file_ending, Access::CREATE ); + Series series("electrons." + file_ending, Access::CREATE); - Datatype datatype = determineDatatype< position_t >(); + Datatype datatype = determineDatatype(); constexpr unsigned long length = 10ul; - Extent global_extent = { length }; - Dataset dataset = Dataset( datatype, global_extent ); - std::shared_ptr< position_t > local_data( - new position_t[ length ], - []( position_t const * ptr ) { delete[] ptr; } ); + Extent global_extent = {length}; + Dataset dataset = Dataset(datatype, global_extent); + std::shared_ptr local_data( + new position_t[length], [](position_t const *ptr) { delete[] ptr; }); - std::unique_ptr< Iteration > iteration_ptr; - for( size_t i = 0; i < 100; i+=10 ) + std::unique_ptr iteration_ptr; + for (size_t i = 0; i < 100; i += 10) { - if( iteration_ptr ) + if (iteration_ptr) { - *iteration_ptr = series.iterations[ i ]; + *iteration_ptr = series.iterations[i]; } else { // use copy constructor - iteration_ptr = - std::make_unique< Iteration >( series.iterations[ i ] ); + iteration_ptr = std::make_unique(series.iterations[i]); } - Record electronPositions = - iteration_ptr->particles[ "e" ][ "position" ]; + Record electronPositions = iteration_ptr->particles["e"]["position"]; // TODO set this automatically to zero if not provided Record electronPositionsOffset = - iteration_ptr->particles[ "e" ][ "positionOffset" ]; + iteration_ptr->particles["e"]["positionOffset"]; - std::iota( local_data.get(), local_data.get() + length, i * length ); - for( auto const & dim : { "x", "y", "z" } ) + std::iota(local_data.get(), local_data.get() + length, i * length); + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); - pos.storeChunk( local_data, Offset{ 0 }, global_extent ); + RecordComponent pos = electronPositions[dim]; + pos.resetDataset(dataset); + pos.storeChunk(local_data, Offset{0}, global_extent); - RecordComponent posOff = electronPositionsOffset[ dim ]; - posOff.resetDataset( dataset ); - posOff.makeConstant( position_t( 0.0 ) ); + RecordComponent posOff = electronPositionsOffset[dim]; + posOff.resetDataset(dataset); + posOff.makeConstant(position_t(0.0)); } iteration_ptr->close(); // force re-flush of previous iterations @@ -675,351 +674,442 @@ close_and_copy_attributable_test( std::string file_ending ) } } -TEST_CASE( "close_and_copy_attributable_test", "[serial]" ) +TEST_CASE("close_and_copy_attributable_test", "[serial]") { // demonstrator for https://github.com/openPMD/openPMD-api/issues/765 - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - close_and_copy_attributable_test( t ); + 
close_and_copy_attributable_test(t); } } #if openPMD_HAVE_ADIOS2 -TEST_CASE( "close_iteration_throws_test", "[serial]" ) +TEST_CASE("close_iteration_throws_test", "[serial]") { /* * Iterations should not be accessed any more after closing. * Test that the openPMD API detects that case and throws. */ { - Series series( - "../samples/close_iteration_throws_1.bp", Access::CREATE ); - auto it0 = series.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 5 } } ); - std::vector< int > data{ 0, 1, 2, 3, 4 }; - E_x.storeChunk( data, { 0 }, { 5 } ); + Series series("../samples/close_iteration_throws_1.bp", Access::CREATE); + auto it0 = series.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {5}}); + std::vector data{0, 1, 2, 3, 4}; + E_x.storeChunk(data, {0}, {5}); it0.close(); - auto B_y = it0.meshes[ "B" ][ "y" ]; - B_y.resetDataset( { Datatype::INT, { 5 } } ); - B_y.storeChunk( data, { 0 }, { 5 } ); - REQUIRE_THROWS( series.flush() ); + auto B_y = it0.meshes["B"]["y"]; + B_y.resetDataset({Datatype::INT, {5}}); + B_y.storeChunk(data, {0}, {5}); + REQUIRE_THROWS(series.flush()); } { - Series series( - "../samples/close_iteration_throws_2.bp", Access::CREATE ); - auto it0 = series.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 5 } } ); - std::vector< int > data{ 0, 1, 2, 3, 4 }; - E_x.storeChunk( data, { 0 }, { 5 } ); + Series series("../samples/close_iteration_throws_2.bp", Access::CREATE); + auto it0 = series.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {5}}); + std::vector data{0, 1, 2, 3, 4}; + E_x.storeChunk(data, {0}, {5}); it0.close(); - auto e_position_x = it0.particles[ "e" ][ "position" ][ "x" ]; - e_position_x.resetDataset( { Datatype::INT, { 5 } } ); - e_position_x.storeChunk( data, { 0 }, { 5 } ); - REQUIRE_THROWS( series.flush() ); + auto e_position_x = it0.particles["e"]["position"]["x"]; + e_position_x.resetDataset({Datatype::INT, {5}}); + e_position_x.storeChunk(data, {0}, {5}); + REQUIRE_THROWS(series.flush()); } { - Series series( - "../samples/close_iteration_throws_3.bp", Access::CREATE ); - auto it0 = series.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { 5 } } ); - std::vector< int > data{ 0, 1, 2, 3, 4 }; - E_x.storeChunk( data, { 0 }, { 5 } ); + Series series("../samples/close_iteration_throws_3.bp", Access::CREATE); + auto it0 = series.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {5}}); + std::vector data{0, 1, 2, 3, 4}; + E_x.storeChunk(data, {0}, {5}); it0.close(); - it0.setTimeUnitSI( 2.0 ); - REQUIRE_THROWS( series.flush() ); + it0.setTimeUnitSI(2.0); + REQUIRE_THROWS(series.flush()); } } #endif -inline void -empty_dataset_test( std::string file_ending ) +inline void empty_dataset_test(std::string file_ending) { { Series series( - "../samples/empty_datasets." + file_ending, Access::CREATE ); + "../samples/empty_datasets." 
+ file_ending, Access::CREATE);
         auto makeEmpty_dim_7_int =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int"];
         auto makeEmpty_dim_7_long =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool"];
         auto makeEmpty_dim_7_int_alt =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int_alt" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int_alt"];
         auto makeEmpty_dim_7_long_alt =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool_alt" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool_alt"];
         auto makeEmpty_resetDataset_dim3 =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_resetDataset_dim3" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_resetDataset_dim3"];
         auto makeEmpty_resetDataset_dim3_notallzero =
-            series.iterations[ 1 ]
-                .meshes[ "rho" ][ "makeEmpty_resetDataset_dim3_notallzero" ];
-        makeEmpty_dim_7_int.makeEmpty< int >( 7 );
-        makeEmpty_dim_7_long.makeEmpty< long >( 7 );
-        makeEmpty_dim_7_int_alt.makeEmpty( Datatype::INT, 7 );
-        makeEmpty_dim_7_long_alt.makeEmpty( Datatype::LONG, 7 );
+            series.iterations[1]
+                .meshes["rho"]["makeEmpty_resetDataset_dim3_notallzero"];
+        makeEmpty_dim_7_int.makeEmpty<int>(7);
+        makeEmpty_dim_7_long.makeEmpty<long>(7);
+        makeEmpty_dim_7_int_alt.makeEmpty(Datatype::INT, 7);
+        makeEmpty_dim_7_long_alt.makeEmpty(Datatype::LONG, 7);
         makeEmpty_resetDataset_dim3.resetDataset(
-            Dataset( Datatype::LONG, Extent( 3, 0 ) ) );
+            Dataset(Datatype::LONG, Extent(3, 0)));
         makeEmpty_resetDataset_dim3_notallzero.resetDataset(
-            Dataset( Datatype::LONG_DOUBLE, Extent{ 1, 2, 0 } ) );
+            Dataset(Datatype::LONG_DOUBLE, Extent{1, 2, 0}));
         series.flush();
-
     }
     {
         Series series(
-            "../samples/empty_datasets." + file_ending, Access::READ_ONLY );
+            "../samples/empty_datasets." + file_ending, Access::READ_ONLY);
         REQUIRE(series.iterations.contains(1));
         REQUIRE(series.iterations.count(1) == 1);
         REQUIRE(series.iterations.count(123456) == 0);
         REQUIRE(series.iterations[1].meshes.contains("rho"));
-        REQUIRE(series.iterations[1].meshes["rho"].contains("makeEmpty_dim_7_int"));
+        REQUIRE(
+            series.iterations[1].meshes["rho"].contains("makeEmpty_dim_7_int"));
         auto makeEmpty_dim_7_int =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int"];
         auto makeEmpty_dim_7_long =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool"];
         auto makeEmpty_dim_7_int_alt =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_int_alt" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_int_alt"];
         auto makeEmpty_dim_7_long_alt =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_dim_7_bool_alt" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_dim_7_bool_alt"];
         auto makeEmpty_resetDataset_dim3 =
-            series.iterations[ 1 ].meshes[ "rho" ][ "makeEmpty_resetDataset_dim3" ];
+            series.iterations[1].meshes["rho"]["makeEmpty_resetDataset_dim3"];
         auto makeEmpty_resetDataset_dim3_notallzero =
-            series.iterations[ 1 ]
-                .meshes[ "rho" ][ "makeEmpty_resetDataset_dim3_notallzero" ];
+            series.iterations[1]
+                .meshes["rho"]["makeEmpty_resetDataset_dim3_notallzero"];
         REQUIRE(makeEmpty_dim_7_int.getDimensionality() == 7);
         REQUIRE(makeEmpty_dim_7_int.getExtent() == Extent(7, 0));
-        REQUIRE(isSame(makeEmpty_dim_7_int.getDatatype(), determineDatatype< int >()));
+        REQUIRE(isSame(
+            makeEmpty_dim_7_int.getDatatype(), determineDatatype<int>()));
         REQUIRE(makeEmpty_dim_7_long.getDimensionality() == 7);
         REQUIRE(makeEmpty_dim_7_long.getExtent() == Extent(7, 0));
-        REQUIRE(isSame(makeEmpty_dim_7_long.getDatatype(), determineDatatype< long >()));
+        REQUIRE(isSame(
+            makeEmpty_dim_7_long.getDatatype(), determineDatatype<long>()));
         REQUIRE(makeEmpty_dim_7_int_alt.getDimensionality() == 7);
         REQUIRE(makeEmpty_dim_7_int_alt.getExtent() == Extent(7, 0));
-        REQUIRE(isSame(makeEmpty_dim_7_int_alt.getDatatype(), determineDatatype< int >()));
+        REQUIRE(isSame(
+            makeEmpty_dim_7_int_alt.getDatatype(), determineDatatype<int>()));
         REQUIRE(makeEmpty_dim_7_long_alt.getDimensionality() == 7);
         REQUIRE(makeEmpty_dim_7_long_alt.getExtent() == Extent(7, 0));
-        REQUIRE(isSame(makeEmpty_dim_7_long_alt.getDatatype(), determineDatatype< long >()));
+        REQUIRE(isSame(
+            makeEmpty_dim_7_long_alt.getDatatype(), determineDatatype<long>()));
         REQUIRE(makeEmpty_resetDataset_dim3.getDimensionality() == 3);
         REQUIRE(makeEmpty_resetDataset_dim3.getExtent() == Extent(3, 0));
-        REQUIRE(isSame(makeEmpty_resetDataset_dim3.getDatatype(), Datatype::LONG));
+        REQUIRE(
+            isSame(makeEmpty_resetDataset_dim3.getDatatype(), Datatype::LONG));
 
-        REQUIRE(makeEmpty_resetDataset_dim3_notallzero.getDimensionality() == 3);
-        REQUIRE(makeEmpty_resetDataset_dim3_notallzero.getExtent() == Extent{1,2,0});
-        REQUIRE(isSame(makeEmpty_resetDataset_dim3_notallzero.getDatatype(), Datatype::LONG_DOUBLE));
+        REQUIRE(
+            makeEmpty_resetDataset_dim3_notallzero.getDimensionality() == 3);
+        REQUIRE(
+            makeEmpty_resetDataset_dim3_notallzero.getExtent() ==
+            Extent{1, 2, 0});
+        REQUIRE(isSame(
+            makeEmpty_resetDataset_dim3_notallzero.getDatatype(),
+            Datatype::LONG_DOUBLE));
     }
     {
-        Series list{ "../samples/empty_datasets." + file_ending, Access::READ_ONLY };
-        helper::listSeries( list );
+        Series list{
+            "../samples/empty_datasets."
+ file_ending, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "empty_dataset_test", "[serial]" ) +TEST_CASE("empty_dataset_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - empty_dataset_test( t ); + empty_dataset_test(t); } } -inline -void constant_scalar(std::string file_ending) +inline void constant_scalar(std::string file_ending) { Mesh::Geometry const geometry = Mesh::Geometry::spherical; std::string const geometryParameters = "dummyGeometryParameters"; Mesh::DataOrder const dataOrder = Mesh::DataOrder::F; - std::vector< double > const gridSpacing { 1.0, 2.0, 3.0 }; - std::vector< double > const gridGlobalOffset{ 11.0, 22.0, 33.0 }; + std::vector const gridSpacing{1.0, 2.0, 3.0}; + std::vector const gridGlobalOffset{11.0, 22.0, 33.0}; double const gridUnitSI = 3.14; - std::vector< std::string > const axisLabels { "x", "y", "z" }; - std::map< UnitDimension, double > const unitDimensions{ - {UnitDimension::I, 1.0}, - {UnitDimension::J, 2.0} - }; + std::vector const axisLabels{"x", "y", "z"}; + std::map const unitDimensions{ + {UnitDimension::I, 1.0}, {UnitDimension::J, 2.0}}; double const timeOffset = 1234.0; { // constant scalar - Series s = Series("../samples/constant_scalar." + file_ending, Access::CREATE); + Series s = + Series("../samples/constant_scalar." + file_ending, Access::CREATE); auto rho = s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR]; REQUIRE(s.iterations[1].meshes["rho"].scalar()); rho.resetDataset(Dataset(Datatype::CHAR, {1, 2, 3})); - rho.makeConstant(static_cast< char >('a')); + rho.makeConstant(static_cast('a')); REQUIRE(rho.constant()); // mixed constant/non-constant auto E_x = s.iterations[1].meshes["E"]["x"]; E_x.resetDataset(Dataset(Datatype::FLOAT, {1, 2, 3})); - E_x.makeConstant(static_cast< float >(13.37)); + E_x.makeConstant(static_cast(13.37)); auto E_y = s.iterations[1].meshes["E"]["y"]; E_y.resetDataset(Dataset(Datatype::UINT, {1, 2, 3})); - std::shared_ptr< unsigned int > E(new unsigned int[6], [](unsigned int const *p){ delete[] p; }); + std::shared_ptr E( + new unsigned int[6], [](unsigned int const *p) { delete[] p; }); unsigned int e{0}; - std::generate(E.get(), E.get() + 6, [&e]{ return e++; }); + std::generate(E.get(), E.get() + 6, [&e] { return e++; }); E_y.storeChunk(E, {0, 0, 0}, {1, 2, 3}); // store a number of predefined attributes in E - Mesh & E_mesh = s.iterations[1].meshes["E"]; - E_mesh.setGeometry( geometry ); - E_mesh.setGeometryParameters( geometryParameters ); - E_mesh.setDataOrder( dataOrder ); - E_mesh.setGridSpacing( gridSpacing ); - E_mesh.setGridGlobalOffset( gridGlobalOffset ); - E_mesh.setGridUnitSI( gridUnitSI ); - E_mesh.setAxisLabels( axisLabels ); + Mesh &E_mesh = s.iterations[1].meshes["E"]; + E_mesh.setGeometry(geometry); + E_mesh.setGeometryParameters(geometryParameters); + E_mesh.setDataOrder(dataOrder); + E_mesh.setGridSpacing(gridSpacing); + E_mesh.setGridGlobalOffset(gridGlobalOffset); + E_mesh.setGridUnitSI(gridUnitSI); + E_mesh.setAxisLabels(axisLabels); E_mesh.setUnitDimension(unitDimensions); - E_mesh.setTimeOffset( timeOffset ); + E_mesh.setTimeOffset(timeOffset); // constant scalar - auto pos = s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR]; + auto pos = + s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR]; pos.resetDataset(Dataset(Datatype::DOUBLE, {3, 2, 1})); - pos.makeConstant(static_cast< double >(42.)); - auto posOff = 
s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR]; + pos.makeConstant(static_cast(42.)); + auto posOff = + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR]; posOff.resetDataset(Dataset(Datatype::INT, {3, 2, 1})); - posOff.makeConstant(static_cast< int >(-42)); + posOff.makeConstant(static_cast(-42)); // mixed constant/non-constant auto vel_x = s.iterations[1].particles["e"]["velocity"]["x"]; vel_x.resetDataset(Dataset(Datatype::SHORT, {3, 2, 1})); - vel_x.makeConstant(static_cast< short >(-1)); + vel_x.makeConstant(static_cast(-1)); auto vel_y = s.iterations[1].particles["e"]["velocity"]["y"]; vel_y.resetDataset(Dataset(Datatype::ULONGLONG, {3, 2, 1})); - std::shared_ptr< unsigned long long > vel(new unsigned long long[6], [](unsigned long long const *p){ delete[] p; }); + std::shared_ptr vel( + new unsigned long long[6], + [](unsigned long long const *p) { delete[] p; }); unsigned long long v{0}; - std::generate(vel.get(), vel.get() + 6, [&v]{ return v++; }); + std::generate(vel.get(), vel.get() + 6, [&v] { return v++; }); vel_y.storeChunk(vel, {0, 0, 0}, {3, 2, 1}); } { - Series s = Series("../samples/constant_scalar." + file_ending, Access::READ_ONLY); + Series s = Series( + "../samples/constant_scalar." + file_ending, Access::READ_ONLY); REQUIRE(s.iterations[1].meshes.count("rho") == 1); - REQUIRE(s.iterations[1].meshes["rho"].count(MeshRecordComponent::SCALAR) == 1); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].containsAttribute("shape")); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{1, 2, 3}); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].containsAttribute("value")); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].getAttribute("value").get< char >() == 'a'); + REQUIRE( + s.iterations[1].meshes["rho"].count(MeshRecordComponent::SCALAR) == + 1); + REQUIRE(s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .containsAttribute("shape")); + REQUIRE( + s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .getAttribute("shape") + .get>() == Extent{1, 2, 3}); + REQUIRE(s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .containsAttribute("value")); + REQUIRE( + s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .getAttribute("value") + .get() == 'a'); REQUIRE(s.iterations[1].meshes["rho"].scalar()); - REQUIRE(s.iterations[1].meshes["rho"][MeshRecordComponent::SCALAR].constant()); + REQUIRE(s.iterations[1] + .meshes["rho"][MeshRecordComponent::SCALAR] + .constant()); REQUIRE(s.iterations[1].meshes.count("E") == 1); REQUIRE(!s.iterations[1].meshes["E"].scalar()); REQUIRE(s.iterations[1].meshes["E"].count("x") == 1); REQUIRE(s.iterations[1].meshes["E"]["x"].constant()); REQUIRE(s.iterations[1].meshes["E"]["x"].containsAttribute("shape")); - REQUIRE(s.iterations[1].meshes["E"]["x"].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{1, 2, 3}); + REQUIRE( + s.iterations[1] + .meshes["E"]["x"] + .getAttribute("shape") + .get>() == Extent{1, 2, 3}); REQUIRE(s.iterations[1].meshes["E"]["x"].containsAttribute("value")); - REQUIRE(s.iterations[1].meshes["E"]["x"].getAttribute("value").get< float >() == static_cast< float >(13.37)); + REQUIRE( + s.iterations[1] + .meshes["E"]["x"] + .getAttribute("value") + .get() == static_cast(13.37)); REQUIRE(!s.iterations[1].meshes["E"]["y"].constant()); - 
REQUIRE(s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); + REQUIRE( + s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); REQUIRE(s.iterations[1].meshes["E"].count("y") == 1); REQUIRE(!s.iterations[1].meshes["E"]["y"].containsAttribute("shape")); REQUIRE(!s.iterations[1].meshes["E"]["y"].containsAttribute("value")); - REQUIRE(s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); + REQUIRE( + s.iterations[1].meshes["E"]["y"].getExtent() == Extent{1, 2, 3}); REQUIRE(s.iterations[1].particles.count("e") == 1); REQUIRE(s.iterations[1].particles["e"].count("position") == 1); - REQUIRE(s.iterations[1].particles["e"]["position"].count(RecordComponent::SCALAR) == 1); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].containsAttribute("shape")); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{3, 2, 1}); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].getAttribute("value").get< double >() == 42.); - REQUIRE(s.iterations[1].particles["e"]["position"][RecordComponent::SCALAR].getExtent() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["position"].count( + RecordComponent::SCALAR) == 1); + REQUIRE(s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .containsAttribute("shape")); + REQUIRE( + s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .getAttribute("shape") + .get>() == Extent{3, 2, 1}); + REQUIRE(s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .containsAttribute("value")); + REQUIRE( + s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .getAttribute("value") + .get() == 42.); + REQUIRE( + s.iterations[1] + .particles["e"]["position"][RecordComponent::SCALAR] + .getExtent() == Extent{3, 2, 1}); REQUIRE(s.iterations[1].particles["e"].count("positionOffset") == 1); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"].count(RecordComponent::SCALAR) == 1); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].containsAttribute("shape")); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{3, 2, 1}); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].getAttribute("value").get< int >() == -42); - REQUIRE(s.iterations[1].particles["e"]["positionOffset"][RecordComponent::SCALAR].getExtent() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["positionOffset"].count( + RecordComponent::SCALAR) == 1); + REQUIRE(s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .containsAttribute("shape")); + REQUIRE( + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .getAttribute("shape") + .get>() == Extent{3, 2, 1}); + REQUIRE(s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .containsAttribute("value")); + REQUIRE( + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .getAttribute("value") + .get() == -42); + REQUIRE( + s.iterations[1] + .particles["e"]["positionOffset"][RecordComponent::SCALAR] + .getExtent() == 
Extent{3, 2, 1}); REQUIRE(s.iterations[1].particles["e"].count("velocity") == 1); REQUIRE(s.iterations[1].particles["e"]["velocity"].count("x") == 1); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute("shape")); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].getAttribute("shape").get< std::vector< uint64_t > >() == Extent{3, 2, 1}); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].getAttribute("value").get< short >() == -1); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["x"].getExtent() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute( + "shape")); + REQUIRE( + s.iterations[1] + .particles["e"]["velocity"]["x"] + .getAttribute("shape") + .get>() == Extent{3, 2, 1}); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["x"].containsAttribute( + "value")); + REQUIRE( + s.iterations[1] + .particles["e"]["velocity"]["x"] + .getAttribute("value") + .get() == -1); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["x"].getExtent() == + Extent{3, 2, 1}); REQUIRE(s.iterations[1].particles["e"]["velocity"].count("y") == 1); - REQUIRE(!s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute("shape")); - REQUIRE(!s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute("value")); - REQUIRE(s.iterations[1].particles["e"]["velocity"]["y"].getExtent() == Extent{3, 2, 1}); - - Mesh & E_mesh = s.iterations[1].meshes["E"]; - REQUIRE( E_mesh.geometry() == geometry ); - REQUIRE( E_mesh.geometryParameters() == geometryParameters ); - REQUIRE( E_mesh.dataOrder() == dataOrder ); - REQUIRE( E_mesh.gridSpacing< double >() == gridSpacing ); - REQUIRE( E_mesh.gridGlobalOffset() == gridGlobalOffset ); - REQUIRE( E_mesh.gridUnitSI() == gridUnitSI ); - REQUIRE( E_mesh.axisLabels() == axisLabels ); + REQUIRE( + !s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute( + "shape")); + REQUIRE( + !s.iterations[1].particles["e"]["velocity"]["y"].containsAttribute( + "value")); + REQUIRE( + s.iterations[1].particles["e"]["velocity"]["y"].getExtent() == + Extent{3, 2, 1}); + + Mesh &E_mesh = s.iterations[1].meshes["E"]; + REQUIRE(E_mesh.geometry() == geometry); + REQUIRE(E_mesh.geometryParameters() == geometryParameters); + REQUIRE(E_mesh.dataOrder() == dataOrder); + REQUIRE(E_mesh.gridSpacing() == gridSpacing); + REQUIRE(E_mesh.gridGlobalOffset() == gridGlobalOffset); + REQUIRE(E_mesh.gridUnitSI() == gridUnitSI); + REQUIRE(E_mesh.axisLabels() == axisLabels); // REQUIRE( E_mesh.unitDimension() == unitDimensions ); - REQUIRE( E_mesh.timeOffset< double >() == timeOffset ); + REQUIRE(E_mesh.timeOffset() == timeOffset); - auto E_x_value = s.iterations[1].meshes["E"]["x"].loadChunk< float >(); + auto E_x_value = s.iterations[1].meshes["E"]["x"].loadChunk(); s.flush(); - for( int idx = 0; idx < 1*2*3; ++idx ) - REQUIRE( E_x_value.get()[idx] == static_cast< float >(13.37) ); + for (int idx = 0; idx < 1 * 2 * 3; ++idx) + REQUIRE(E_x_value.get()[idx] == static_cast(13.37)); } { - Series list{ "../samples/constant_scalar." + file_ending, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/constant_scalar." 
+ file_ending, Access::READ_ONLY};
+        helper::listSeries(list);
     }
 }
 
-TEST_CASE( "constant_scalar", "[serial]" )
+TEST_CASE("constant_scalar", "[serial]")
 {
-    for (auto const & t: testedFileExtensions())
+    for (auto const &t : testedFileExtensions())
     {
-        constant_scalar( t );
+        constant_scalar(t);
     }
 }
 
-TEST_CASE( "flush_without_position_positionOffset", "[serial]" )
+TEST_CASE("flush_without_position_positionOffset", "[serial]")
 {
-    for( auto const & t : testedFileExtensions() )
+    for (auto const &t : testedFileExtensions())
     {
-        const std::string & file_ending = t;
+        const std::string &file_ending = t;
         Series s = Series(
             "../samples/flush_without_position_positionOffset." + file_ending,
-            Access::CREATE );
-        ParticleSpecies e = s.iterations[ 0 ].particles[ "e" ];
-        RecordComponent weighting = e[ "weighting" ][ RecordComponent::SCALAR ];
-        weighting.resetDataset( Dataset( Datatype::FLOAT, Extent{ 2, 2 } ) );
-        weighting.storeChunk( std::shared_ptr< float >(
-                                  new float[ 4 ](),
-                                  []( float const * ptr ) { delete[] ptr; } ),
-                              { 0, 0 },
-                              { 2, 2 } );
+            Access::CREATE);
+        ParticleSpecies e = s.iterations[0].particles["e"];
+        RecordComponent weighting = e["weighting"][RecordComponent::SCALAR];
+        weighting.resetDataset(Dataset(Datatype::FLOAT, Extent{2, 2}));
+        weighting.storeChunk(
+            std::shared_ptr<float>(
+                new float[4](), [](float const *ptr) { delete[] ptr; }),
+            {0, 0},
+            {2, 2});
         s.flush();
-        for( auto const & key : { "position", "positionOffset" } )
+        for (auto const &key : {"position", "positionOffset"})
         {
-            for( auto const & dim : { "x", "y", "z" } )
+            for (auto const &dim : {"x", "y", "z"})
             {
-                RecordComponent rc = e[ key ][ dim ];
-                rc.resetDataset( Dataset( Datatype::FLOAT , Extent{ 2, 2 } ) );
-                rc.storeChunk( std::shared_ptr< float >(
-                                   new float[ 4 ](),
-                                   []( float const * ptr ) { delete[] ptr; } ),
-                               { 0, 0 },
-                               { 2, 2 } );
-            }
+                RecordComponent rc = e[key][dim];
+                rc.resetDataset(Dataset(Datatype::FLOAT, Extent{2, 2}));
+                rc.storeChunk(
+                    std::shared_ptr<float>(
+                        new float[4](), [](float const *ptr) { delete[] ptr; }),
+                    {0, 0},
+                    {2, 2});
+            }
         }
     }
 }
-
-inline
-void particle_patches( std::string file_ending )
+inline void particle_patches(std::string file_ending)
 {
     constexpr auto SCALAR = openPMD::RecordComponent::SCALAR;
 
@@ -1027,30 +1117,39 @@ void particle_patches( std::string file_ending )
     uint64_t const num_patches = 2u;
     {
         // constant scalar
-        Series s = Series("../samples/particle_patches%T." + file_ending, Access::CREATE);
+        Series s = Series(
+            "../samples/particle_patches%T."
+ file_ending, Access::CREATE); auto e = s.iterations[42].particles["electrons"]; - for( auto r : {"x", "y"} ) + for (auto r : {"x", "y"}) { auto x = e["position"][r]; x.resetDataset(Dataset(determineDatatype(), {extent})); - std::vector xd( extent ); + std::vector xd(extent); std::iota(xd.begin(), xd.end(), 0); x.storeChunk(xd); - auto o = e["positionOffset"][r]; + auto o = e["positionOffset"][r]; o.resetDataset(Dataset(determineDatatype(), {extent})); - std::vector od( extent ); + std::vector od(extent); std::iota(od.begin(), od.end(), 0); o.storeChunk(od); s.flush(); } - auto const dset_n = Dataset(determineDatatype(), {num_patches, }); + auto const dset_n = Dataset( + determineDatatype(), + { + num_patches, + }); e.particlePatches["numParticles"][SCALAR].resetDataset(dset_n); e.particlePatches["numParticlesOffset"][SCALAR].resetDataset(dset_n); - auto const dset_f = Dataset(determineDatatype(), {num_patches, }); + auto const dset_f = Dataset( + determineDatatype(), + { + num_patches, + }); e.particlePatches["offset"]["x"].resetDataset(dset_f); e.particlePatches["offset"]["y"].resetDataset(dset_f); e.particlePatches["extent"]["x"].resetDataset(dset_f); @@ -1073,16 +1172,20 @@ void particle_patches( std::string file_ending ) e.particlePatches["extent"]["y"].store(1, float(123.)); } { - Series s = Series("../samples/particle_patches%T." + file_ending, Access::READ_ONLY); + Series s = Series( + "../samples/particle_patches%T." + file_ending, Access::READ_ONLY); auto e = s.iterations[42].particles["electrons"]; - auto numParticles = e.particlePatches["numParticles"][SCALAR].template load< uint64_t >(); - auto numParticlesOffset = e.particlePatches["numParticlesOffset"][SCALAR].template load< uint64_t >(); - auto extent_x = e.particlePatches["extent"]["x"].template load< float >(); - auto extent_y = e.particlePatches["extent"]["y"].template load< float >(); - auto offset_x = e.particlePatches["offset"]["x"].template load< float >(); - auto offset_y = e.particlePatches["offset"]["y"].template load< float >(); + auto numParticles = + e.particlePatches["numParticles"][SCALAR].template load(); + auto numParticlesOffset = + e.particlePatches["numParticlesOffset"][SCALAR] + .template load(); + auto extent_x = e.particlePatches["extent"]["x"].template load(); + auto extent_y = e.particlePatches["extent"]["y"].template load(); + auto offset_x = e.particlePatches["offset"]["x"].template load(); + auto offset_y = e.particlePatches["offset"]["y"].template load(); s.flush(); @@ -1101,19 +1204,18 @@ void particle_patches( std::string file_ending ) } } -TEST_CASE( "particle_patches", "[serial]" ) +TEST_CASE("particle_patches", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - particle_patches( t ); + particle_patches(t); } } -inline -void dtype_test( const std::string & backend ) +inline void dtype_test(const std::string &backend) { - bool test_long_double = (backend != "json") || sizeof (long double) <= 8; - bool test_long_long = (backend != "json") || sizeof (long long) <= 8; + bool test_long_double = (backend != "json") || sizeof(long double) <= 8; + bool test_long_long = (backend != "json") || sizeof(long long) <= 8; { Series s = Series("../samples/dtype_test." 
+ backend, Access::CREATE); @@ -1144,21 +1246,33 @@ void dtype_test( const std::string & backend ) } std::string str = "string"; s.setAttribute("string", str); - s.setAttribute("vecChar", std::vector< char >({'c', 'h', 'a', 'r'})); - s.setAttribute("vecInt16", std::vector< int16_t >({32766, 32767})); - s.setAttribute("vecInt32", std::vector< int32_t >({2147483646, 2147483647})); - s.setAttribute("vecInt64", std::vector< int64_t >({9223372036854775806, 9223372036854775807})); - s.setAttribute("vecUchar", std::vector< char >({'u', 'c', 'h', 'a', 'r'})); - s.setAttribute("vecUint16", std::vector< uint16_t >({65534u, 65535u})); - s.setAttribute("vecUint32", std::vector< uint32_t >({4294967294u, 4294967295u})); - s.setAttribute("vecUint64", std::vector< uint64_t >({18446744073709551614u, 18446744073709551615u})); - s.setAttribute("vecFloat", std::vector< float >({0.f, 3.40282e+38f})); - s.setAttribute("vecDouble", std::vector< double >({0., 1.79769e+308})); + s.setAttribute("vecChar", std::vector({'c', 'h', 'a', 'r'})); + s.setAttribute("vecInt16", std::vector({32766, 32767})); + s.setAttribute( + "vecInt32", std::vector({2147483646, 2147483647})); + s.setAttribute( + "vecInt64", + std::vector({9223372036854775806, 9223372036854775807})); + s.setAttribute( + "vecUchar", std::vector({'u', 'c', 'h', 'a', 'r'})); + s.setAttribute("vecUint16", std::vector({65534u, 65535u})); + s.setAttribute( + "vecUint32", std::vector({4294967294u, 4294967295u})); + s.setAttribute( + "vecUint64", + std::vector( + {18446744073709551614u, 18446744073709551615u})); + s.setAttribute("vecFloat", std::vector({0.f, 3.40282e+38f})); + s.setAttribute("vecDouble", std::vector({0., 1.79769e+308})); if (test_long_double) { - s.setAttribute("vecLongdouble", std::vector< long double >({0.L, std::numeric_limits::max()})); + s.setAttribute( + "vecLongdouble", + std::vector( + {0.L, std::numeric_limits::max()})); } - s.setAttribute("vecString", std::vector< std::string >({"vector", "of", "strings"})); + s.setAttribute( + "vecString", std::vector({"vector", "of", "strings"})); s.setAttribute("bool", true); s.setAttribute("boolF", false); @@ -1185,66 +1299,97 @@ void dtype_test( const std::string & backend ) unsigned long long ull = 128u; s.setAttribute("ulonglong", ull); } - s.setAttribute("vecShort", std::vector< short >({32766, 32767})); - s.setAttribute("vecInt", std::vector< int >({32766, 32767})); - s.setAttribute("vecLong", std::vector< long >({2147483646, 2147483647})); + s.setAttribute("vecShort", std::vector({32766, 32767})); + s.setAttribute("vecInt", std::vector({32766, 32767})); + s.setAttribute("vecLong", std::vector({2147483646, 2147483647})); if (test_long_long) { - s.setAttribute("vecLongLong", std::vector< long long >({2147483644, 2147483643})); + s.setAttribute( + "vecLongLong", + std::vector({2147483644, 2147483643})); } - s.setAttribute("vecUShort", std::vector< unsigned short >({65534u, 65535u})); - s.setAttribute("vecUInt", std::vector< unsigned int >({65533u, 65531u})); - s.setAttribute("vecULong", std::vector< unsigned long >({65532u, 65530u})); + s.setAttribute( + "vecUShort", std::vector({65534u, 65535u})); + s.setAttribute("vecUInt", std::vector({65533u, 65531u})); + s.setAttribute( + "vecULong", std::vector({65532u, 65530u})); if (test_long_long) { - s.setAttribute("vecULongLong", std::vector< unsigned long long >({65531u, 65529u})); + s.setAttribute( + "vecULongLong", + std::vector({65531u, 65529u})); } // long double grid spacing // should be possible to parse without error upon opening // the series 
for reading { - auto E = s.iterations[ 0 ].meshes[ "E" ]; - E.setGridSpacing( std::vector< long double >{ 1.0, 1.0 } ); - auto E_x = E[ "x" ]; - E_x.makeEmpty< double >( 1 ); + auto E = s.iterations[0].meshes["E"]; + E.setGridSpacing(std::vector{1.0, 1.0}); + auto E_x = E["x"]; + E_x.makeEmpty(1); } } Series s = Series("../samples/dtype_test." + backend, Access::READ_ONLY); - REQUIRE(s.getAttribute("char").get< char >() == 'c'); - REQUIRE(s.getAttribute("uchar").get< unsigned char >() == 'u'); - REQUIRE(s.getAttribute("int16").get< int16_t >() == 16); - REQUIRE(s.getAttribute("int32").get< int32_t >() == 32); - REQUIRE(s.getAttribute("int64").get< int64_t >() == 64); - REQUIRE(s.getAttribute("uint16").get< uint16_t >() == 16u); - REQUIRE(s.getAttribute("uint32").get< uint32_t >() == 32u); - REQUIRE(s.getAttribute("uint64").get< uint64_t >() == 64u); - REQUIRE(s.getAttribute("float").get< float >() == 16.e10f); - REQUIRE(s.getAttribute("double").get< double >() == 1.e64); + REQUIRE(s.getAttribute("char").get() == 'c'); + REQUIRE(s.getAttribute("uchar").get() == 'u'); + REQUIRE(s.getAttribute("int16").get() == 16); + REQUIRE(s.getAttribute("int32").get() == 32); + REQUIRE(s.getAttribute("int64").get() == 64); + REQUIRE(s.getAttribute("uint16").get() == 16u); + REQUIRE(s.getAttribute("uint32").get() == 32u); + REQUIRE(s.getAttribute("uint64").get() == 64u); + REQUIRE(s.getAttribute("float").get() == 16.e10f); + REQUIRE(s.getAttribute("double").get() == 1.e64); if (test_long_double) { - REQUIRE(s.getAttribute("longdouble").get< long double >() == 1.e80L); - } - REQUIRE(s.getAttribute("string").get< std::string >() == "string"); - REQUIRE(s.getAttribute("vecChar").get< std::vector< char > >() == std::vector< char >({'c', 'h', 'a', 'r'})); - REQUIRE(s.getAttribute("vecInt16").get< std::vector< int16_t > >() == std::vector< int16_t >({32766, 32767})); - REQUIRE(s.getAttribute("vecInt32").get< std::vector< int32_t > >() == std::vector< int32_t >({2147483646, 2147483647})); - REQUIRE(s.getAttribute("vecInt64").get< std::vector< int64_t > >() == std::vector< int64_t >({9223372036854775806, 9223372036854775807})); - REQUIRE(s.getAttribute("vecUchar").get< std::vector< char > >() == std::vector< char >({'u', 'c', 'h', 'a', 'r'})); - REQUIRE(s.getAttribute("vecUint16").get< std::vector< uint16_t > >() == std::vector< uint16_t >({65534u, 65535u})); - REQUIRE(s.getAttribute("vecUint32").get< std::vector< uint32_t > >() == std::vector< uint32_t >({4294967294u, 4294967295u})); - REQUIRE(s.getAttribute("vecUint64").get< std::vector< uint64_t > >() == std::vector< uint64_t >({18446744073709551614u, 18446744073709551615u})); - REQUIRE(s.getAttribute("vecFloat").get< std::vector< float > >() == std::vector< float >({0.f, 3.40282e+38f})); - REQUIRE(s.getAttribute("vecDouble").get< std::vector< double > >() == std::vector< double >({0., 1.79769e+308})); + REQUIRE(s.getAttribute("longdouble").get() == 1.e80L); + } + REQUIRE(s.getAttribute("string").get() == "string"); + REQUIRE( + s.getAttribute("vecChar").get>() == + std::vector({'c', 'h', 'a', 'r'})); + REQUIRE( + s.getAttribute("vecInt16").get>() == + std::vector({32766, 32767})); + REQUIRE( + s.getAttribute("vecInt32").get>() == + std::vector({2147483646, 2147483647})); + REQUIRE( + s.getAttribute("vecInt64").get>() == + std::vector({9223372036854775806, 9223372036854775807})); + REQUIRE( + s.getAttribute("vecUchar").get>() == + std::vector({'u', 'c', 'h', 'a', 'r'})); + REQUIRE( + s.getAttribute("vecUint16").get>() == + std::vector({65534u, 65535u})); + 
REQUIRE( + s.getAttribute("vecUint32").get>() == + std::vector({4294967294u, 4294967295u})); + REQUIRE( + s.getAttribute("vecUint64").get>() == + std::vector({18446744073709551614u, 18446744073709551615u})); + REQUIRE( + s.getAttribute("vecFloat").get>() == + std::vector({0.f, 3.40282e+38f})); + REQUIRE( + s.getAttribute("vecDouble").get>() == + std::vector({0., 1.79769e+308})); if (test_long_double) { - REQUIRE(s.getAttribute("vecLongdouble").get< std::vector< long double > >() == std::vector< long double >({0.L, std::numeric_limits::max()})); + REQUIRE( + s.getAttribute("vecLongdouble").get>() == + std::vector( + {0.L, std::numeric_limits::max()})); } - REQUIRE(s.getAttribute("vecString").get< std::vector< std::string > >() == std::vector< std::string >({"vector", "of", "strings"})); - REQUIRE(s.getAttribute("bool").get< bool >() == true); - REQUIRE(s.getAttribute("boolF").get< bool >() == false); + REQUIRE( + s.getAttribute("vecString").get>() == + std::vector({"vector", "of", "strings"})); + REQUIRE(s.getAttribute("bool").get() == true); + REQUIRE(s.getAttribute("boolF").get() == false); // same implementation types (not necessary aliases) detection #if !defined(_MSC_VER) @@ -1269,7 +1414,8 @@ void dtype_test( const std::string & backend ) REQUIRE(s.getAttribute("vecULong").dtype == Datatype::VEC_ULONG); if (test_long_long) { - REQUIRE(s.getAttribute("vecULongLong").dtype == Datatype::VEC_ULONGLONG); + REQUIRE( + s.getAttribute("vecULongLong").dtype == Datatype::VEC_ULONGLONG); } #endif REQUIRE(isSame(s.getAttribute("short").dtype, Datatype::SHORT)); @@ -1292,71 +1438,86 @@ void dtype_test( const std::string & backend ) REQUIRE(isSame(s.getAttribute("vecLong").dtype, Datatype::VEC_LONG)); if (test_long_long) { - REQUIRE(isSame(s.getAttribute("vecLongLong").dtype, Datatype::VEC_LONGLONG)); + REQUIRE(isSame( + s.getAttribute("vecLongLong").dtype, Datatype::VEC_LONGLONG)); } REQUIRE(isSame(s.getAttribute("vecUShort").dtype, Datatype::VEC_USHORT)); REQUIRE(isSame(s.getAttribute("vecUInt").dtype, Datatype::VEC_UINT)); REQUIRE(isSame(s.getAttribute("vecULong").dtype, Datatype::VEC_ULONG)); if (test_long_long) { - REQUIRE(isSame(s.getAttribute("vecULongLong").dtype, Datatype::VEC_ULONGLONG)); + REQUIRE(isSame( + s.getAttribute("vecULongLong").dtype, Datatype::VEC_ULONGLONG)); } } -TEST_CASE( "dtype_test", "[serial]" ) +TEST_CASE("dtype_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) dtype_test(t); } -inline -void write_test(const std::string & backend) +inline void write_test(const std::string &backend) { Series o = Series("../samples/serial_write." 
+ backend, Access::CREATE); - ParticleSpecies& e_1 = o.iterations[1].particles["e"]; + ParticleSpecies &e_1 = o.iterations[1].particles["e"]; - std::vector< double > position_global(4); + std::vector position_global(4); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_1(new double); - e_1["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_1), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_1(new double); + e_1["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_1), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_1 = position_global[i]; e_1["position"]["x"].storeChunk(position_local_1, {i}, {1}); } - std::vector< uint64_t > positionOffset_global(4); + std::vector positionOffset_global(4); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_1(new uint64_t); - e_1["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_1), {4})); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local_1(new uint64_t); + e_1["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_1), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *positionOffset_local_1 = positionOffset_global[i]; e_1["positionOffset"]["x"].storeChunk(positionOffset_local_1, {i}, {1}); } - ParticleSpecies& e_2 = o.iterations[2].particles["e"]; + ParticleSpecies &e_2 = o.iterations[2].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_2(new double); - e_2["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_2), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_2(new double); + e_2["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_2), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_2 = position_global[i]; e_2["position"]["x"].storeChunk(position_local_2, {i}, {1}); } - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_2(new uint64_t); - e_2["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_2), {4})); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local_2(new uint64_t); + e_2["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_2), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *positionOffset_local_2 = positionOffset_global[i]; e_2["positionOffset"]["x"].storeChunk(positionOffset_local_2, {i}, {1}); @@ -1364,23 +1525,30 @@ void write_test(const std::string & backend) o.flush(); - ParticleSpecies& e_3 = o.iterations[3].particles["e"]; + ParticleSpecies &e_3 = o.iterations[3].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - 
std::shared_ptr< double > position_local_3(new double); - e_3["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_3), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_3(new double); + e_3["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_3), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_3 = position_global[i]; e_3["position"]["x"].storeChunk(position_local_3, {i}, {1}); } - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_3(new uint64_t); - e_3["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_3), {4})); + std::generate( + positionOffset_global.begin(), positionOffset_global.end(), [&posOff] { + return posOff++; + }); + std::shared_ptr positionOffset_local_3(new uint64_t); + e_3["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_3), {4})); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *positionOffset_local_3 = positionOffset_global[i]; e_3["positionOffset"]["x"].storeChunk(positionOffset_local_3, {i}, {1}); @@ -1389,26 +1557,29 @@ void write_test(const std::string & backend) o.flush(); } -TEST_CASE( "write_test", "[serial]" ) +TEST_CASE("write_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - write_test( t ); - Series list{ "../samples/serial_write." + t, Access::READ_ONLY }; - helper::listSeries( list ); + write_test(t); + Series list{"../samples/serial_write." + t, Access::READ_ONLY}; + helper::listSeries(list); } } -void test_complex(const std::string & backend) { +void test_complex(const std::string &backend) +{ { - Series o = Series("../samples/serial_write_complex." + backend, Access::CREATE); + Series o = Series( + "../samples/serial_write_complex." 
+ backend, Access::CREATE); o.setAttribute("lifeIsComplex", std::complex(4.56, 7.89)); o.setAttribute("butComplexFloats", std::complex(42.3, -99.3)); - if( backend != "bp" ) - o.setAttribute("longDoublesYouSay", std::complex(5.5, -4.55)); + if (backend != "bp") + o.setAttribute( + "longDoublesYouSay", std::complex(5.5, -4.55)); auto Cflt = o.iterations[0].meshes["Cflt"][RecordComponent::SCALAR]; - std::vector< std::complex > cfloats(3); + std::vector> cfloats(3); cfloats.at(0) = {1., 2.}; cfloats.at(1) = {-3., 4.}; cfloats.at(2) = {5., -6.}; @@ -1416,21 +1587,23 @@ void test_complex(const std::string & backend) { Cflt.storeChunk(cfloats, {0}); auto Cdbl = o.iterations[0].meshes["Cdbl"][RecordComponent::SCALAR]; - std::vector< std::complex > cdoubles(3); + std::vector> cdoubles(3); cdoubles.at(0) = {2., 1.}; cdoubles.at(1) = {-4., 3.}; cdoubles.at(2) = {6., -5.}; Cdbl.resetDataset(Dataset(Datatype::CDOUBLE, {cdoubles.size()})); Cdbl.storeChunk(cdoubles, {0}); - std::vector< std::complex > cldoubles(3); - if( backend != "bp" ) + std::vector> cldoubles(3); + if (backend != "bp") { - auto Cldbl = o.iterations[0].meshes["Cldbl"][RecordComponent::SCALAR]; + auto Cldbl = + o.iterations[0].meshes["Cldbl"][RecordComponent::SCALAR]; cldoubles.at(0) = {3., 2.}; cldoubles.at(1) = {-5., 4.}; cldoubles.at(2) = {7., -6.}; - Cldbl.resetDataset(Dataset(Datatype::CLONG_DOUBLE, {cldoubles.size()})); + Cldbl.resetDataset( + Dataset(Datatype::CLONG_DOUBLE, {cldoubles.size()})); Cldbl.storeChunk(cldoubles, {0}); } @@ -1438,48 +1611,63 @@ void test_complex(const std::string & backend) { } { - Series i = Series("../samples/serial_write_complex." + backend, Access::READ_ONLY); - REQUIRE(i.getAttribute("lifeIsComplex").get< std::complex >() == std::complex(4.56, 7.89)); - REQUIRE(i.getAttribute("butComplexFloats").get< std::complex >() == std::complex(42.3, -99.3)); - if( backend != "bp" ) { - REQUIRE(i.getAttribute("longDoublesYouSay").get >() == - std::complex(5.5, -4.55)); + Series i = Series( + "../samples/serial_write_complex." + backend, Access::READ_ONLY); + REQUIRE( + i.getAttribute("lifeIsComplex").get>() == + std::complex(4.56, 7.89)); + REQUIRE( + i.getAttribute("butComplexFloats").get>() == + std::complex(42.3, -99.3)); + if (backend != "bp") + { + REQUIRE( + i.getAttribute("longDoublesYouSay") + .get>() == + std::complex(5.5, -4.55)); } - auto rcflt = i.iterations[0].meshes["Cflt"][RecordComponent::SCALAR].loadChunk< std::complex >(); - auto rcdbl = i.iterations[0].meshes["Cdbl"][RecordComponent::SCALAR].loadChunk< std::complex >(); + auto rcflt = i.iterations[0] + .meshes["Cflt"][RecordComponent::SCALAR] + .loadChunk>(); + auto rcdbl = i.iterations[0] + .meshes["Cdbl"][RecordComponent::SCALAR] + .loadChunk>(); i.flush(); REQUIRE(rcflt.get()[1] == std::complex(-3., 4.)); REQUIRE(rcdbl.get()[2] == std::complex(6, -5.)); - if( backend != "bp" ) + if (backend != "bp") { - auto rcldbl = i.iterations[0].meshes["Cldbl"][RecordComponent::SCALAR].loadChunk< std::complex >(); + auto rcldbl = i.iterations[0] + .meshes["Cldbl"][RecordComponent::SCALAR] + .loadChunk>(); i.flush(); REQUIRE(rcldbl.get()[2] == std::complex(7., -6.)); } } { - Series list{ "../samples/serial_write_complex." + backend, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/serial_write_complex." 
+ backend, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "test_complex", "[serial]" ) +TEST_CASE("test_complex", "[serial]") { // Notes: // - ADIOS1 and ADIOS 2.7.0 have no complex long double // - JSON read-back not distinguishable yet from N+1 shaped data set - for (auto const & t : testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { test_complex(t); } } -inline -void fileBased_add_EDpic(ParticleSpecies& e, uint64_t const num_particles) +inline void +fileBased_add_EDpic(ParticleSpecies &e, uint64_t const num_particles) { // ED-PIC e["position"].setAttribute("weightingPower", 0.0); @@ -1493,7 +1681,6 @@ void fileBased_add_EDpic(ParticleSpecies& e, uint64_t const num_particles) e["momentum"].setAttribute("weightingPower", 1.0); e["momentum"].setAttribute("macroWeighted", uint32_t(0)); - e["charge"][RecordComponent::SCALAR].resetDataset(dsDbl); e["charge"][RecordComponent::SCALAR].makeConstant(2.3); e["charge"].setAttribute("weightingPower", 1.0); @@ -1516,97 +1703,135 @@ void fileBased_add_EDpic(ParticleSpecies& e, uint64_t const num_particles) e.setAttribute("particleSmoothing", "none"); } -inline -void fileBased_write_test(const std::string & backend) +inline void fileBased_write_test(const std::string &backend) { - if( auxiliary::directory_exists("../samples/subdir") ) + if (auxiliary::directory_exists("../samples/subdir")) auxiliary::remove_directory("../samples/subdir"); { - Series o = Series("../samples/subdir/serial_fileBased_write%03T." + backend, Access::CREATE); + Series o = Series( + "../samples/subdir/serial_fileBased_write%03T." + backend, + Access::CREATE); - ParticleSpecies& e_1 = o.iterations[1].particles["e"]; + ParticleSpecies &e_1 = o.iterations[1].particles["e"]; - std::vector< double > position_global(4); + std::vector position_global(4); double pos{0.}; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_1(new double); - e_1["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_1), {4})); - std::vector< uint64_t > positionOffset_global(4); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_1(new double); + e_1["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_1), {4})); + std::vector positionOffset_global(4); uint64_t posOff{0}; - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_1(new uint64_t); - e_1["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_1), {4})); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local_1(new uint64_t); + e_1["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_1), {4})); fileBased_add_EDpic(e_1, 4); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_1 = position_global[i]; e_1["position"]["x"].storeChunk(position_local_1, {i}, {1}); *positionOffset_local_1 = positionOffset_global[i]; - e_1["positionOffset"]["x"].storeChunk(positionOffset_local_1, {i}, {1}); + e_1["positionOffset"]["x"].storeChunk( + positionOffset_local_1, {i}, {1}); o.flush(); } - o.iterations[1].setTime(static_cast< double >(1)); + o.iterations[1].setTime(static_cast(1)); - ParticleSpecies& e_2 = o.iterations[2].particles["e"]; + 
ParticleSpecies &e_2 = o.iterations[2].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - e_2["position"]["x"].resetDataset(Dataset(determineDatatype(), {4})); - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_2(new uint64_t); - e_2["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_2), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + e_2["position"]["x"].resetDataset( + Dataset(determineDatatype(), {4})); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local_2(new uint64_t); + e_2["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_2), {4})); fileBased_add_EDpic(e_2, 4); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { double const position_local_2 = position_global.at(i); - e_2["position"]["x"].storeChunk(shareRaw(&position_local_2), {i}, {1}); + e_2["position"]["x"].storeChunk( + shareRaw(&position_local_2), {i}, {1}); *positionOffset_local_2 = positionOffset_global[i]; - e_2["positionOffset"]["x"].storeChunk(positionOffset_local_2, {i}, {1}); + e_2["positionOffset"]["x"].storeChunk( + positionOffset_local_2, {i}, {1}); o.flush(); } - o.iterations[2].setTime(static_cast< double >(2)); + o.iterations[2].setTime(static_cast(2)); - ParticleSpecies& e_3 = o.iterations[3].particles["e"]; + ParticleSpecies &e_3 = o.iterations[3].particles["e"]; - std::generate(position_global.begin(), position_global.end(), [&pos]{ return pos++; }); - std::shared_ptr< double > position_local_3(new double); - e_3["position"]["x"].resetDataset(Dataset(determineDatatype(position_local_3), {4})); - std::generate(positionOffset_global.begin(), positionOffset_global.end(), [&posOff]{ return posOff++; }); - std::shared_ptr< uint64_t > positionOffset_local_3(new uint64_t); - e_3["positionOffset"]["x"].resetDataset(Dataset(determineDatatype(positionOffset_local_3), {4})); + std::generate(position_global.begin(), position_global.end(), [&pos] { + return pos++; + }); + std::shared_ptr position_local_3(new double); + e_3["position"]["x"].resetDataset( + Dataset(determineDatatype(position_local_3), {4})); + std::generate( + positionOffset_global.begin(), + positionOffset_global.end(), + [&posOff] { return posOff++; }); + std::shared_ptr positionOffset_local_3(new uint64_t); + e_3["positionOffset"]["x"].resetDataset( + Dataset(determineDatatype(positionOffset_local_3), {4})); fileBased_add_EDpic(e_3, 4); - for( uint64_t i = 0; i < 4; ++i ) + for (uint64_t i = 0; i < 4; ++i) { *position_local_3 = position_global[i]; e_3["position"]["x"].storeChunk(position_local_3, {i}, {1}); *positionOffset_local_3 = positionOffset_global[i]; - e_3["positionOffset"]["x"].storeChunk(positionOffset_local_3, {i}, {1}); + e_3["positionOffset"]["x"].storeChunk( + positionOffset_local_3, {i}, {1}); o.flush(); } - o.setOpenPMDextension(1); // this happens intentionally "late" in this test - o.iterations[3].setTime(static_cast< double >(3)); - o.iterations[4].setTime(static_cast< double >(4)); + o.setOpenPMDextension( + 1); // this happens intentionally "late" in this test + o.iterations[3].setTime(static_cast(3)); + o.iterations[4].setTime(static_cast(4)); o.flush(); - o.iterations[5].setTime(static_cast< double >(5)); + 
o.iterations[5].setTime(static_cast(5)); } - REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write001." + backend) - || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write001." + backend))); - REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write002." + backend) - || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write002." + backend))); - REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write003." + backend) - || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write003." + backend))); + REQUIRE( + (auxiliary::file_exists( + "../samples/subdir/serial_fileBased_write001." + backend) || + auxiliary::directory_exists( + "../samples/subdir/serial_fileBased_write001." + backend))); + REQUIRE( + (auxiliary::file_exists( + "../samples/subdir/serial_fileBased_write002." + backend) || + auxiliary::directory_exists( + "../samples/subdir/serial_fileBased_write002." + backend))); + REQUIRE( + (auxiliary::file_exists( + "../samples/subdir/serial_fileBased_write003." + backend) || + auxiliary::directory_exists( + "../samples/subdir/serial_fileBased_write003." + backend))); { - Series o = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_ONLY); + Series o = Series( + "../samples/subdir/serial_fileBased_write%T." + backend, + Access::READ_ONLY); REQUIRE(o.iterations.size() == 5); REQUIRE(o.iterations.count(1) == 1); @@ -1627,90 +1852,104 @@ void fileBased_write_test(const std::string & backend) REQUIRE(o.particlesPath() == "particles/"); REQUIRE_FALSE(o.containsAttribute("meshesPath")); REQUIRE_THROWS_AS(o.meshesPath(), no_such_attribute_error); - std::array< double, 7 > udim{{1, 0, 0, 0, 0, 0, 0}}; + std::array udim{{1, 0, 0, 0, 0, 0, 0}}; Extent ext{4}; - for( auto& entry : o.iterations ) + for (auto &entry : o.iterations) { - auto& it = entry.second; - REQUIRE(it.dt< double >() == 1.); - REQUIRE(it.time< double >() == static_cast< double >(entry.first)); + auto &it = entry.second; + REQUIRE(it.dt() == 1.); + REQUIRE(it.time() == static_cast(entry.first)); REQUIRE(it.timeUnitSI() == 1.); - if( entry.first > 3 ) + if (entry.first > 3) continue; // empty iterations - auto& pos = it.particles.at("e").at("position"); - REQUIRE(pos.timeOffset< float >() == 0.f); + auto &pos = it.particles.at("e").at("position"); + REQUIRE(pos.timeOffset() == 0.f); REQUIRE(pos.unitDimension() == udim); REQUIRE(!pos.scalar()); - auto& pos_x = pos.at("x"); + auto &pos_x = pos.at("x"); REQUIRE(pos_x.unitSI() == 1.); REQUIRE(pos_x.getExtent() == ext); REQUIRE(pos_x.getDatatype() == Datatype::DOUBLE); REQUIRE(!pos_x.constant()); - auto& posOff = it.particles.at("e").at("positionOffset"); - REQUIRE(posOff.timeOffset< float >() == 0.f); + auto &posOff = it.particles.at("e").at("positionOffset"); + REQUIRE(posOff.timeOffset() == 0.f); REQUIRE(posOff.unitDimension() == udim); - auto& posOff_x = posOff.at("x"); + auto &posOff_x = posOff.at("x"); REQUIRE(posOff_x.unitSI() == 1.); REQUIRE(posOff_x.getExtent() == ext); #if !defined(_MSC_VER) - REQUIRE(posOff_x.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(posOff_x.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(posOff_x.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(posOff_x.getDatatype(), determineDatatype())); - auto position = pos_x.loadChunk< double >({0}, {4}); + auto position = pos_x.loadChunk({0}, {4}); auto position_raw = position.get(); - auto positionOffset = posOff_x.loadChunk< uint64_t 
>({0}, {4}); + auto positionOffset = posOff_x.loadChunk({0}, {4}); auto positionOffset_raw = positionOffset.get(); o.flush(); - for( uint64_t j = 0; j < 4; ++j ) + for (uint64_t j = 0; j < 4; ++j) { - REQUIRE(position_raw[j] == static_cast< double >(j + (entry.first-1)*4)); - REQUIRE(positionOffset_raw[j] == j + (entry.first-1)*4); + REQUIRE( + position_raw[j] == + static_cast(j + (entry.first - 1) * 4)); + REQUIRE(positionOffset_raw[j] == j + (entry.first - 1) * 4); } } - REQUIRE(o.iterations[3].time< double >() == 3.0); - REQUIRE(o.iterations[4].time< double >() == 4.0); - REQUIRE(o.iterations[5].time< double >() == 5.0); + REQUIRE(o.iterations[3].time() == 3.0); + REQUIRE(o.iterations[4].time() == 4.0); + REQUIRE(o.iterations[5].time() == 5.0); } - // extend existing series with new step and auto-detection of iteration padding + // extend existing series with new step and auto-detection of iteration + // padding { - Series o = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_WRITE); + Series o = Series( + "../samples/subdir/serial_fileBased_write%T." + backend, + Access::READ_WRITE); REQUIRE(o.iterations.size() == 5); o.iterations[6]; REQUIRE(o.iterations.size() == 6); // write something to trigger opening of the file - o.iterations[ 6 ].particles[ "e" ][ "position" ][ "x" ].resetDataset( - { Datatype::DOUBLE, { 10 } } ); - o.iterations[ 6 ] - .particles[ "e" ][ "position" ][ "x" ] - .makeConstant< double >( 1.0 ); + o.iterations[6].particles["e"]["position"]["x"].resetDataset( + {Datatype::DOUBLE, {10}}); + o.iterations[6].particles["e"]["position"]["x"].makeConstant( + 1.0); - // additional iteration with over-running iteration padding but similar content + // additional iteration with over-running iteration padding but similar + // content // padding: 000 uint64_t const overlong_it = 123456; - o.iterations[ overlong_it ]; + o.iterations[overlong_it]; // write something to trigger opening of the file - o.iterations[ overlong_it ].particles[ "e" ][ "position" ][ "x" ].resetDataset( - { Datatype::DOUBLE, { 12 } } ); - o.iterations[ overlong_it ] - .particles[ "e" ][ "position" ][ "x" ] - .makeConstant< double >( 1.0 ); + o.iterations[overlong_it].particles["e"]["position"]["x"].resetDataset( + {Datatype::DOUBLE, {12}}); + o.iterations[overlong_it] + .particles["e"]["position"]["x"] + .makeConstant(1.0); - o.iterations[ overlong_it ].setTime(static_cast< double >(overlong_it)); + o.iterations[overlong_it].setTime(static_cast(overlong_it)); REQUIRE(o.iterations.size() == 7); } - REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write004." + backend) - || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write004." + backend))); - REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write123456." + backend) - || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write123456." + backend))); + REQUIRE( + (auxiliary::file_exists( + "../samples/subdir/serial_fileBased_write004." + backend) || + auxiliary::directory_exists( + "../samples/subdir/serial_fileBased_write004." + backend))); + REQUIRE( + (auxiliary::file_exists( + "../samples/subdir/serial_fileBased_write123456." + backend) || + auxiliary::directory_exists( + "../samples/subdir/serial_fileBased_write123456." + backend))); // additional iteration with shorter iteration padding but similar content { - Series o = Series("../samples/subdir/serial_fileBased_write%01T." 
+ backend, Access::READ_WRITE); + Series o = Series( + "../samples/subdir/serial_fileBased_write%01T." + backend, + Access::READ_WRITE); REQUIRE(o.iterations.size() == 1); /* @@ -1720,165 +1959,190 @@ void fileBased_write_test(const std::string & backend) */ REQUIRE(o.iterations.count(123456) == 1); - auto& it = o.iterations[10]; - ParticleSpecies& e = it.particles["e"]; + auto &it = o.iterations[10]; + ParticleSpecies &e = it.particles["e"]; e["position"]["x"].resetDataset(Dataset(Datatype::DOUBLE, {42})); e["positionOffset"]["x"].resetDataset(Dataset(Datatype::DOUBLE, {42})); e["position"]["x"].makeConstant(1.23); e["positionOffset"]["x"].makeConstant(1.23); fileBased_add_EDpic(e, 42); - it.setTime(static_cast< double >(10)); + it.setTime(static_cast(10)); REQUIRE(o.iterations.size() == 2); } - REQUIRE((auxiliary::file_exists("../samples/subdir/serial_fileBased_write10." + backend) - || auxiliary::directory_exists("../samples/subdir/serial_fileBased_write10." + backend))); + REQUIRE( + (auxiliary::file_exists( + "../samples/subdir/serial_fileBased_write10." + backend) || + auxiliary::directory_exists( + "../samples/subdir/serial_fileBased_write10." + backend))); // read back with auto-detection and non-fixed padding { - Series s = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/subdir/serial_fileBased_write%T." + backend, + Access::READ_ONLY); REQUIRE(s.iterations.size() == 8); REQUIRE(s.iterations.contains(4)); REQUIRE(s.iterations.contains(10)); REQUIRE(s.iterations.contains(123456)); - REQUIRE(s.iterations[3].time< double >() == 3.0); - REQUIRE(s.iterations[4].time< double >() == 4.0); - REQUIRE(s.iterations[5].time< double >() == 5.0); - REQUIRE(s.iterations[10].time< double >() == 10.0); - REQUIRE(s.iterations[123456].time< double >() == double(123456)); + REQUIRE(s.iterations[3].time() == 3.0); + REQUIRE(s.iterations[4].time() == 4.0); + REQUIRE(s.iterations[5].time() == 5.0); + REQUIRE(s.iterations[10].time() == 10.0); + REQUIRE(s.iterations[123456].time() == double(123456)); } // write with auto-detection and in-consistent padding from step 10 { - REQUIRE_THROWS_WITH(Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_WRITE), - Catch::Equals("Cannot write to a series with inconsistent iteration padding. Please specify '%0T' or open as read-only.")); + REQUIRE_THROWS_WITH( + Series( + "../samples/subdir/serial_fileBased_write%T." + backend, + Access::READ_WRITE), + Catch::Equals( + "Cannot write to a series with inconsistent iteration padding. " + "Please specify '%0T' or open as read-only.")); } // read back with fixed padding { - Series s = Series("../samples/subdir/serial_fileBased_write%03T." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/subdir/serial_fileBased_write%03T." + backend, + Access::READ_ONLY); REQUIRE(s.iterations.size() == 7); REQUIRE(s.iterations.contains(4)); REQUIRE(!s.iterations.contains(10)); REQUIRE(s.iterations.contains(123456)); - REQUIRE(s.iterations[3].time< double >() == 3.0); - REQUIRE(s.iterations[4].time< double >() == 4.0); - REQUIRE(s.iterations[5].time< double >() == 5.0); + REQUIRE(s.iterations[3].time() == 3.0); + REQUIRE(s.iterations[4].time() == 4.0); + REQUIRE(s.iterations[5].time() == 5.0); } // read back with auto-detection (allow relaxed/overflow padding) { - Series s = Series("../samples/subdir/serial_fileBased_write%T." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/subdir/serial_fileBased_write%T." 
+ backend, + Access::READ_ONLY); REQUIRE(s.iterations.size() == 8); REQUIRE(s.iterations.contains(4)); REQUIRE(s.iterations.contains(10)); REQUIRE(s.iterations.contains(123456)); - REQUIRE(s.iterations[3].time< double >() == 3.0); - REQUIRE(s.iterations[4].time< double >() == 4.0); - REQUIRE(s.iterations[5].time< double >() == 5.0); - REQUIRE(s.iterations[10].time< double >() == 10.0); - REQUIRE(s.iterations[123456].time< double >() == - static_cast< double >(123456)); + REQUIRE(s.iterations[3].time() == 3.0); + REQUIRE(s.iterations[4].time() == 4.0); + REQUIRE(s.iterations[5].time() == 5.0); + REQUIRE(s.iterations[10].time() == 10.0); + REQUIRE( + s.iterations[123456].time() == static_cast(123456)); } { - Series list{ "../samples/subdir/serial_fileBased_write%03T." + backend, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/subdir/serial_fileBased_write%03T." + backend, + Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "fileBased_write_test", "[serial]" ) +TEST_CASE("fileBased_write_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - fileBased_write_test( t ); + fileBased_write_test(t); } } -inline -void sample_write_thetaMode(std::string file_ending) +inline void sample_write_thetaMode(std::string file_ending) { - Series o = Series(std::string("../samples/thetaMode_%05T.").append(file_ending), Access::CREATE); + Series o = Series( + std::string("../samples/thetaMode_%05T.").append(file_ending), + Access::CREATE); unsigned int const num_modes = 4u; - unsigned int const num_fields = 1u + (num_modes-1u) * 2u; // the first mode is purely real + unsigned int const num_fields = + 1u + (num_modes - 1u) * 2u; // the first mode is purely real unsigned int const N_r = 20; unsigned int const N_z = 64; - std::shared_ptr< float > E_r_data(new float[num_fields*N_r*N_z], [](float const *p){ delete[] p; }); - std::shared_ptr< double > E_t_data(new double[num_fields*N_r*N_z], [](double const *p){ delete[] p; }); - float e_r{0}; - std::generate(E_r_data.get(), E_r_data.get() + num_fields*N_r*N_z, [&e_r]{ return e_r += 1.0f; }); + std::shared_ptr E_r_data( + new float[num_fields * N_r * N_z], [](float const *p) { delete[] p; }); + std::shared_ptr E_t_data( + new double[num_fields * N_r * N_z], + [](double const *p) { delete[] p; }); + float e_r{0}; + std::generate( + E_r_data.get(), E_r_data.get() + num_fields * N_r * N_z, [&e_r] { + return e_r += 1.0f; + }); double e_t{100}; - std::generate(E_t_data.get(), E_t_data.get() + num_fields*N_r*N_z, [&e_t]{ return e_t += 2.0; }); + std::generate( + E_t_data.get(), E_t_data.get() + num_fields * N_r * N_z, [&e_t] { + return e_t += 2.0; + }); std::stringstream geos; geos << "m=" << num_modes << ";imag=+"; std::string const geometryParameters = geos.str(); - for(int i = 0; i <= 400; i+=100 ) + for (int i = 0; i <= 400; i += 100) { auto it = o.iterations[i]; Mesh E = it.meshes["E"]; - E.setGeometry( Mesh::Geometry::thetaMode ); - E.setGeometryParameters( geometryParameters ); - E.setDataOrder( Mesh::DataOrder::C ); - E.setGridSpacing( std::vector{1.0, 1.0} ); - E.setGridGlobalOffset( std::vector{0.0, 0.0} ); - E.setGridUnitSI( 1.0 ); - E.setAxisLabels( std::vector< std::string >{"r", "z"} ); - std::map< UnitDimension, double > const unitDimensions{ - {UnitDimension::I, 1.0}, - {UnitDimension::J, 2.0} - }; - E.setUnitDimension( unitDimensions ); - E.setTimeOffset( 1.e-12 * double(i) ); + E.setGeometry(Mesh::Geometry::thetaMode); + 
E.setGeometryParameters(geometryParameters); + E.setDataOrder(Mesh::DataOrder::C); + E.setGridSpacing(std::vector{1.0, 1.0}); + E.setGridGlobalOffset(std::vector{0.0, 0.0}); + E.setGridUnitSI(1.0); + E.setAxisLabels(std::vector{"r", "z"}); + std::map const unitDimensions{ + {UnitDimension::I, 1.0}, {UnitDimension::J, 2.0}}; + E.setUnitDimension(unitDimensions); + E.setTimeOffset(1.e-12 * double(i)); auto E_z = E["z"]; - E_z.setUnitSI( 10. ); - E_z.setPosition(std::vector< double >{0.0, 0.5}); - E_z.resetDataset( Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}) ); // (modes, r, z) see setGeometryParameters - E_z.makeConstant( static_cast< float >(42.54) ); - - // write all modes at once (otherwise iterate over modes and first index) + E_z.setUnitSI(10.); + E_z.setPosition(std::vector{0.0, 0.5}); + E_z.resetDataset(Dataset( + Datatype::FLOAT, + {num_fields, N_r, N_z})); // (modes, r, z) see setGeometryParameters + E_z.makeConstant(static_cast(42.54)); + + // write all modes at once (otherwise iterate over modes and first + // index) auto E_r = E["r"]; - E_r.setUnitSI( 10. ); - E_r.setPosition(std::vector< double >{0.5, 0.0}); - E_r.resetDataset( - Dataset(Datatype::FLOAT, {num_fields, N_r, N_z}) - ); + E_r.setUnitSI(10.); + E_r.setPosition(std::vector{0.5, 0.0}); + E_r.resetDataset(Dataset(Datatype::FLOAT, {num_fields, N_r, N_z})); E_r.storeChunk(E_r_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); auto E_t = E["t"]; - E_t.setUnitSI( 10. ); - E_t.setPosition(std::vector< double >{0.0, 0.0}); - E_t.resetDataset( - Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z}) - ); + E_t.setUnitSI(10.); + E_t.setPosition(std::vector{0.0, 0.0}); + E_t.resetDataset(Dataset(Datatype::DOUBLE, {num_fields, N_r, N_z})); E_t.storeChunk(E_t_data, Offset{0, 0, 0}, Extent{num_fields, N_r, N_z}); o.flush(); } } -TEST_CASE( "sample_write_thetaMode", "[serial][thetaMode]" ) +TEST_CASE("sample_write_thetaMode", "[serial][thetaMode]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - sample_write_thetaMode( t ); + sample_write_thetaMode(t); - Series list{ std::string("../samples/thetaMode_%05T.").append(t), Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + std::string("../samples/thetaMode_%05T.").append(t), + Access::READ_ONLY}; + helper::listSeries(list); } } -inline -void bool_test(const std::string & backend) +inline void bool_test(const std::string &backend) { { Series o = Series("../samples/serial_bool." + backend, Access::CREATE); @@ -1888,30 +2152,34 @@ void bool_test(const std::string & backend) o.setAttribute("Bool attribute (false)", false); } { - Series o = Series("../samples/serial_bool." + backend, Access::READ_ONLY); + Series o = + Series("../samples/serial_bool." + backend, Access::READ_ONLY); auto attrs = o.attributes(); - REQUIRE(std::count(attrs.begin(), attrs.end(), "Bool attribute (true)") == 1); - REQUIRE(std::count(attrs.begin(), attrs.end(), "Bool attribute (false)") == 1); - REQUIRE(o.getAttribute("Bool attribute (true)").get< bool >() == true); - REQUIRE(o.getAttribute("Bool attribute (false)").get< bool >() == false); + REQUIRE( + std::count(attrs.begin(), attrs.end(), "Bool attribute (true)") == + 1); + REQUIRE( + std::count(attrs.begin(), attrs.end(), "Bool attribute (false)") == + 1); + REQUIRE(o.getAttribute("Bool attribute (true)").get() == true); + REQUIRE(o.getAttribute("Bool attribute (false)").get() == false); } { - Series list{ "../samples/serial_bool." 
+ backend, Access::READ_ONLY }; - helper::listSeries( list ); + Series list{"../samples/serial_bool." + backend, Access::READ_ONLY}; + helper::listSeries(list); } } -TEST_CASE( "bool_test", "[serial]" ) +TEST_CASE("bool_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - bool_test( t ); + bool_test(t); } } -inline -void patch_test(const std::string & backend) +inline void patch_test(const std::string &backend) { Series o = Series("../samples/serial_patch." + backend, Access::CREATE); @@ -1926,44 +2194,55 @@ void patch_test(const std::string & backend) uint64_t const patch_idx = 0u; uint64_t const num_patches = 1u; - auto const dset_n = Dataset(determineDatatype<uint64_t>(), {num_patches, }); - e.particlePatches["numParticles"][RecordComponent::SCALAR].resetDataset(dset_n); - e.particlePatches["numParticles"][RecordComponent::SCALAR].store(patch_idx, num_particles); - e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR].resetDataset(dset_n); - e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR].store(patch_idx, uint64_t(0u)); - - auto const dset_f = Dataset(determineDatatype<float>(), {num_patches, }); + auto const dset_n = Dataset( + determineDatatype<uint64_t>(), + { + num_patches, + }); + e.particlePatches["numParticles"][RecordComponent::SCALAR].resetDataset( + dset_n); + e.particlePatches["numParticles"][RecordComponent::SCALAR].store( + patch_idx, num_particles); + e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR] + .resetDataset(dset_n); + e.particlePatches["numParticlesOffset"][RecordComponent::SCALAR].store( + patch_idx, uint64_t(0u)); + + auto const dset_f = Dataset( + determineDatatype<float>(), + { + num_patches, + }); e.particlePatches["offset"]["x"].resetDataset(dset_f); e.particlePatches["offset"]["x"].store(patch_idx, 0.f); e.particlePatches["extent"]["x"].resetDataset(dset_f); e.particlePatches["extent"]["x"].store(patch_idx, 50.f); } -TEST_CASE( "patch_test", "[serial]" ) +TEST_CASE("patch_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { - patch_test( t ); + patch_test(t); Series list{"../samples/serial_patch." + t, Access::READ_ONLY}; helper::listSeries(list); } } -inline -void deletion_test(const std::string & backend) +inline void deletion_test(const std::string &backend) { Series o = Series("../samples/serial_deletion."
+ backend, Access::CREATE); - - o.setAttribute("removed", - "this attribute will be removed after being written to disk"); + o.setAttribute( + "removed", + "this attribute will be removed after being written to disk"); o.flush(); o.deleteAttribute("removed"); o.flush(); - ParticleSpecies& e = o.iterations[1].particles["e"]; + ParticleSpecies &e = o.iterations[1].particles["e"]; auto dset = Dataset(Datatype::DOUBLE, {1}); e["position"][RecordComponent::SCALAR].resetDataset(dset); e["position"][RecordComponent::SCALAR].makeConstant(20.0); @@ -1982,7 +2261,8 @@ void deletion_test(const std::string & backend) e["deletion_scalar_two"][RecordComponent::SCALAR].resetDataset(dset); o.flush(); - e["deletion_scalar_two"].erase(e["deletion_scalar_two"].find(RecordComponent::SCALAR)); + e["deletion_scalar_two"].erase( + e["deletion_scalar_two"].find(RecordComponent::SCALAR)); e.erase(e.find("deletion_scalar_two")); o.flush(); @@ -1996,66 +2276,74 @@ void deletion_test(const std::string & backend) o.flush(); } -TEST_CASE( "deletion_test", "[serial]" ) +TEST_CASE("deletion_test", "[serial]") { - for (auto const & t: testedFileExtensions()) + for (auto const &t : testedFileExtensions()) { if (t == "bp") { continue; // deletion not implemented in ADIOS1 backend } - deletion_test( t ); + deletion_test(t); } } -inline -void read_missing_throw_test(const std::string & backend) +inline void read_missing_throw_test(const std::string &backend) { try { - auto s = Series("this/does/definitely/not/exist." + backend, Access::READ_ONLY); + auto s = Series( + "this/does/definitely/not/exist." + backend, Access::READ_ONLY); } - catch( ... ) + catch (...) { - std::cout << "read missing: successfully caught! " << backend << std::endl; + std::cout << "read missing: successfully caught! " << backend + << std::endl; } } -TEST_CASE( "read_missing_throw_test", "[serial]" ) +TEST_CASE("read_missing_throw_test", "[serial]") { - for (auto const & t: testedFileExtensions()) - read_missing_throw_test( t ); + for (auto const &t : testedFileExtensions()) + read_missing_throw_test(t); } -inline -void optional_paths_110_test(const std::string & backend) +inline void optional_paths_110_test(const std::string &backend) { try { { - Series s = Series("../samples/issue-sample/no_fields/data%T." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/issue-sample/no_fields/data%T." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 1); - REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); + REQUIRE( + std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); REQUIRE(s.iterations[400].meshes.empty()); REQUIRE(s.iterations[400].particles.size() == 1); } { - Series s = Series("../samples/issue-sample/no_particles/data%T." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/issue-sample/no_particles/data%T." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 1); - REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); + REQUIRE( + std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); REQUIRE(s.iterations[400].meshes.size() == 2); REQUIRE(s.iterations[400].particles.empty()); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "issue sample not accessible. (" << e.what() << ")\n"; } { - Series s = Series("../samples/no_meshes_1.1.0_compliant." 
+ backend, Access::CREATE); + Series s = Series( + "../samples/no_meshes_1.1.0_compliant." + backend, Access::CREATE); auto foo = s.iterations[1].particles["foo"]; Dataset dset = Dataset(Datatype::DOUBLE, {1}); foo["position"][RecordComponent::SCALAR].resetDataset(dset); @@ -2065,14 +2353,18 @@ void optional_paths_110_test(const std::string & backend) } { - Series s = Series("../samples/no_particles_1.1.0_compliant." + backend, Access::CREATE); + Series s = Series( + "../samples/no_particles_1.1.0_compliant." + backend, + Access::CREATE); auto foo = s.iterations[1].meshes["foo"]; Dataset dset = Dataset(Datatype::DOUBLE, {1}); foo[RecordComponent::SCALAR].resetDataset(dset); } { - Series s = Series("../samples/no_meshes_1.1.0_compliant." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/no_meshes_1.1.0_compliant." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 0); REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 1); @@ -2081,7 +2373,9 @@ void optional_paths_110_test(const std::string & backend) } { - Series s = Series("../samples/no_particles_1.1.0_compliant." + backend, Access::READ_ONLY); + Series s = Series( + "../samples/no_particles_1.1.0_compliant." + backend, + Access::READ_ONLY); auto attrs = s.attributes(); REQUIRE(std::count(attrs.begin(), attrs.end(), "meshesPath") == 1); REQUIRE(std::count(attrs.begin(), attrs.end(), "particlesPath") == 0); @@ -2094,31 +2388,26 @@ void git_early_chunk_query( std::string const filename, std::string const species, int const step, - std::string const & jsonConfig = "{}" -) + std::string const &jsonConfig = "{}") { try { - Series s = Series( - filename, - Access::READ_ONLY, - jsonConfig - ); + Series s = Series(filename, Access::READ_ONLY, jsonConfig); auto electrons = s.iterations[step].particles[species]; - for( auto & r : electrons ) + for (auto &r : electrons) { std::cout << r.first << ": "; - for( auto & r_c : r.second ) + for (auto &r_c : r.second) { std::cout << r_c.first << "\n"; auto chunks = r_c.second.availableChunks(); std::cout << "no. of chunks: " << chunks.size() << std::endl; } } - - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; @@ -2126,35 +2415,45 @@ void git_early_chunk_query( } #if openPMD_HAVE_HDF5 -TEST_CASE( "empty_alternate_fbpic", "[serial][hdf5]" ) +TEST_CASE("empty_alternate_fbpic", "[serial][hdf5]") { // Ref.: https://github.com/openPMD/openPMD-viewer/issues/296 try { { - Series s = Series("../samples/issue-sample/empty_alternate_fbpic_%T.h5", Access::READ_ONLY); + Series s = Series( + "../samples/issue-sample/empty_alternate_fbpic_%T.h5", + Access::READ_ONLY); REQUIRE(s.iterations.contains(50)); REQUIRE(s.iterations[50].particles.contains("electrons")); - REQUIRE(s.iterations[50].particles["electrons"].contains("momentum")); - REQUIRE(s.iterations[50].particles["electrons"]["momentum"].contains("x")); - auto empty_rc = s.iterations[50].particles["electrons"]["momentum"]["x"]; + REQUIRE( + s.iterations[50].particles["electrons"].contains("momentum")); + REQUIRE( + s.iterations[50].particles["electrons"]["momentum"].contains( + "x")); + auto empty_rc = + s.iterations[50].particles["electrons"]["momentum"]["x"]; REQUIRE(empty_rc.empty()); REQUIRE(empty_rc.getDimensionality() == 1); REQUIRE(empty_rc.getExtent() == Extent{0}); - REQUIRE(isSame(empty_rc.getDatatype(), determineDatatype< double >())); + REQUIRE( + isSame(empty_rc.getDatatype(), determineDatatype())); } { - Series list{ "../samples/issue-sample/empty_alternate_fbpic_%T.h5", Access::READ_ONLY }; - helper::listSeries( list ); + Series list{ + "../samples/issue-sample/empty_alternate_fbpic_%T.h5", + Access::READ_ONLY}; + helper::listSeries(list); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "issue sample not accessible. (" << e.what() << ")\n"; } } -TEST_CASE( "available_chunks_test_hdf5", "[serial][json]" ) +TEST_CASE("available_chunks_test_hdf5", "[serial][json]") { /* * This test is HDF5 specific @@ -2167,45 +2466,44 @@ TEST_CASE( "available_chunks_test_hdf5", "[serial][json]" ) constexpr unsigned height = 10; std::string name = "../samples/available_chunks.h5"; - std::vector< int > data{ 2, 4, 6, 8 }; + std::vector data{2, 4, 6, 8}; { - Series write( name, Access::CREATE ); - Iteration it0 = write.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { Datatype::INT, { height, 4 } } ); - for( unsigned line = 2; line < 7; ++line ) + Series write(name, Access::CREATE); + Iteration it0 = write.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; + E_x.resetDataset({Datatype::INT, {height, 4}}); + for (unsigned line = 2; line < 7; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 4 } ); + E_x.storeChunk(data, {line, 0}, {1, 4}); } - for( unsigned line = 7; line < 9; ++line ) + for (unsigned line = 7; line < 9; ++line) { - E_x.storeChunk( data, { line, 0 }, { 1, 2 } ); + E_x.storeChunk(data, {line, 0}, {1, 2}); } - E_x.storeChunk( data, { 8, 3 }, {2, 1 } ); + E_x.storeChunk(data, {8, 3}, {2, 1}); it0.close(); } { - Series read( name, Access::READ_ONLY ); - Iteration it0 = read.iterations[ 0 ]; - auto E_x = it0.meshes[ "E" ][ "x" ]; + Series read(name, Access::READ_ONLY); + Iteration it0 = read.iterations[0]; + auto E_x = it0.meshes["E"]["x"]; ChunkTable table = E_x.availableChunks(); - REQUIRE( table.size() == 1 ); + REQUIRE(table.size() == 1); /* * Explicitly convert things to bool, so Catch doesn't get the splendid * idea to print the Chunk struct. 
*/ - REQUIRE( - bool( table[ 0 ] == WrittenChunkInfo( { 0, 0 }, { height, 4 } ) ) ); + REQUIRE(bool(table[0] == WrittenChunkInfo({0, 0}, {height, 4}))); } } -TEST_CASE( "optional_paths_110_test", "[serial]" ) +TEST_CASE("optional_paths_110_test", "[serial]") { optional_paths_110_test("h5"); // samples only present for hdf5 } -TEST_CASE( "git_hdf5_sample_structure_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_structure_test", "[serial][hdf5]") { #if openPMD_USE_INVASIVE_TESTS try @@ -2216,40 +2514,129 @@ TEST_CASE( "git_hdf5_sample_structure_test", "[serial][hdf5]" ) REQUIRE(o.iterations.parent() == getWritable(&o)); REQUIRE_THROWS_AS(o.iterations[42], std::out_of_range); REQUIRE(o.iterations[100].parent() == getWritable(&o.iterations)); - REQUIRE(o.iterations[100].meshes.parent() == getWritable(&o.iterations[100])); - REQUIRE(o.iterations[100].meshes["E"].parent() == getWritable(&o.iterations[100].meshes)); - REQUIRE(o.iterations[100].meshes["E"]["x"].parent() == getWritable(&o.iterations[100].meshes["E"])); - REQUIRE(o.iterations[100].meshes["E"]["y"].parent() == getWritable(&o.iterations[100].meshes["E"])); - REQUIRE(o.iterations[100].meshes["E"]["z"].parent() == getWritable(&o.iterations[100].meshes["E"])); - REQUIRE(o.iterations[100].meshes["rho"].parent() == getWritable(&o.iterations[100].meshes)); - REQUIRE(o.iterations[100].meshes["rho"][MeshRecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].meshes)); - REQUIRE_THROWS_AS(o.iterations[100].meshes["cherries"], std::out_of_range); - REQUIRE(o.iterations[100].particles.parent() == getWritable(&o.iterations[100])); - REQUIRE(o.iterations[100].particles["electrons"].parent() == getWritable(&o.iterations[100].particles)); - REQUIRE(o.iterations[100].particles["electrons"]["charge"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["charge"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["mass"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["mass"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"]["x"].parent() == getWritable(&o.iterations[100].particles["electrons"]["momentum"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"]["y"].parent() == getWritable(&o.iterations[100].particles["electrons"]["momentum"])); - REQUIRE(o.iterations[100].particles["electrons"]["momentum"]["z"].parent() == getWritable(&o.iterations[100].particles["electrons"]["momentum"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"]["x"].parent() == getWritable(&o.iterations[100].particles["electrons"]["position"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"]["y"].parent() == getWritable(&o.iterations[100].particles["electrons"]["position"])); - REQUIRE(o.iterations[100].particles["electrons"]["position"]["z"].parent() == getWritable(&o.iterations[100].particles["electrons"]["position"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"].parent() == 
getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"]["x"].parent() == getWritable(&o.iterations[100].particles["electrons"]["positionOffset"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"]["y"].parent() == getWritable(&o.iterations[100].particles["electrons"]["positionOffset"])); - REQUIRE(o.iterations[100].particles["electrons"]["positionOffset"]["z"].parent() == getWritable(&o.iterations[100].particles["electrons"]["positionOffset"])); - REQUIRE(o.iterations[100].particles["electrons"]["weighting"].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE(o.iterations[100].particles["electrons"]["weighting"][RecordComponent::SCALAR].parent() == getWritable(&o.iterations[100].particles["electrons"])); - REQUIRE_THROWS_AS(o.iterations[100].particles["electrons"]["numberOfLegs"], std::out_of_range); - REQUIRE_THROWS_AS(o.iterations[100].particles["apples"], std::out_of_range); + REQUIRE( + o.iterations[100].meshes.parent() == + getWritable(&o.iterations[100])); + REQUIRE( + o.iterations[100].meshes["E"].parent() == + getWritable(&o.iterations[100].meshes)); + REQUIRE( + o.iterations[100].meshes["E"]["x"].parent() == + getWritable(&o.iterations[100].meshes["E"])); + REQUIRE( + o.iterations[100].meshes["E"]["y"].parent() == + getWritable(&o.iterations[100].meshes["E"])); + REQUIRE( + o.iterations[100].meshes["E"]["z"].parent() == + getWritable(&o.iterations[100].meshes["E"])); + REQUIRE( + o.iterations[100].meshes["rho"].parent() == + getWritable(&o.iterations[100].meshes)); + REQUIRE( + o.iterations[100] + .meshes["rho"][MeshRecordComponent::SCALAR] + .parent() == getWritable(&o.iterations[100].meshes)); + REQUIRE_THROWS_AS( + o.iterations[100].meshes["cherries"], std::out_of_range); + REQUIRE( + o.iterations[100].particles.parent() == + getWritable(&o.iterations[100])); + REQUIRE( + o.iterations[100].particles["electrons"].parent() == + getWritable(&o.iterations[100].particles)); + REQUIRE( + o.iterations[100].particles["electrons"]["charge"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["charge"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100].particles["electrons"]["mass"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["mass"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100].particles["electrons"]["momentum"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["momentum"]["x"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["momentum"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["momentum"]["y"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["momentum"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["momentum"]["z"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["momentum"])); + REQUIRE( + o.iterations[100].particles["electrons"]["position"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["position"]["x"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["position"])); + REQUIRE( 
+ o.iterations[100] + .particles["electrons"]["position"]["y"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["position"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["position"]["z"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"]["position"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"]["x"] + .parent() == + getWritable( + &o.iterations[100].particles["electrons"]["positionOffset"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"]["y"] + .parent() == + getWritable( + &o.iterations[100].particles["electrons"]["positionOffset"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["positionOffset"]["z"] + .parent() == + getWritable( + &o.iterations[100].particles["electrons"]["positionOffset"])); + REQUIRE( + o.iterations[100].particles["electrons"]["weighting"].parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE( + o.iterations[100] + .particles["electrons"]["weighting"][RecordComponent::SCALAR] + .parent() == + getWritable(&o.iterations[100].particles["electrons"])); + REQUIRE_THROWS_AS( + o.iterations[100].particles["electrons"]["numberOfLegs"], + std::out_of_range); + REQUIRE_THROWS_AS( + o.iterations[100].particles["apples"], std::out_of_range); int32_t i32 = 32; REQUIRE_THROWS(o.setAttribute("setAttributeFail", i32)); - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. (" << e.what() << ")\n"; return; @@ -2259,7 +2646,7 @@ TEST_CASE( "git_hdf5_sample_structure_test", "[serial][hdf5]" ) #endif } -TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_attribute_test", "[serial][hdf5]") { try { @@ -2277,88 +2664,92 @@ TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) REQUIRE(o.iterations.size() == 5); REQUIRE(o.iterations.count(100) == 1); - Iteration& iteration_100 = o.iterations[100]; - REQUIRE(iteration_100.time< double >() == 3.2847121452090077e-14); - REQUIRE(iteration_100.dt< double >() == 3.2847121452090093e-16); + Iteration &iteration_100 = o.iterations[100]; + REQUIRE(iteration_100.time() == 3.2847121452090077e-14); + REQUIRE(iteration_100.dt() == 3.2847121452090093e-16); REQUIRE(iteration_100.timeUnitSI() == 1.0); REQUIRE(iteration_100.meshes.size() == 2); REQUIRE(iteration_100.meshes.count("E") == 1); REQUIRE(iteration_100.meshes.count("rho") == 1); - std::vector< std::string > al{"x", "y", "z"}; - std::vector< double > gs{8.0000000000000007e-07, - 8.0000000000000007e-07, - 1.0000000000000001e-07}; - std::vector< double > ggo{-1.0000000000000001e-05, - -1.0000000000000001e-05, - -5.1999999999999993e-06}; - std::array< double, 7 > ud{{1., 1., -3., -1., 0., 0., 0.}}; - Mesh& E = iteration_100.meshes["E"]; + std::vector al{"x", "y", "z"}; + std::vector gs{ + 8.0000000000000007e-07, + 8.0000000000000007e-07, + 1.0000000000000001e-07}; + std::vector ggo{ + -1.0000000000000001e-05, + -1.0000000000000001e-05, + -5.1999999999999993e-06}; + std::array ud{{1., 1., -3., -1., 0., 0., 0.}}; + Mesh &E = iteration_100.meshes["E"]; REQUIRE(E.geometry() == Mesh::Geometry::cartesian); REQUIRE(E.dataOrder() == Mesh::DataOrder::C); REQUIRE(E.axisLabels() == al); - REQUIRE(E.gridSpacing< double >() == gs); + REQUIRE(E.gridSpacing() == gs); REQUIRE(E.gridGlobalOffset() == ggo); 
REQUIRE(E.gridUnitSI() == 1.0); REQUIRE(E.unitDimension() == ud); - REQUIRE(E.timeOffset< double >() == 0.0); + REQUIRE(E.timeOffset() == 0.0); REQUIRE(E.size() == 3); REQUIRE(E.count("x") == 1); REQUIRE(E.count("y") == 1); REQUIRE(E.count("z") == 1); - std::vector< double > p{0.5, 0., 0.}; + std::vector p{0.5, 0., 0.}; Extent e{26, 26, 201}; - MeshRecordComponent& E_x = E["x"]; + MeshRecordComponent &E_x = E["x"]; REQUIRE(E_x.unitSI() == 1.0); - REQUIRE(E_x.position< double >() == p); + REQUIRE(E_x.position() == p); REQUIRE(E_x.getDatatype() == Datatype::DOUBLE); REQUIRE(E_x.getExtent() == e); REQUIRE(E_x.getDimensionality() == 3); p = {0., 0.5, 0.}; - MeshRecordComponent& E_y = E["y"]; + MeshRecordComponent &E_y = E["y"]; REQUIRE(E_y.unitSI() == 1.0); - REQUIRE(E_y.position< double >() == p); + REQUIRE(E_y.position() == p); REQUIRE(E_y.getDatatype() == Datatype::DOUBLE); REQUIRE(E_y.getExtent() == e); REQUIRE(E_y.getDimensionality() == 3); p = {0., 0., 0.5}; - MeshRecordComponent& E_z = E["z"]; + MeshRecordComponent &E_z = E["z"]; REQUIRE(E_z.unitSI() == 1.0); - REQUIRE(E_z.position< double >() == p); + REQUIRE(E_z.position() == p); REQUIRE(E_z.getDatatype() == Datatype::DOUBLE); REQUIRE(E_z.getExtent() == e); REQUIRE(E_z.getDimensionality() == 3); - gs = {8.0000000000000007e-07, - 8.0000000000000007e-07, - 1.0000000000000001e-07}; - ggo = {-1.0000000000000001e-05, - -1.0000000000000001e-05, - -5.1999999999999993e-06}; - ud = {{-3., 0., 1., 1., 0., 0., 0.}}; - Mesh& rho = iteration_100.meshes["rho"]; + gs = { + 8.0000000000000007e-07, + 8.0000000000000007e-07, + 1.0000000000000001e-07}; + ggo = { + -1.0000000000000001e-05, + -1.0000000000000001e-05, + -5.1999999999999993e-06}; + ud = {{-3., 0., 1., 1., 0., 0., 0.}}; + Mesh &rho = iteration_100.meshes["rho"]; REQUIRE(rho.geometry() == Mesh::Geometry::cartesian); REQUIRE(rho.dataOrder() == Mesh::DataOrder::C); REQUIRE(rho.axisLabels() == al); - REQUIRE(rho.gridSpacing< double >() == gs); + REQUIRE(rho.gridSpacing() == gs); REQUIRE(rho.gridGlobalOffset() == ggo); REQUIRE(rho.gridUnitSI() == 1.0); REQUIRE(rho.unitDimension() == ud); - REQUIRE(rho.timeOffset< double >() == 0.0); + REQUIRE(rho.timeOffset() == 0.0); REQUIRE(rho.size() == 1); REQUIRE(rho.count(MeshRecordComponent::SCALAR) == 1); p = {0., 0., 0.}; e = {26, 26, 201}; - MeshRecordComponent& rho_scalar = rho[MeshRecordComponent::SCALAR]; + MeshRecordComponent &rho_scalar = rho[MeshRecordComponent::SCALAR]; REQUIRE(rho_scalar.unitSI() == 1.0); - REQUIRE(rho_scalar.position< double >() == p); + REQUIRE(rho_scalar.position() == p); REQUIRE(rho_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(rho_scalar.getExtent() == e); REQUIRE(rho_scalar.getDimensionality() == 3); @@ -2366,7 +2757,7 @@ TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) REQUIRE(iteration_100.particles.size() == 1); REQUIRE(iteration_100.particles.count("electrons") == 1); - ParticleSpecies& electrons = iteration_100.particles["electrons"]; + ParticleSpecies &electrons = iteration_100.particles["electrons"]; REQUIRE(electrons.size() == 6); REQUIRE(electrons.count("charge") == 1); @@ -2376,190 +2767,214 @@ TEST_CASE( "git_hdf5_sample_attribute_test", "[serial][hdf5]" ) REQUIRE(electrons.count("positionOffset") == 1); REQUIRE(electrons.count("weighting") == 1); - ud = {{0., 0., 1., 1., 0., 0., 0.}}; - Record& charge = electrons["charge"]; + ud = {{0., 0., 1., 1., 0., 0., 0.}}; + Record &charge = electrons["charge"]; REQUIRE(charge.unitDimension() == ud); - REQUIRE(charge.timeOffset< double >() == 0.0); + 
REQUIRE(charge.timeOffset() == 0.0); REQUIRE(charge.size() == 1); REQUIRE(charge.count(RecordComponent::SCALAR) == 1); e = {85625}; - RecordComponent& charge_scalar = charge[RecordComponent::SCALAR]; + RecordComponent &charge_scalar = charge[RecordComponent::SCALAR]; REQUIRE(charge_scalar.unitSI() == 1.0); REQUIRE(charge_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(charge_scalar.getDimensionality() == 1); REQUIRE(charge_scalar.getExtent() == e); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& mass = electrons["mass"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &mass = electrons["mass"]; REQUIRE(mass.unitDimension() == ud); - REQUIRE(mass.timeOffset< double >() == 0.0); + REQUIRE(mass.timeOffset() == 0.0); REQUIRE(mass.size() == 1); REQUIRE(mass.count(RecordComponent::SCALAR) == 1); - RecordComponent& mass_scalar = mass[RecordComponent::SCALAR]; + RecordComponent &mass_scalar = mass[RecordComponent::SCALAR]; REQUIRE(mass_scalar.unitSI() == 1.0); REQUIRE(mass_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(mass_scalar.getDimensionality() == 1); REQUIRE(mass_scalar.getExtent() == e); - ud = {{1., 1., -1., 0., 0., 0., 0.}}; - Record& momentum = electrons["momentum"]; + ud = {{1., 1., -1., 0., 0., 0., 0.}}; + Record &momentum = electrons["momentum"]; REQUIRE(momentum.unitDimension() == ud); - REQUIRE(momentum.timeOffset< double >() == 0.0); + REQUIRE(momentum.timeOffset() == 0.0); REQUIRE(momentum.size() == 3); REQUIRE(momentum.count("x") == 1); REQUIRE(momentum.count("y") == 1); REQUIRE(momentum.count("z") == 1); - RecordComponent& momentum_x = momentum["x"]; + RecordComponent &momentum_x = momentum["x"]; REQUIRE(momentum_x.unitSI() == 1.0); REQUIRE(momentum_x.getDatatype() == Datatype::DOUBLE); REQUIRE(momentum_x.getDimensionality() == 1); REQUIRE(momentum_x.getExtent() == e); - RecordComponent& momentum_y = momentum["y"]; + RecordComponent &momentum_y = momentum["y"]; REQUIRE(momentum_y.unitSI() == 1.0); REQUIRE(momentum_y.getDatatype() == Datatype::DOUBLE); REQUIRE(momentum_y.getDimensionality() == 1); REQUIRE(momentum_y.getExtent() == e); - RecordComponent& momentum_z = momentum["z"]; + RecordComponent &momentum_z = momentum["z"]; REQUIRE(momentum_z.unitSI() == 1.0); REQUIRE(momentum_z.getDatatype() == Datatype::DOUBLE); REQUIRE(momentum_z.getDimensionality() == 1); REQUIRE(momentum_z.getExtent() == e); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& position = electrons["position"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &position = electrons["position"]; REQUIRE(position.unitDimension() == ud); - REQUIRE(position.timeOffset< double >() == 0.0); + REQUIRE(position.timeOffset() == 0.0); REQUIRE(position.size() == 3); REQUIRE(position.count("x") == 1); REQUIRE(position.count("y") == 1); REQUIRE(position.count("z") == 1); - RecordComponent& position_x = position["x"]; + RecordComponent &position_x = position["x"]; REQUIRE(position_x.unitSI() == 1.0); REQUIRE(position_x.getDatatype() == Datatype::DOUBLE); REQUIRE(position_x.getDimensionality() == 1); REQUIRE(position_x.getExtent() == e); - RecordComponent& position_y = position["y"]; + RecordComponent &position_y = position["y"]; REQUIRE(position_y.unitSI() == 1.0); REQUIRE(position_y.getDatatype() == Datatype::DOUBLE); REQUIRE(position_y.getDimensionality() == 1); REQUIRE(position_y.getExtent() == e); - RecordComponent& position_z = position["z"]; + RecordComponent &position_z = position["z"]; REQUIRE(position_z.unitSI() == 1.0); REQUIRE(position_z.getDatatype() == Datatype::DOUBLE); 
REQUIRE(position_z.getDimensionality() == 1); REQUIRE(position_z.getExtent() == e); - Record& positionOffset = electrons["positionOffset"]; + Record &positionOffset = electrons["positionOffset"]; REQUIRE(positionOffset.unitDimension() == ud); - REQUIRE(positionOffset.timeOffset< double >() == 0.0); + REQUIRE(positionOffset.timeOffset() == 0.0); REQUIRE(positionOffset.size() == 3); REQUIRE(positionOffset.count("x") == 1); REQUIRE(positionOffset.count("y") == 1); REQUIRE(positionOffset.count("z") == 1); - RecordComponent& positionOffset_x = positionOffset["x"]; + RecordComponent &positionOffset_x = positionOffset["x"]; REQUIRE(positionOffset_x.unitSI() == 1.0); REQUIRE(positionOffset_x.getDatatype() == Datatype::DOUBLE); REQUIRE(positionOffset_x.getDimensionality() == 1); REQUIRE(positionOffset_x.getExtent() == e); - RecordComponent& positionOffset_y = positionOffset["y"]; + RecordComponent &positionOffset_y = positionOffset["y"]; REQUIRE(positionOffset_y.unitSI() == 1.0); REQUIRE(positionOffset_y.getDatatype() == Datatype::DOUBLE); REQUIRE(positionOffset_y.getDimensionality() == 1); REQUIRE(positionOffset_y.getExtent() == e); - RecordComponent& positionOffset_z = positionOffset["z"]; + RecordComponent &positionOffset_z = positionOffset["z"]; REQUIRE(positionOffset_z.unitSI() == 1.0); REQUIRE(positionOffset_z.getDatatype() == Datatype::DOUBLE); REQUIRE(positionOffset_z.getDimensionality() == 1); REQUIRE(positionOffset_z.getExtent() == e); - ud = {{0., 0., 0., 0., 0., 0., 0.}}; - Record& weighting = electrons["weighting"]; + ud = {{0., 0., 0., 0., 0., 0., 0.}}; + Record &weighting = electrons["weighting"]; REQUIRE(weighting.unitDimension() == ud); - REQUIRE(weighting.timeOffset< double >() == 0.0); + REQUIRE(weighting.timeOffset() == 0.0); REQUIRE(weighting.size() == 1); REQUIRE(weighting.count(RecordComponent::SCALAR) == 1); - RecordComponent& weighting_scalar = weighting[RecordComponent::SCALAR]; + RecordComponent &weighting_scalar = weighting[RecordComponent::SCALAR]; REQUIRE(weighting_scalar.unitSI() == 1.0); REQUIRE(weighting_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(weighting_scalar.getDimensionality() == 1); REQUIRE(weighting_scalar.getExtent() == e); - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "git_hdf5_sample_content_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_content_test", "[serial][hdf5]") { try { Series o = Series("../samples/git-sample/data%T.h5", Access::READ_ONLY); { - double actual[3][3][3] = {{{-1.9080703683727052e-09, -1.5632650729457964e-10, 1.1497536256399599e-09}, - {-1.9979540244463578e-09, -2.5512036927466397e-10, 1.0402234629225404e-09}, - {-1.7353589676361025e-09, -8.0899198451334087e-10, -1.6443779671249104e-10}}, - - {{-2.0029988778702545e-09, -1.9543477947081556e-10, 1.0916454407094989e-09}, - {-2.3890367462087170e-09, -4.7158010829662089e-10, 9.0026075483251589e-10}, - {-1.9033881137886510e-09, -7.5192119197708962e-10, 5.0038861942880430e-10}}, - - {{-1.3271805876513554e-09, -5.9243276950837753e-10, -2.2445734160214670e-10}, - {-7.4578609954301101e-10, -1.1995737736469891e-10, 2.5611823772919706e-10}, - {-9.4806251738077663e-10, -1.5472800818372434e-10, -3.6461900165818406e-10}}}; + double actual[3][3][3] = { + {{-1.9080703683727052e-09, + -1.5632650729457964e-10, + 1.1497536256399599e-09}, + {-1.9979540244463578e-09, + -2.5512036927466397e-10, + 1.0402234629225404e-09}, + {-1.7353589676361025e-09, + -8.0899198451334087e-10, + -1.6443779671249104e-10}}, + + {{-2.0029988778702545e-09, + -1.9543477947081556e-10, + 1.0916454407094989e-09}, + {-2.3890367462087170e-09, + -4.7158010829662089e-10, + 9.0026075483251589e-10}, + {-1.9033881137886510e-09, + -7.5192119197708962e-10, + 5.0038861942880430e-10}}, + + {{-1.3271805876513554e-09, + -5.9243276950837753e-10, + -2.2445734160214670e-10}, + {-7.4578609954301101e-10, + -1.1995737736469891e-10, + 2.5611823772919706e-10}, + {-9.4806251738077663e-10, + -1.5472800818372434e-10, + -3.6461900165818406e-10}}}; Mesh rhoMesh = o.iterations[100].meshes["rho"]; MeshRecordComponent rho = rhoMesh[MeshRecordComponent::SCALAR]; Offset offset{20, 20, 190}; Extent extent{3, 3, 3}; auto data = rho.loadChunk(offset, extent); rhoMesh.seriesFlush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int i = 0; i < 3; ++i ) - for( int j = 0; j < 3; ++j ) - for( int k = 0; k < 3; ++k ) - REQUIRE(raw_ptr[((i*3) + j)*3 + k] == actual[i][j][k]); + for (int i = 0; i < 3; ++i) + for (int j = 0; j < 3; ++j) + for (int k = 0; k < 3; ++k) + REQUIRE( + raw_ptr[((i * 3) + j) * 3 + k] == actual[i][j][k]); } { double constant_value = 9.1093829099999999e-31; - RecordComponent& electrons_mass = o.iterations[100].particles["electrons"]["mass"][RecordComponent::SCALAR]; + RecordComponent &electrons_mass = + o.iterations[100] + .particles["electrons"]["mass"][RecordComponent::SCALAR]; Offset offset{15}; Extent extent{3}; auto data = electrons_mass.loadChunk(offset, extent); o.flush(); - double* raw_ptr = data.get(); + double *raw_ptr = data.get(); - for( int i = 0; i < 3; ++i ) + for (int i = 0; i < 3; ++i) REQUIRE(raw_ptr[i] == constant_value); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. (" << e.what() << ")\n"; return; } } -TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_sample_fileBased_read_test", "[serial][hdf5]") { try { @@ -2575,7 +2990,8 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.get().m_filenamePadding == 8); #endif - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; @@ -2583,7 +2999,8 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) try { - Series o = Series("../samples/git-sample/data%08T.h5", Access::READ_ONLY); + Series o = + Series("../samples/git-sample/data%08T.h5", Access::READ_ONLY); REQUIRE(o.iterations.size() == 5); REQUIRE(o.iterations.count(100) == 1); @@ -2595,29 +3012,33 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.get().m_filenamePadding == 8); #endif - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. (" << e.what() << ")\n"; return; } - REQUIRE_THROWS_WITH(Series("../samples/git-sample/data%07T.h5", Access::READ_ONLY), - Catch::Equals("No matching iterations found: data%07T")); + REQUIRE_THROWS_WITH( + Series("../samples/git-sample/data%07T.h5", Access::READ_ONLY), + Catch::Equals("No matching iterations found: data%07T")); try { - std::vector< std::string > newFiles{"../samples/git-sample/data00000001.h5", - "../samples/git-sample/data00000010.h5", - "../samples/git-sample/data00001000.h5", - "../samples/git-sample/data00010000.h5", - "../samples/git-sample/data00100000.h5"}; + std::vector<std::string> newFiles{ + "../samples/git-sample/data00000001.h5", + "../samples/git-sample/data00000010.h5", + "../samples/git-sample/data00001000.h5", + "../samples/git-sample/data00010000.h5", + "../samples/git-sample/data00100000.h5"}; - for( auto const& file : newFiles ) - if( auxiliary::file_exists(file) ) + for (auto const &file : newFiles) + if (auxiliary::file_exists(file)) auxiliary::remove_file(file); { - Series o = Series("../samples/git-sample/data%T.h5", Access::READ_WRITE); + Series o = + Series("../samples/git-sample/data%T.h5", Access::READ_WRITE); #if openPMD_USE_INVASIVE_TESTS REQUIRE(o.get().m_filenamePadding == 8); @@ -2631,28 +3052,30 @@ TEST_CASE( "git_hdf5_sample_fileBased_read_test", "[serial][hdf5]" ) o.flush(); } - for( auto const& file : newFiles ) + for (auto const &file : newFiles) { REQUIRE(auxiliary::file_exists(file)); auxiliary::remove_file(file); } - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "git_hdf5_early_chunk_query", "[serial][hdf5]" ) +TEST_CASE("git_hdf5_early_chunk_query", "[serial][hdf5]") { git_early_chunk_query("../samples/git-sample/data%T.h5", "electrons", 400); } -TEST_CASE( "git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]" ) +TEST_CASE("git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]") { try { - Series o = Series("../samples/git-sample/thetaMode/data%T.h5", Access::READ_ONLY); + Series o = Series( + "../samples/git-sample/thetaMode/data%T.h5", Access::READ_ONLY); REQUIRE(o.iterations.size() == 5); REQUIRE(o.iterations.count(100) == 1); @@ -2670,23 +3093,31 @@ TEST_CASE( "git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]" ) REQUIRE(i.meshes.count("rho") == 1); Mesh B = i.meshes["B"]; - std::vector< std::string > const al{"r", "z"}; - std::vector< double > const gs{3.e-7, 1.e-7}; - std::vector< double > const ggo{0., 3.02e-5}; - std::array< double, 7 > const ud{{0., 1., -2., -1., 0., 0., 0.}}; + std::vector<std::string> const al{"r", "z"}; + std::vector<double> const gs{3.e-7, 1.e-7}; + std::vector<double> const ggo{0., 3.02e-5}; + std::array<double, 7> const ud{{0., 1., -2., -1., 0., 0., 0.}}; REQUIRE(B.geometry() == Mesh::Geometry::thetaMode); REQUIRE(B.geometryParameters() == "m=2;imag=+"); REQUIRE(B.dataOrder() == Mesh::DataOrder::C); REQUIRE(B.axisLabels() == al); - REQUIRE(B.gridSpacing< double >().size() == 2u); + REQUIRE(B.gridSpacing<double>().size() == 2u); REQUIRE(B.gridGlobalOffset().size() == 2u); - REQUIRE(std::abs(B.gridSpacing< double >()[0] - gs[0]) <= std::numeric_limits<double>::epsilon()); - REQUIRE(std::abs(B.gridSpacing< double >()[1] - gs[1]) <= std::numeric_limits<double>::epsilon()); - REQUIRE(std::abs(B.gridGlobalOffset()[0] - ggo[0]) <= std::numeric_limits<double>::epsilon()); - REQUIRE(std::abs(B.gridGlobalOffset()[1] - ggo[1]) <= std::numeric_limits<double>::epsilon()); + REQUIRE( + std::abs(B.gridSpacing<double>()[0] - gs[0]) <= + std::numeric_limits<double>::epsilon()); + REQUIRE( + std::abs(B.gridSpacing<double>()[1] - gs[1]) <= + std::numeric_limits<double>::epsilon()); + REQUIRE( + std::abs(B.gridGlobalOffset()[0] - ggo[0]) <= + std::numeric_limits<double>::epsilon()); + REQUIRE( + std::abs(B.gridGlobalOffset()[1] - ggo[1]) <= + std::numeric_limits<double>::epsilon()); REQUIRE(B.gridUnitSI() == 1.0); REQUIRE(B.unitDimension() == ud); - REQUIRE(B.timeOffset< double >() == static_cast< double >(0.0f)); + REQUIRE(B.timeOffset<double>() == static_cast<double>(0.0f)); REQUIRE(B.size() == 3); REQUIRE(B.count("r") == 1); @@ -2694,33 +3125,35 @@ TEST_CASE( "git_hdf5_sample_read_thetaMode", "[serial][hdf5][thetaMode]" ) REQUIRE(B.count("z") == 1); MeshRecordComponent B_z = B["z"]; - std::vector< double > const pos{0.5, 0.0}; + std::vector<double> const pos{0.5, 0.0}; Extent const ext{3, 51, 201}; REQUIRE(B_z.unitSI() == 1.0); - REQUIRE(B_z.position< double >() == pos); + REQUIRE(B_z.position<double>() == pos); REQUIRE(B_z.getDatatype() == Datatype::DOUBLE); REQUIRE(B_z.getExtent() == ext); REQUIRE(B_z.getDimensionality() == 3); Offset const offset{1, 10, 90}; // skip mode_0 (one scalar field) Extent const extent{2, 30, 20}; // mode_1 (two scalar fields) - auto data = B_z.loadChunk< double >(offset, extent); + auto data = B_z.loadChunk<double>(offset, extent); o.flush(); - } catch (no_such_file_error& e) + } + catch (no_such_file_error &e) { std::cerr << "git sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) +TEST_CASE("hzdr_hdf5_sample_content_test", "[serial][hdf5]") { // since this file might not be publicly available, gracefully handle errors try { /* HZDR: /bigdata/hplsim/development/huebl/lwfa-openPMD-062-smallLWFA-h5 * DOI:10.14278/rodare.57 */ - Series o = Series("../samples/hzdr-sample/h5/simData_%T.h5", Access::READ_ONLY); + Series o = Series( + "../samples/hzdr-sample/h5/simData_%T.h5", Access::READ_ONLY); REQUIRE(o.openPMD() == "1.0.0"); REQUIRE(o.openPMDextension() == 1); @@ -2738,9 +3171,9 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(o.iterations.size() >= 1); REQUIRE(o.iterations.count(0) == 1); - Iteration& i = o.iterations[0]; - REQUIRE(i.time< float >() == static_cast< float >(0.0f)); - REQUIRE(i.dt< float >() == static_cast< float >(1.0f)); + Iteration &i = o.iterations[0]; + REQUIRE(i.time<float>() == static_cast<float>(0.0f)); + REQUIRE(i.dt<float>() == static_cast<float>(1.0f)); REQUIRE(i.timeUnitSI() == 1.3899999999999999e-16); REQUIRE(i.meshes.size() == 4); @@ -2749,146 +3182,158 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(i.meshes.count("e_chargeDensity") == 1); REQUIRE(i.meshes.count("e_energyDensity") == 1); - std::vector< std::string > al{"z", "y", "x"}; - std::vector< float > gs{static_cast< float >(6.2393283843994141f), - static_cast< float >(1.0630855560302734f), - static_cast< float >(6.2393283843994141f)}; - std::vector< double > ggo{0., 0., 0.}; - std::array< double, 7 > ud{{0., 1., -2., -1., 0., 0., 0.}}; - Mesh& B = i.meshes["B"]; + std::vector<std::string> al{"z", "y", "x"}; + std::vector<float> gs{ + static_cast<float>(6.2393283843994141f), + static_cast<float>(1.0630855560302734f), + static_cast<float>(6.2393283843994141f)}; + std::vector<double> ggo{0., 0., 0.}; + std::array<double, 7> ud{{0., 1., -2., -1., 0., 0., 0.}}; + Mesh &B = i.meshes["B"]; REQUIRE(B.geometry() == Mesh::Geometry::cartesian); REQUIRE(B.dataOrder() == Mesh::DataOrder::C); REQUIRE(B.axisLabels() == al); - REQUIRE(B.gridSpacing< float >() == gs); + REQUIRE(B.gridSpacing<float>() == gs); REQUIRE(B.gridGlobalOffset() == ggo); REQUIRE(B.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(B.unitDimension() == ud); - REQUIRE(B.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(B.timeOffset<float>() == static_cast<float>(0.0f)); REQUIRE(B.size() == 3); REQUIRE(B.count("x") == 1); REQUIRE(B.count("y") == 1); REQUIRE(B.count("z") == 1); - std::vector< float > p{static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.5f)}; + std::vector<float> p{ + static_cast<float>(0.0f), + static_cast<float>(0.5f), + static_cast<float>(0.5f)}; Extent e{80, 384, 80}; - MeshRecordComponent& B_x = B["x"]; + MeshRecordComponent &B_x = B["x"]; REQUIRE(B_x.unitSI() == 40903.822240601701); - REQUIRE(B_x.position< float >() == p); + REQUIRE(B_x.position<float>() == p); REQUIRE(B_x.getDatatype() == Datatype::FLOAT); REQUIRE(B_x.getExtent() == e); REQUIRE(B_x.getDimensionality() == 3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& B_y = B["y"]; + p = { + static_cast<float>(0.5f), + static_cast<float>(0.0f), + static_cast<float>(0.5f)}; + MeshRecordComponent &B_y = B["y"]; REQUIRE(B_y.unitSI() == 40903.822240601701); - REQUIRE(B_y.position< float >() == p); + REQUIRE(B_y.position<float>() == p); REQUIRE(B_y.getDatatype() == Datatype::FLOAT); REQUIRE(B_y.getExtent() == e); REQUIRE(B_y.getDimensionality() == 3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - 
MeshRecordComponent& B_z = B["z"]; + p = { + static_cast(0.5f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &B_z = B["z"]; REQUIRE(B_z.unitSI() == 40903.822240601701); - REQUIRE(B_z.position< float >() == p); + REQUIRE(B_z.position() == p); REQUIRE(B_z.getDatatype() == Datatype::FLOAT); REQUIRE(B_z.getExtent() == e); REQUIRE(B_z.getDimensionality() == 3); - ud = {{1., 1., -3., -1., 0., 0., 0.}}; - Mesh& E = i.meshes["E"]; + ud = {{1., 1., -3., -1., 0., 0., 0.}}; + Mesh &E = i.meshes["E"]; REQUIRE(E.geometry() == Mesh::Geometry::cartesian); REQUIRE(E.dataOrder() == Mesh::DataOrder::C); REQUIRE(E.axisLabels() == al); - REQUIRE(E.gridSpacing< float >() == gs); + REQUIRE(E.gridSpacing() == gs); REQUIRE(E.gridGlobalOffset() == ggo); REQUIRE(E.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(E.unitDimension() == ud); - REQUIRE(E.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(E.timeOffset() == static_cast(0.0f)); REQUIRE(E.size() == 3); REQUIRE(E.count("x") == 1); REQUIRE(E.count("y") == 1); REQUIRE(E.count("z") == 1); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.0f)}; + p = { + static_cast(0.5f), + static_cast(0.0f), + static_cast(0.0f)}; e = {80, 384, 80}; - MeshRecordComponent& E_x = E["x"]; + MeshRecordComponent &E_x = E["x"]; REQUIRE(E_x.unitSI() == 12262657411105.049); - REQUIRE(E_x.position< float >() == p); + REQUIRE(E_x.position() == p); REQUIRE(E_x.getDatatype() == Datatype::FLOAT); REQUIRE(E_x.getExtent() == e); REQUIRE(E_x.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - MeshRecordComponent& E_y = E["y"]; + p = { + static_cast(0.0f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &E_y = E["y"]; REQUIRE(E_y.unitSI() == 12262657411105.049); - REQUIRE(E_y.position< float >() == p); + REQUIRE(E_y.position() == p); REQUIRE(E_y.getDatatype() == Datatype::FLOAT); REQUIRE(E_y.getExtent() == e); REQUIRE(E_y.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& E_z = E["z"]; + p = { + static_cast(0.0f), + static_cast(0.0f), + static_cast(0.5f)}; + MeshRecordComponent &E_z = E["z"]; REQUIRE(E_z.unitSI() == 12262657411105.049); - REQUIRE(E_z.position< float >() == p); + REQUIRE(E_z.position() == p); REQUIRE(E_z.getDatatype() == Datatype::FLOAT); REQUIRE(E_z.getExtent() == e); REQUIRE(E_z.getDimensionality() == 3); - ud = {{-3., 0., 1., 1., 0., 0., 0.}}; - Mesh& e_chargeDensity = i.meshes["e_chargeDensity"]; + ud = {{-3., 0., 1., 1., 0., 0., 0.}}; + Mesh &e_chargeDensity = i.meshes["e_chargeDensity"]; REQUIRE(e_chargeDensity.geometry() == Mesh::Geometry::cartesian); REQUIRE(e_chargeDensity.dataOrder() == Mesh::DataOrder::C); REQUIRE(e_chargeDensity.axisLabels() == al); - REQUIRE(e_chargeDensity.gridSpacing< float >() == gs); + REQUIRE(e_chargeDensity.gridSpacing() == gs); REQUIRE(e_chargeDensity.gridGlobalOffset() == ggo); REQUIRE(e_chargeDensity.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(e_chargeDensity.unitDimension() == ud); - REQUIRE(e_chargeDensity.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE( + e_chargeDensity.timeOffset() == static_cast(0.0f)); REQUIRE(e_chargeDensity.size() == 1); REQUIRE(e_chargeDensity.count(MeshRecordComponent::SCALAR) == 1); - p = {static_cast< float >(0.f), - static_cast< float >(0.f), - static_cast< float >(0.f)}; - MeshRecordComponent& e_chargeDensity_scalar = 
e_chargeDensity[MeshRecordComponent::SCALAR]; + p = { + static_cast(0.f), + static_cast(0.f), + static_cast(0.f)}; + MeshRecordComponent &e_chargeDensity_scalar = + e_chargeDensity[MeshRecordComponent::SCALAR]; REQUIRE(e_chargeDensity_scalar.unitSI() == 66306201.002331272); - REQUIRE(e_chargeDensity_scalar.position< float >() == p); + REQUIRE(e_chargeDensity_scalar.position() == p); REQUIRE(e_chargeDensity_scalar.getDatatype() == Datatype::FLOAT); REQUIRE(e_chargeDensity_scalar.getExtent() == e); REQUIRE(e_chargeDensity_scalar.getDimensionality() == 3); - ud = {{-1., 1., -2., 0., 0., 0., 0.}}; - Mesh& e_energyDensity = i.meshes["e_energyDensity"]; + ud = {{-1., 1., -2., 0., 0., 0., 0.}}; + Mesh &e_energyDensity = i.meshes["e_energyDensity"]; REQUIRE(e_energyDensity.geometry() == Mesh::Geometry::cartesian); REQUIRE(e_energyDensity.dataOrder() == Mesh::DataOrder::C); REQUIRE(e_energyDensity.axisLabels() == al); - REQUIRE(e_energyDensity.gridSpacing< float >() == gs); + REQUIRE(e_energyDensity.gridSpacing() == gs); REQUIRE(e_energyDensity.gridGlobalOffset() == ggo); REQUIRE(e_energyDensity.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(e_energyDensity.unitDimension() == ud); - REQUIRE(e_energyDensity.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE( + e_energyDensity.timeOffset() == static_cast(0.0f)); REQUIRE(e_energyDensity.size() == 1); REQUIRE(e_energyDensity.count(MeshRecordComponent::SCALAR) == 1); - MeshRecordComponent& e_energyDensity_scalar = e_energyDensity[MeshRecordComponent::SCALAR]; + MeshRecordComponent &e_energyDensity_scalar = + e_energyDensity[MeshRecordComponent::SCALAR]; REQUIRE(e_energyDensity_scalar.unitSI() == 1.0146696675429705e+18); - REQUIRE(e_energyDensity_scalar.position< float >() == p); + REQUIRE(e_energyDensity_scalar.position() == p); REQUIRE(e_energyDensity_scalar.getDatatype() == Datatype::FLOAT); REQUIRE(e_energyDensity_scalar.getExtent() == e); REQUIRE(e_energyDensity_scalar.getDimensionality() == 3); @@ -2896,7 +3341,7 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(i.particles.size() == 1); REQUIRE(i.particles.count("e") == 1); - ParticleSpecies& species_e = i.particles["e"]; + ParticleSpecies &species_e = i.particles["e"]; REQUIRE(species_e.size() == 6); REQUIRE(species_e.count("charge") == 1); @@ -2907,143 +3352,150 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(species_e.count("positionOffset") == 1); REQUIRE(species_e.count("weighting") == 1); - ud = {{0., 0., 1., 1., 0., 0., 0.}}; - Record& e_charge = species_e["charge"]; + ud = {{0., 0., 1., 1., 0., 0., 0.}}; + Record &e_charge = species_e["charge"]; REQUIRE(e_charge.unitDimension() == ud); - REQUIRE(e_charge.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_charge.timeOffset() == static_cast(0.0f)); REQUIRE(e_charge.size() == 1); REQUIRE(e_charge.count(RecordComponent::SCALAR) == 1); e = {2150400}; - RecordComponent& e_charge_scalar = e_charge[RecordComponent::SCALAR]; + RecordComponent &e_charge_scalar = e_charge[RecordComponent::SCALAR]; REQUIRE(e_charge_scalar.unitSI() == 4.7980045488500004e-15); REQUIRE(e_charge_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(e_charge_scalar.getExtent() == e); REQUIRE(e_charge_scalar.getDimensionality() == 1); - ud = {{0., 1., 0., 0., 0., 0., 0.}}; - Record& e_mass = species_e["mass"]; + ud = {{0., 1., 0., 0., 0., 0., 0.}}; + Record &e_mass = species_e["mass"]; REQUIRE(e_mass.unitDimension() == ud); - REQUIRE(e_mass.timeOffset< float >() == static_cast< float >(0.0f)); + 
REQUIRE(e_mass.timeOffset() == static_cast(0.0f)); REQUIRE(e_mass.size() == 1); REQUIRE(e_mass.count(RecordComponent::SCALAR) == 1); - RecordComponent& e_mass_scalar = e_mass[RecordComponent::SCALAR]; + RecordComponent &e_mass_scalar = e_mass[RecordComponent::SCALAR]; REQUIRE(e_mass_scalar.unitSI() == 2.7279684799430467e-26); REQUIRE(e_mass_scalar.getDatatype() == Datatype::DOUBLE); REQUIRE(e_mass_scalar.getExtent() == e); REQUIRE(e_mass_scalar.getDimensionality() == 1); - ud = {{1., 1., -1., 0., 0., 0., 0.}}; - Record& e_momentum = species_e["momentum"]; + ud = {{1., 1., -1., 0., 0., 0., 0.}}; + Record &e_momentum = species_e["momentum"]; REQUIRE(e_momentum.unitDimension() == ud); - REQUIRE(e_momentum.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_momentum.timeOffset() == static_cast(0.0f)); REQUIRE(e_momentum.size() == 3); REQUIRE(e_momentum.count("x") == 1); REQUIRE(e_momentum.count("y") == 1); REQUIRE(e_momentum.count("z") == 1); - RecordComponent& e_momentum_x = e_momentum["x"]; + RecordComponent &e_momentum_x = e_momentum["x"]; REQUIRE(e_momentum_x.unitSI() == 8.1782437594864961e-18); REQUIRE(e_momentum_x.getDatatype() == Datatype::FLOAT); REQUIRE(e_momentum_x.getExtent() == e); REQUIRE(e_momentum_x.getDimensionality() == 1); - RecordComponent& e_momentum_y = e_momentum["y"]; + RecordComponent &e_momentum_y = e_momentum["y"]; REQUIRE(e_momentum_y.unitSI() == 8.1782437594864961e-18); REQUIRE(e_momentum_y.getDatatype() == Datatype::FLOAT); REQUIRE(e_momentum_y.getExtent() == e); REQUIRE(e_momentum_y.getDimensionality() == 1); - RecordComponent& e_momentum_z = e_momentum["z"]; + RecordComponent &e_momentum_z = e_momentum["z"]; REQUIRE(e_momentum_z.unitSI() == 8.1782437594864961e-18); REQUIRE(e_momentum_z.getDatatype() == Datatype::FLOAT); REQUIRE(e_momentum_z.getExtent() == e); REQUIRE(e_momentum_z.getDimensionality() == 1); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& e_position = species_e["position"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &e_position = species_e["position"]; REQUIRE(e_position.unitDimension() == ud); - REQUIRE(e_position.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_position.timeOffset() == static_cast(0.0f)); REQUIRE(e_position.size() == 3); REQUIRE(e_position.count("x") == 1); REQUIRE(e_position.count("y") == 1); REQUIRE(e_position.count("z") == 1); - RecordComponent& e_position_x = e_position["x"]; + RecordComponent &e_position_x = e_position["x"]; REQUIRE(e_position_x.unitSI() == 2.599999993753294e-07); REQUIRE(e_position_x.getDatatype() == Datatype::FLOAT); REQUIRE(e_position_x.getExtent() == e); REQUIRE(e_position_x.getDimensionality() == 1); - RecordComponent& e_position_y = e_position["y"]; + RecordComponent &e_position_y = e_position["y"]; REQUIRE(e_position_y.unitSI() == 4.4299999435019118e-08); REQUIRE(e_position_y.getDatatype() == Datatype::FLOAT); REQUIRE(e_position_y.getExtent() == e); REQUIRE(e_position_y.getDimensionality() == 1); - RecordComponent& e_position_z = e_position["z"]; + RecordComponent &e_position_z = e_position["z"]; REQUIRE(e_position_z.unitSI() == 2.599999993753294e-07); REQUIRE(e_position_z.getDatatype() == Datatype::FLOAT); REQUIRE(e_position_z.getExtent() == e); REQUIRE(e_position_z.getDimensionality() == 1); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - Record& e_positionOffset = species_e["positionOffset"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + Record &e_positionOffset = species_e["positionOffset"]; REQUIRE(e_positionOffset.unitDimension() == ud); - 
REQUIRE(e_positionOffset.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE( + e_positionOffset.timeOffset() == static_cast(0.0f)); REQUIRE(e_positionOffset.size() == 3); REQUIRE(e_positionOffset.count("x") == 1); REQUIRE(e_positionOffset.count("y") == 1); REQUIRE(e_positionOffset.count("z") == 1); - RecordComponent& e_positionOffset_x = e_positionOffset["x"]; + RecordComponent &e_positionOffset_x = e_positionOffset["x"]; REQUIRE(e_positionOffset_x.unitSI() == 2.599999993753294e-07); - REQUIRE(e_positionOffset_x.getDatatype() == determineDatatype< int32_t >()); + REQUIRE( + e_positionOffset_x.getDatatype() == determineDatatype()); REQUIRE(e_positionOffset_x.getExtent() == e); REQUIRE(e_positionOffset_x.getDimensionality() == 1); - RecordComponent& e_positionOffset_y = e_positionOffset["y"]; + RecordComponent &e_positionOffset_y = e_positionOffset["y"]; REQUIRE(e_positionOffset_y.unitSI() == 4.4299999435019118e-08); - REQUIRE(e_positionOffset_y.getDatatype() == determineDatatype< int32_t >()); + REQUIRE( + e_positionOffset_y.getDatatype() == determineDatatype()); REQUIRE(e_positionOffset_y.getExtent() == e); REQUIRE(e_positionOffset_y.getDimensionality() == 1); - RecordComponent& e_positionOffset_z = e_positionOffset["z"]; + RecordComponent &e_positionOffset_z = e_positionOffset["z"]; REQUIRE(e_positionOffset_z.unitSI() == 2.599999993753294e-07); - REQUIRE(e_positionOffset_z.getDatatype() == determineDatatype< int32_t >()); + REQUIRE( + e_positionOffset_z.getDatatype() == determineDatatype()); REQUIRE(e_positionOffset_z.getExtent() == e); REQUIRE(e_positionOffset_z.getDimensionality() == 1); - ud = {{0., 0., 0., 0., 0., 0., 0.}}; - Record& e_weighting = species_e["weighting"]; + ud = {{0., 0., 0., 0., 0., 0., 0.}}; + Record &e_weighting = species_e["weighting"]; REQUIRE(e_weighting.unitDimension() == ud); - REQUIRE(e_weighting.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(e_weighting.timeOffset() == static_cast(0.0f)); REQUIRE(e_weighting.size() == 1); REQUIRE(e_weighting.count(RecordComponent::SCALAR) == 1); - RecordComponent& e_weighting_scalar = e_weighting[RecordComponent::SCALAR]; + RecordComponent &e_weighting_scalar = + e_weighting[RecordComponent::SCALAR]; REQUIRE(e_weighting_scalar.unitSI() == 1.0); REQUIRE(e_weighting_scalar.getDatatype() == Datatype::FLOAT); REQUIRE(e_weighting_scalar.getExtent() == e); REQUIRE(e_weighting_scalar.getDimensionality() == 1); - ParticlePatches& e_patches = species_e.particlePatches; - REQUIRE(e_patches.size() == 4); /* extent, numParticles, numParticlesOffset, offset */ + ParticlePatches &e_patches = species_e.particlePatches; + REQUIRE( + e_patches.size() == + 4); /* extent, numParticles, numParticlesOffset, offset */ REQUIRE(e_patches.count("extent") == 1); REQUIRE(e_patches.count("numParticles") == 1); REQUIRE(e_patches.count("numParticlesOffset") == 1); REQUIRE(e_patches.count("offset") == 1); REQUIRE(e_patches.numPatches() == 4); - ud = {{1., 0., 0., 0., 0., 0., 0.}}; - PatchRecord& e_extent = e_patches["extent"]; + ud = {{1., 0., 0., 0., 0., 0., 0.}}; + PatchRecord &e_extent = e_patches["extent"]; REQUIRE(e_extent.unitDimension() == ud); REQUIRE(e_extent.size() == 3); @@ -3051,63 +3503,76 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(e_extent.count("y") == 1); REQUIRE(e_extent.count("z") == 1); - PatchRecordComponent& e_extent_x = e_extent["x"]; + PatchRecordComponent &e_extent_x = e_extent["x"]; REQUIRE(e_extent_x.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - 
REQUIRE(e_extent_x.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_extent_x.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_extent_x.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_extent_x.getDatatype(), determineDatatype())); - PatchRecordComponent& e_extent_y = e_extent["y"]; + PatchRecordComponent &e_extent_y = e_extent["y"]; REQUIRE(e_extent_y.unitSI() == 4.429999943501912e-08); #if !defined(_MSC_VER) - REQUIRE(e_extent_y.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_extent_y.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_extent_y.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_extent_y.getDatatype(), determineDatatype())); - PatchRecordComponent& e_extent_z = e_extent["z"]; + PatchRecordComponent &e_extent_z = e_extent["z"]; REQUIRE(e_extent_z.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - REQUIRE(e_extent_z.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_extent_z.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_extent_z.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_extent_z.getDatatype(), determineDatatype())); - std::vector< uint64_t > data( e_patches.size() ); + std::vector data(e_patches.size()); e_extent_z.load(shareRaw(data.data())); species_e.seriesFlush(); - REQUIRE(data.at(0) == static_cast< uint64_t >(80)); - REQUIRE(data.at(1) == static_cast< uint64_t >(80)); - REQUIRE(data.at(2) == static_cast< uint64_t >(80)); - REQUIRE(data.at(3) == static_cast< uint64_t >(80)); + REQUIRE(data.at(0) == static_cast(80)); + REQUIRE(data.at(1) == static_cast(80)); + REQUIRE(data.at(2) == static_cast(80)); + REQUIRE(data.at(3) == static_cast(80)); - PatchRecord& e_numParticles = e_patches["numParticles"]; + PatchRecord &e_numParticles = e_patches["numParticles"]; REQUIRE(e_numParticles.size() == 1); REQUIRE(e_numParticles.count(RecordComponent::SCALAR) == 1); - PatchRecordComponent& e_numParticles_scalar = e_numParticles[RecordComponent::SCALAR]; + PatchRecordComponent &e_numParticles_scalar = + e_numParticles[RecordComponent::SCALAR]; #if !defined(_MSC_VER) - REQUIRE(e_numParticles_scalar.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE( + e_numParticles_scalar.getDatatype() == + determineDatatype()); #endif - REQUIRE(isSame(e_numParticles_scalar.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE(isSame( + e_numParticles_scalar.getDatatype(), + determineDatatype())); e_numParticles_scalar.load(shareRaw(data.data())); o.flush(); - REQUIRE(data.at(0) == static_cast< uint64_t >(512000)); - REQUIRE(data.at(1) == static_cast< uint64_t >(819200)); - REQUIRE(data.at(2) == static_cast< uint64_t >(819200)); - REQUIRE(data.at(3) == static_cast< uint64_t >(0)); + REQUIRE(data.at(0) == static_cast(512000)); + REQUIRE(data.at(1) == static_cast(819200)); + REQUIRE(data.at(2) == static_cast(819200)); + REQUIRE(data.at(3) == static_cast(0)); - PatchRecord& e_numParticlesOffset = e_patches["numParticlesOffset"]; + PatchRecord &e_numParticlesOffset = e_patches["numParticlesOffset"]; REQUIRE(e_numParticlesOffset.size() == 1); REQUIRE(e_numParticlesOffset.count(RecordComponent::SCALAR) == 1); - PatchRecordComponent& e_numParticlesOffset_scalar = e_numParticlesOffset[RecordComponent::SCALAR]; + PatchRecordComponent &e_numParticlesOffset_scalar = + e_numParticlesOffset[RecordComponent::SCALAR]; #if !defined(_MSC_VER) - REQUIRE(e_numParticlesOffset_scalar.getDatatype() == determineDatatype< uint64_t >()); + 
REQUIRE( + e_numParticlesOffset_scalar.getDatatype() == + determineDatatype()); #endif - REQUIRE(isSame(e_numParticlesOffset_scalar.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE(isSame( + e_numParticlesOffset_scalar.getDatatype(), + determineDatatype())); - PatchRecord& e_offset = e_patches["offset"]; + PatchRecord &e_offset = e_patches["offset"]; REQUIRE(e_offset.unitDimension() == ud); REQUIRE(e_offset.size() == 3); @@ -3115,71 +3580,77 @@ TEST_CASE( "hzdr_hdf5_sample_content_test", "[serial][hdf5]" ) REQUIRE(e_offset.count("y") == 1); REQUIRE(e_offset.count("z") == 1); - PatchRecordComponent& e_offset_x = e_offset["x"]; + PatchRecordComponent &e_offset_x = e_offset["x"]; REQUIRE(e_offset_x.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - REQUIRE(e_offset_x.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_offset_x.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_offset_x.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_offset_x.getDatatype(), determineDatatype())); - PatchRecordComponent& e_offset_y = e_offset["y"]; + PatchRecordComponent &e_offset_y = e_offset["y"]; REQUIRE(e_offset_y.unitSI() == 4.429999943501912e-08); #if !defined(_MSC_VER) - REQUIRE(e_offset_y.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_offset_y.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_offset_y.getDatatype(), determineDatatype< uint64_t >())); + REQUIRE( + isSame(e_offset_y.getDatatype(), determineDatatype())); e_offset_y.load(shareRaw(data.data())); o.flush(); - REQUIRE(data.at(0) == static_cast< uint64_t >(0)); - REQUIRE(data.at(1) == static_cast< uint64_t >(128)); - REQUIRE(data.at(2) == static_cast< uint64_t >(256)); - REQUIRE(data.at(3) == static_cast< uint64_t >(384)); + REQUIRE(data.at(0) == static_cast(0)); + REQUIRE(data.at(1) == static_cast(128)); + REQUIRE(data.at(2) == static_cast(256)); + REQUIRE(data.at(3) == static_cast(384)); - PatchRecordComponent& e_offset_z = e_offset["z"]; + PatchRecordComponent &e_offset_z = e_offset["z"]; REQUIRE(e_offset_z.unitSI() == 2.599999993753294e-07); #if !defined(_MSC_VER) - REQUIRE(e_offset_z.getDatatype() == determineDatatype< uint64_t >()); + REQUIRE(e_offset_z.getDatatype() == determineDatatype()); #endif - REQUIRE(isSame(e_offset_z.getDatatype(), determineDatatype< uint64_t >())); - } catch (no_such_file_error& e) + REQUIRE( + isSame(e_offset_z.getDatatype(), determineDatatype())); + } + catch (no_such_file_error &e) { std::cerr << "HZDR sample not accessible. 
(" << e.what() << ")\n"; return; } } -TEST_CASE( "hdf5_bool_test", "[serial][hdf5]" ) +TEST_CASE("hdf5_bool_test", "[serial][hdf5]") { bool_test("h5"); } -TEST_CASE( "hdf5_patch_test", "[serial][hdf5]" ) +TEST_CASE("hdf5_patch_test", "[serial][hdf5]") { patch_test("h5"); } -TEST_CASE( "hdf5_deletion_test", "[serial][hdf5]" ) +TEST_CASE("hdf5_deletion_test", "[serial][hdf5]") { deletion_test("h5"); } #else -TEST_CASE( "no_serial_hdf5", "[serial][hdf5]" ) +TEST_CASE("no_serial_hdf5", "[serial][hdf5]") { REQUIRE(true); } #endif #if openPMD_HAVE_ADIOS1 -TEST_CASE( "hzdr_adios1_sample_content_test", "[serial][adios1]" ) +TEST_CASE("hzdr_adios1_sample_content_test", "[serial][adios1]") { // since this file might not be publicly available, gracefully handle errors - /** @todo add bp example files to https://github.com/openPMD/openPMD-example-datasets */ + /** @todo add bp example files to + * https://github.com/openPMD/openPMD-example-datasets */ try { /* HZDR: /bigdata/hplsim/development/huebl/lwfa-bgfield-001 * DOI:10.14278/rodare.57 */ - Series o = Series("../samples/hzdr-sample/bp/checkpoint_%T.bp", Access::READ_ONLY); + Series o = Series( + "../samples/hzdr-sample/bp/checkpoint_%T.bp", Access::READ_ONLY); REQUIRE(o.openPMD() == "1.0.0"); REQUIRE(o.openPMDextension() == 1); @@ -3196,136 +3667,145 @@ TEST_CASE( "hzdr_adios1_sample_content_test", "[serial][adios1]" ) REQUIRE(o.iterations.size() >= 1); REQUIRE(o.iterations.count(0) == 1); - Iteration& i = o.iterations[0]; - REQUIRE(i.time< float >() == static_cast< float >(0.0f)); - REQUIRE(i.dt< float >() == static_cast< float >(1.0f)); + Iteration &i = o.iterations[0]; + REQUIRE(i.time() == static_cast(0.0f)); + REQUIRE(i.dt() == static_cast(1.0f)); REQUIRE(i.timeUnitSI() == 1.3899999999999999e-16); REQUIRE(i.meshes.count("B") == 1); REQUIRE(i.meshes.count("E") == 1); REQUIRE(i.meshes.size() == 2); - std::vector< std::string > al{"z", "y", "x"}; - std::vector< float > gs{static_cast< float >(4.252342224121094f), - static_cast< float >(1.0630855560302734f), - static_cast< float >(4.252342224121094f)}; - std::vector< double > ggo{0., 0., 0.}; - std::array< double, 7 > ud{{0., 1., -2., -1., 0., 0., 0.}}; - Mesh& B = i.meshes["B"]; + std::vector al{"z", "y", "x"}; + std::vector gs{ + static_cast(4.252342224121094f), + static_cast(1.0630855560302734f), + static_cast(4.252342224121094f)}; + std::vector ggo{0., 0., 0.}; + std::array ud{{0., 1., -2., -1., 0., 0., 0.}}; + Mesh &B = i.meshes["B"]; REQUIRE(B.geometry() == Mesh::Geometry::cartesian); REQUIRE(B.dataOrder() == Mesh::DataOrder::C); REQUIRE(B.axisLabels() == al); - REQUIRE(B.gridSpacing< float >() == gs); + REQUIRE(B.gridSpacing() == gs); REQUIRE(B.gridGlobalOffset() == ggo); REQUIRE(B.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(B.unitDimension() == ud); - REQUIRE(B.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(B.timeOffset() == static_cast(0.0f)); REQUIRE(B.size() == 3); REQUIRE(B.count("x") == 1); REQUIRE(B.count("y") == 1); REQUIRE(B.count("z") == 1); - std::vector< float > p{static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.5f)}; + std::vector p{ + static_cast(0.0f), + static_cast(0.5f), + static_cast(0.5f)}; Extent e{192, 512, 192}; - MeshRecordComponent& B_x = B["x"]; + MeshRecordComponent &B_x = B["x"]; REQUIRE(B_x.unitSI() == 40903.82224060171); - REQUIRE(B_x.position< float >() == p); + REQUIRE(B_x.position() == p); REQUIRE(B_x.getDatatype() == Datatype::FLOAT); REQUIRE(B_x.getExtent() == e); REQUIRE(B_x.getDimensionality() == 
3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& B_y = B["y"]; + p = { + static_cast(0.5f), + static_cast(0.0f), + static_cast(0.5f)}; + MeshRecordComponent &B_y = B["y"]; REQUIRE(B_y.unitSI() == 40903.82224060171); - REQUIRE(B_y.position< float >() == p); + REQUIRE(B_y.position() == p); REQUIRE(B_y.getDatatype() == Datatype::FLOAT); REQUIRE(B_y.getExtent() == e); REQUIRE(B_y.getDimensionality() == 3); - p = {static_cast< float >(0.5f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - MeshRecordComponent& B_z = B["z"]; + p = { + static_cast(0.5f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &B_z = B["z"]; REQUIRE(B_z.unitSI() == 40903.82224060171); - REQUIRE(B_z.position< float >() == p); + REQUIRE(B_z.position() == p); REQUIRE(B_z.getDatatype() == Datatype::FLOAT); REQUIRE(B_z.getExtent() == e); REQUIRE(B_z.getDimensionality() == 3); - ud = {{1., 1., -3., -1., 0., 0., 0.}}; - Mesh& E = i.meshes["E"]; + ud = {{1., 1., -3., -1., 0., 0., 0.}}; + Mesh &E = i.meshes["E"]; REQUIRE(E.geometry() == Mesh::Geometry::cartesian); REQUIRE(E.dataOrder() == Mesh::DataOrder::C); REQUIRE(E.axisLabels() == al); - REQUIRE(E.gridSpacing< float >() == gs); + REQUIRE(E.gridSpacing() == gs); REQUIRE(E.gridGlobalOffset() == ggo); REQUIRE(E.gridUnitSI() == 4.1671151661999998e-08); REQUIRE(E.unitDimension() == ud); - REQUIRE(E.timeOffset< float >() == static_cast< float >(0.0f)); + REQUIRE(E.timeOffset() == static_cast(0.0f)); REQUIRE(E.size() == 3); REQUIRE(E.count("x") == 1); REQUIRE(E.count("y") == 1); REQUIRE(E.count("z") == 1); - p = {static_cast< float >(0.5f), - static_cast< float >(0.0f), - static_cast< float >(0.0f)}; + p = { + static_cast(0.5f), + static_cast(0.0f), + static_cast(0.0f)}; e = {192, 512, 192}; - MeshRecordComponent& E_x = E["x"]; + MeshRecordComponent &E_x = E["x"]; REQUIRE(E_x.unitSI() == 12262657411105.05); - REQUIRE(E_x.position< float >() == p); + REQUIRE(E_x.position() == p); REQUIRE(E_x.getDatatype() == Datatype::FLOAT); REQUIRE(E_x.getExtent() == e); REQUIRE(E_x.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.5f), - static_cast< float >(0.0f)}; - MeshRecordComponent& E_y = E["y"]; + p = { + static_cast(0.0f), + static_cast(0.5f), + static_cast(0.0f)}; + MeshRecordComponent &E_y = E["y"]; REQUIRE(E_y.unitSI() == 12262657411105.05); - REQUIRE(E_y.position< float >() == p); + REQUIRE(E_y.position() == p); REQUIRE(E_y.getDatatype() == Datatype::FLOAT); REQUIRE(E_y.getExtent() == e); REQUIRE(E_y.getDimensionality() == 3); - p = {static_cast< float >(0.0f), - static_cast< float >(0.0f), - static_cast< float >(0.5f)}; - MeshRecordComponent& E_z = E["z"]; + p = { + static_cast(0.0f), + static_cast(0.0f), + static_cast(0.5f)}; + MeshRecordComponent &E_z = E["z"]; REQUIRE(E_z.unitSI() == 12262657411105.05); - REQUIRE(E_z.position< float >() == p); + REQUIRE(E_z.position() == p); REQUIRE(E_z.getDatatype() == Datatype::FLOAT); REQUIRE(E_z.getExtent() == e); REQUIRE(E_z.getDimensionality() == 3); REQUIRE(i.particles.empty()); - float actual[3][3][3] = {{{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, - {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, - {7.0438218e-06f, 7.0438218e-06f, 
7.0438218e-06f}, - {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; + float actual[3][3][3] = { + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}, + {{6.7173387e-06f, 6.7173387e-06f, 6.7173387e-06f}, + {7.0438218e-06f, 7.0438218e-06f, 7.0438218e-06f}, + {7.3689453e-06f, 7.3689453e-06f, 7.3689453e-06f}}}; Offset offset{20, 20, 150}; Extent extent{3, 3, 3}; auto data = B_z.loadChunk(offset, extent); o.flush(); - float* raw_ptr = data.get(); + float *raw_ptr = data.get(); - for( int a = 0; a < 3; ++a ) - for( int b = 0; b < 3; ++b ) - for( int c = 0; c < 3; ++c ) - REQUIRE(raw_ptr[((a*3) + b)*3 + c] == actual[a][b][c]); - } catch (no_such_file_error& e) + for (int a = 0; a < 3; ++a) + for (int b = 0; b < 3; ++b) + for (int c = 0; c < 3; ++c) + REQUIRE(raw_ptr[((a * 3) + b) * 3 + c] == actual[a][b][c]); + } + catch (no_such_file_error &e) { std::cerr << "HZDR sample not accessible. (" << e.what() << ")\n"; return; @@ -3333,14 +3813,14 @@ TEST_CASE( "hzdr_adios1_sample_content_test", "[serial][adios1]" ) } #else -TEST_CASE( "no_serial_adios1", "[serial][adios]") +TEST_CASE("no_serial_adios1", "[serial][adios]") { REQUIRE(true); } #endif #if openPMD_HAVE_ADIOS1 -TEST_CASE( "serial_adios1_json_config", "[serial][adios1]" ) +TEST_CASE("serial_adios1_json_config", "[serial][adios1]") { std::string globalConfig = R"END( { @@ -3366,68 +3846,68 @@ TEST_CASE( "serial_adios1_json_config", "[serial][adios1]" ) } })END"; - auto test1 = [ & ]() { + auto test1 = [&]() { Series write( "../samples/adios1_dataset_transform.bp", Access::CREATE, - globalConfig ); - auto meshes = write.writeIterations()[ 0 ].meshes; + globalConfig); + auto meshes = write.writeIterations()[0].meshes; auto defaultConfiguredMesh = - meshes[ "defaultConfigured" ][ RecordComponent::SCALAR ]; + meshes["defaultConfigured"][RecordComponent::SCALAR]; - Dataset ds{ Datatype::INT, { 10 } }; + Dataset ds{Datatype::INT, {10}}; - defaultConfiguredMesh.resetDataset( ds ); + defaultConfiguredMesh.resetDataset(ds); - std::vector< int > data( 10, 2345 ); - defaultConfiguredMesh.storeChunk( data, { 0 }, { 10 } ); + std::vector data(10, 2345); + defaultConfiguredMesh.storeChunk(data, {0}, {10}); write.flush(); }; REQUIRE_THROWS_WITH( test1(), - Catch::Equals( "[ADIOS1] Internal error: Failed to set ADIOS transform " - "during Dataset creation" ) ); + Catch::Equals("[ADIOS1] Internal error: Failed to set ADIOS transform " + "during Dataset creation")); - auto test2 = [ & ]() { + auto test2 = [&]() { Series write( "../samples/adios1_dataset_transform.bp", Access::CREATE, - globalConfig ); - auto meshes = write.writeIterations()[ 0 ].meshes; + globalConfig); + auto meshes = write.writeIterations()[0].meshes; auto overridenTransformMesh = - meshes[ "overridenConfig" ][ RecordComponent::SCALAR ]; + meshes["overridenConfig"][RecordComponent::SCALAR]; - Dataset ds{ Datatype::INT, { 10 } }; + Dataset ds{Datatype::INT, {10}}; ds.options = localConfig; - overridenTransformMesh.resetDataset( ds ); + overridenTransformMesh.resetDataset(ds); - std::vector< int > data( 10, 2345 ); - overridenTransformMesh.storeChunk( data, { 0 }, { 10 } ); + std::vector data(10, 2345); + overridenTransformMesh.storeChunk(data, {0}, {10}); write.flush(); }; REQUIRE_THROWS_WITH( test2(), - Catch::Equals( "[ADIOS1] 
Internal error: Failed to set ADIOS transform " - "during Dataset creation" ) ); + Catch::Equals("[ADIOS1] Internal error: Failed to set ADIOS transform " + "during Dataset creation")); - auto test3 = [ & ]() { + auto test3 = [&]() { // use no dataset transform at all Series write( "../samples/adios1_dataset_transform.bp", Access::CREATE, - globalConfigWithoutCompression ); - auto meshes = write.writeIterations()[ 0 ].meshes; + globalConfigWithoutCompression); + auto meshes = write.writeIterations()[0].meshes; auto defaultConfiguredMesh = - meshes[ "defaultConfigured" ][ RecordComponent::SCALAR ]; + meshes["defaultConfigured"][RecordComponent::SCALAR]; - Dataset ds{ Datatype::INT, { 10 } }; - defaultConfiguredMesh.resetDataset( ds ); + Dataset ds{Datatype::INT, {10}}; + defaultConfiguredMesh.resetDataset(ds); - std::vector< int > data( 10, 2345 ); - defaultConfiguredMesh.storeChunk( data, { 0 }, { 10 } ); + std::vector data(10, 2345); + defaultConfiguredMesh.storeChunk(data, {0}, {10}); write.flush(); }; @@ -3436,18 +3916,18 @@ TEST_CASE( "serial_adios1_json_config", "[serial][adios1]" ) #endif #if openPMD_HAVE_ADIOS2 -TEST_CASE( "git_adios2_early_chunk_query", "[serial][adios2]" ) +TEST_CASE("git_adios2_early_chunk_query", "[serial][adios2]") { git_early_chunk_query( "../samples/git-sample/3d-bp4/example-3d-bp4_%T.bp", "e", 600, - R"({"backend": "adios2"})" ); + R"({"backend": "adios2"})"); } -TEST_CASE( "serial_adios2_backend_config", "[serial][adios2]" ) +TEST_CASE("serial_adios2_backend_config", "[serial][adios2]") { - if( auxiliary::getEnvString( "OPENPMD_BP_BACKEND", "NOT_SET" ) == "ADIOS1" ) + if (auxiliary::getEnvString("OPENPMD_BP_BACKEND", "NOT_SET") == "ADIOS1") { // run this test for ADIOS2 only return; @@ -3550,41 +4030,40 @@ doshuffle = "BLOSC_BITSHUFFLE" [hdf5] this = "should not warn" )END"; - auto const write = [ &datasetConfig ]( - std::string const & filename, - std::string const & config ) { + auto const write = [&datasetConfig]( + std::string const &filename, + std::string const &config) { std::fstream file; file.open( "../samples/write_config.toml", - std::ios_base::out | std::ios::binary ); + std::ios_base::out | std::ios::binary); file << config; file.flush(); openPMD::Series series( filename, openPMD::Access::CREATE, - "@../samples/write_config.toml " ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - openPMD::Dataset ds( openPMD::Datatype::INT, { 1000 } ); - E_x.resetDataset( ds ); - std::vector< int > data( 1000, 0 ); - E_x.storeChunk( data, { 0 }, { 1000 } ); - - auto E_y = series.iterations[ 0 ].meshes[ "E" ][ "y" ]; + "@../samples/write_config.toml "); + auto E_x = series.iterations[0].meshes["E"]["x"]; + openPMD::Dataset ds(openPMD::Datatype::INT, {1000}); + E_x.resetDataset(ds); + std::vector data(1000, 0); + E_x.storeChunk(data, {0}, {1000}); + + auto E_y = series.iterations[0].meshes["E"]["y"]; // let's override the global compression settings ds.options = datasetConfig; - E_y.resetDataset( ds ); - E_y.storeChunk( data, { 0 }, { 1000 } ); + E_y.resetDataset(ds); + E_y.storeChunk(data, {0}, {1000}); series.flush(); }; - write( "../samples/jsonConfiguredBP4.bp", writeConfigBP4 ); - write( "../samples/jsonConfiguredBP3.bp", writeConfigBP3 ); - write( "../samples/jsonConfiguredNull.bp", writeConfigNull ); + write("../samples/jsonConfiguredBP4.bp", writeConfigBP4); + write("../samples/jsonConfiguredBP3.bp", writeConfigBP3); + write("../samples/jsonConfiguredNull.bp", writeConfigNull); // BP3 engine writes files, BP4 writes directories - REQUIRE( - 
openPMD::auxiliary::file_exists( "../samples/jsonConfiguredBP3.bp" ) ); - REQUIRE( openPMD::auxiliary::directory_exists( - "../samples/jsonConfiguredBP4.bp" ) ); + REQUIRE(openPMD::auxiliary::file_exists("../samples/jsonConfiguredBP3.bp")); + REQUIRE(openPMD::auxiliary::directory_exists( + "../samples/jsonConfiguredBP4.bp")); std::string readConfigBP3 = R"END( { @@ -3626,92 +4105,94 @@ this = "should not warn" } } )END"; - auto const read = []( std::string const & filename, - std::string const & config ) { + auto const read = [](std::string const &filename, + std::string const &config) { // let's write the config to a file and read it from there std::fstream file; - file.open( "../samples/read_config.json", std::ios_base::out ); + file.open("../samples/read_config.json", std::ios_base::out); file << config; file.flush(); openPMD::Series series( - filename, openPMD::Access::READ_ONLY, - " @ ../samples/read_config.json " ); - auto E_x = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == 1000 ); - auto chunk = E_x.loadChunk< int >( { 0 }, { 1000 } ); + filename, + openPMD::Access::READ_ONLY, + " @ ../samples/read_config.json "); + auto E_x = series.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == 1000); + auto chunk = E_x.loadChunk({0}, {1000}); series.flush(); - for( size_t i = 0; i < 1000; ++i ) + for (size_t i = 0; i < 1000; ++i) { - REQUIRE( chunk.get()[ i ] == 0 ); + REQUIRE(chunk.get()[i] == 0); } - auto E_y = series.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_y.getDimensionality() == 1 ); - REQUIRE( E_y.getExtent()[ 0 ] == 1000 ); - chunk = E_y.loadChunk< int >( { 0 }, { 1000 } ); + auto E_y = series.iterations[0].meshes["E"]["x"]; + REQUIRE(E_y.getDimensionality() == 1); + REQUIRE(E_y.getExtent()[0] == 1000); + chunk = E_y.loadChunk({0}, {1000}); series.flush(); - for( size_t i = 0; i < 1000; ++i ) + for (size_t i = 0; i < 1000; ++i) { - REQUIRE( chunk.get()[ i ] == 0 ); + REQUIRE(chunk.get()[i] == 0); } }; - read( "../samples/jsonConfiguredBP3.bp", readConfigBP3 ); - read( "../samples/jsonConfiguredBP4.bp", readConfigBP4 ); + read("../samples/jsonConfiguredBP3.bp", readConfigBP3); + read("../samples/jsonConfiguredBP4.bp", readConfigBP4); } -void -bp4_steps( std::string const & file, std::string const & options_write, std::string const & options_read ) +void bp4_steps( + std::string const &file, + std::string const &options_write, + std::string const &options_read) { { - Series writeSeries( file, Access::CREATE, options_write ); + Series writeSeries(file, Access::CREATE, options_write); auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E = iteration.meshes[ "E" ]; - auto E_x = E[ "x" ]; + auto iteration = iterations[i]; + auto E = iteration.meshes["E"]; + auto E_x = E["x"]; E.setAttribute( "vector_of_string", - std::vector< std::string >{ "vector", "of", "string" } ); - E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { 10 } ) ); - std::vector< int > data( 10, i ); - E_x.storeChunk( data, { 0 }, { 10 } ); + std::vector{"vector", "of", "string"}); + E_x.resetDataset(openPMD::Dataset(openPMD::Datatype::INT, {10})); + std::vector data(10, i); + E_x.storeChunk(data, {0}, {10}); iteration.close(); } } - if( options_read.empty() ) + if (options_read.empty()) { return; } - Series readSeries( file, Access::READ_ONLY, options_read ); + 
Series readSeries(file, Access::READ_ONLY, options_read); size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E = iteration.meshes[ "E" ]; - auto E_x = E[ "x" ]; + auto E = iteration.meshes["E"]; + auto E_x = E["x"]; REQUIRE( - E.getAttribute( "vector_of_string" ) - .get< std::vector< std::string > >() == - std::vector< std::string >{ "vector", "of", "string" } ); - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == 10 ); - auto chunk = E_x.loadChunk< int >( { 0 }, { 10 } ); + E.getAttribute("vector_of_string") + .get>() == + std::vector{"vector", "of", "string"}); + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == 10); + auto chunk = E_x.loadChunk({0}, {10}); iteration.close(); // @todo replace with ::close() - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } -TEST_CASE( "bp4_steps", "[serial][adios2]" ) +TEST_CASE("bp4_steps", "[serial][adios2]") { std::string useSteps = R"( { @@ -3741,12 +4222,12 @@ TEST_CASE( "bp4_steps", "[serial][adios2]" ) UseSteps = false )"; // sing the yes no song - bp4_steps( "../samples/bp4steps_yes_yes.bp", useSteps, useSteps ); - bp4_steps( "../samples/bp4steps_no_yes.bp", dontUseSteps, useSteps ); - bp4_steps( "../samples/bp4steps_yes_no.bp", useSteps, dontUseSteps ); - bp4_steps( "../samples/bp4steps_no_no.bp", dontUseSteps, dontUseSteps ); - bp4_steps( "../samples/nullcore.bp", nullcore, "" ); - bp4_steps( "../samples/bp4steps_default.bp", "{}", "{}" ); + bp4_steps("../samples/bp4steps_yes_yes.bp", useSteps, useSteps); + bp4_steps("../samples/bp4steps_no_yes.bp", dontUseSteps, useSteps); + bp4_steps("../samples/bp4steps_yes_no.bp", useSteps, dontUseSteps); + bp4_steps("../samples/bp4steps_no_no.bp", dontUseSteps, dontUseSteps); + bp4_steps("../samples/nullcore.bp", nullcore, ""); + bp4_steps("../samples/bp4steps_default.bp", "{}", "{}"); /* * Do this whole thing once more, but this time use the new attribute @@ -3775,115 +4256,112 @@ TEST_CASE( "bp4_steps", "[serial][adios2]" ) } )"; // sing the yes no song - bp4_steps( "../samples/newlayout_bp4steps_yes_yes.bp", useSteps, useSteps ); + bp4_steps("../samples/newlayout_bp4steps_yes_yes.bp", useSteps, useSteps); bp4_steps( - "../samples/newlayout_bp4steps_yes_no.bp", useSteps, dontUseSteps ); + "../samples/newlayout_bp4steps_yes_no.bp", useSteps, dontUseSteps); bp4_steps( - "../samples/newlayout_bp4steps_no_yes.bp", dontUseSteps, useSteps ); + "../samples/newlayout_bp4steps_no_yes.bp", dontUseSteps, useSteps); bp4_steps( - "../samples/newlayout_bp4steps_no_no.bp", dontUseSteps, dontUseSteps ); + "../samples/newlayout_bp4steps_no_no.bp", dontUseSteps, dontUseSteps); } #endif -void -serial_iterator( std::string const & file ) +void serial_iterator(std::string const &file) { constexpr Extent::value_type extent = 1000; { - Series writeSeries( file, Access::CREATE ); + Series writeSeries(file, Access::CREATE); auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; - E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { 1000 } ) ); - std::vector< 
int > data( 1000, i ); - E_x.storeChunk( data, { 0 }, { 1000 } ); + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; + E_x.resetDataset(openPMD::Dataset(openPMD::Datatype::INT, {1000})); + std::vector<int> data(1000, i); + E_x.storeChunk(data, {0}, {1000}); iteration.close(); } } - Series readSeries( file, Access::READ_ONLY ); + Series readSeries(file, Access::READ_ONLY); size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0 }, { extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == extent); + auto chunk = E_x.loadChunk<int>({0}, {extent}); iteration.close(); - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } -TEST_CASE( "serial_iterator", "[serial][adios2]" ) +TEST_CASE("serial_iterator", "[serial][adios2]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - serial_iterator( "../samples/serial_iterator_filebased_%T." + t ); - serial_iterator( "../samples/serial_iterator_groupbased." + t ); + serial_iterator("../samples/serial_iterator_filebased_%T." + t); + serial_iterator("../samples/serial_iterator_groupbased." + t); } } -void -variableBasedSingleIteration( std::string const & file ) +void variableBasedSingleIteration(std::string const &file) { constexpr Extent::value_type extent = 1000; { Series writeSeries( file, Access::CREATE, - R"({"iteration_encoding": "variable_based"})" ); + R"({"iteration_encoding": "variable_based"})"); REQUIRE( writeSeries.iterationEncoding() == - IterationEncoding::variableBased ); + IterationEncoding::variableBased); auto iterations = writeSeries.writeIterations(); - auto iteration = writeSeries.iterations[ 0 ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; - E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { 1000 } ) ); - std::vector< int > data( 1000, 0 ); - std::iota( data.begin(), data.end(), 0 ); - E_x.storeChunk( data, { 0 }, { 1000 } ); + auto iteration = writeSeries.iterations[0]; + auto E_x = iteration.meshes["E"]["x"]; + E_x.resetDataset(openPMD::Dataset(openPMD::Datatype::INT, {1000})); + std::vector<int> data(1000, 0); + std::iota(data.begin(), data.end(), 0); + E_x.storeChunk(data, {0}, {1000}); writeSeries.flush(); } { - Series readSeries( file, Access::READ_ONLY ); + Series readSeries(file, Access::READ_ONLY); - auto E_x = readSeries.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0 }, { extent } ); + auto E_x = readSeries.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == extent); + auto chunk = E_x.loadChunk<int>({0}, {extent}); readSeries.flush(); - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int( i ) ); + REQUIRE(chunk.get()[i] == int(i)); } } } -TEST_CASE( "variableBasedSingleIteration", "[serial][adios2]" ) 
+TEST_CASE("variableBasedSingleIteration", "[serial][adios2]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - variableBasedSingleIteration( "../samples/variableBasedSingleIteration." + t ); + variableBasedSingleIteration( + "../samples/variableBasedSingleIteration." + t); } } namespace epsilon { -template< typename T > +template <typename T> struct AreEqual { - static bool areEqual( T float1, T float2 ) + static bool areEqual(T float1, T float2) { #if 0 printf( @@ -3894,232 +4372,229 @@ struct AreEqual std::abs( float1 - float2 ) <= std::numeric_limits< T >::epsilon() ); #endif - return std::abs( float1 - float2 ) <= - std::numeric_limits< T >::epsilon(); + return std::abs(float1 - float2) <= std::numeric_limits<T>::epsilon(); } }; -template< typename T > -struct AreEqual< std::vector< T > > +template <typename T> +struct AreEqual<std::vector<T>> { - static bool areEqual( std::vector< T > v1, std::vector< T > v2 ) + static bool areEqual(std::vector<T> v1, std::vector<T> v2) { return v1.size() == v2.size() && - std::equal( - v1.begin(), v1.end(), v2.begin(), AreEqual< T >::areEqual ); + std::equal(v1.begin(), v1.end(), v2.begin(), AreEqual<T>::areEqual); } }; -template< typename T > -bool areEqual( T a, T b ) +template <typename T> +bool areEqual(T a, T b) { - return AreEqual< T >::areEqual( std::move( a ), std::move( b ) ); -} + return AreEqual<T>::areEqual(std::move(a), std::move(b)); } +} // namespace epsilon #if openPMD_HAVE_ADIOS2 -TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" ) +TEST_CASE("git_adios2_sample_test", "[serial][adios2]") { using namespace epsilon; - using vecstring = std::vector< std::string >; - using vecdouble = std::vector< double >; - using arr7 = std::array< double, 7 >; + using vecstring = std::vector<std::string>; + using vecdouble = std::vector<double>; + using arr7 = std::array<double, 7>; std::string const samplePath = "../samples/git-sample/3d-bp4/example-3d-bp4.bp"; - if( !auxiliary::directory_exists( samplePath ) ) + if (!auxiliary::directory_exists(samplePath)) { - std::cerr << "git sample '" - << samplePath << "' not accessible \n"; + std::cerr << "git sample '" << samplePath << "' not accessible \n"; return; } - Series o( samplePath, Access::READ_ONLY, R"({"backend": "adios2"})" ); - REQUIRE( o.openPMD() == "1.1.0" ); - REQUIRE( o.openPMDextension() == 0 ); - REQUIRE( o.basePath() == "/data/%T/" ); - REQUIRE( o.meshesPath() == "fields/" ); - REQUIRE( o.particlesPath() == "particles/" ); - REQUIRE( o.iterationEncoding() == IterationEncoding::groupBased ); - REQUIRE( o.iterationFormat() == "/data/%T/" ); - REQUIRE( o.name() == "example-3d-bp4" ); + Series o(samplePath, Access::READ_ONLY, R"({"backend": "adios2"})"); + REQUIRE(o.openPMD() == "1.1.0"); + REQUIRE(o.openPMDextension() == 0); + REQUIRE(o.basePath() == "/data/%T/"); + REQUIRE(o.meshesPath() == "fields/"); + REQUIRE(o.particlesPath() == "particles/"); + REQUIRE(o.iterationEncoding() == IterationEncoding::groupBased); + REQUIRE(o.iterationFormat() == "/data/%T/"); + REQUIRE(o.name() == "example-3d-bp4"); - REQUIRE( o.iterations.size() == 1 ); - REQUIRE( o.iterations.count( 550 ) == 1 ); + REQUIRE(o.iterations.size() == 1); + REQUIRE(o.iterations.count(550) == 1); - Iteration it = o.iterations[ 550 ]; + Iteration it = o.iterations[550]; - REQUIRE( areEqual( it.time< double >(), 5.5e+02 ) ); - REQUIRE( areEqual( it.timeUnitSI(), 1.39e-16 ) ); + REQUIRE(areEqual(it.time<double>(), 5.5e+02)); + REQUIRE(areEqual(it.timeUnitSI(), 1.39e-16)); REQUIRE( - it.getAttribute( "particleBoundary" ).get< vecstring >() == - vecstring(6, "absorbing" )); + 
it.getAttribute("particleBoundary").get() == + vecstring(6, "absorbing")); REQUIRE( - it.getAttribute( "particleBoundaryParameters" ).get< vecstring >() == - vecstring(6, "without field correction" )); - REQUIRE( areEqual( - it.getAttribute( "mue0" ).get< float >(), 2.1550322708208114e-04f ) ); - REQUIRE( areEqual( - it.getAttribute( "eps0" ).get< float >(), 4.6403017578125000e+03f ) ); - REQUIRE( areEqual( it.dt< double >(), 1. ) ); - - REQUIRE( it.meshes.size() == 9 ); - REQUIRE( it.meshes.count( "E" ) == 1 ); - REQUIRE( it.meshes.count( "B" ) == 1 ); - REQUIRE( it.meshes.count( "e_all_chargeDensity" ) == 1 ); - REQUIRE( it.meshes.count( "e_all_energyDensity" ) == 1 ); - REQUIRE( it.meshes.count( "e_all_particleMomentumComponent" ) == 1 ); - REQUIRE( it.meshes.count( "i_all_chargeDensity" ) == 1 ); - REQUIRE( it.meshes.count( "i_all_energyDensity" ) == 1 ); - REQUIRE( it.meshes.count( "i_all_particleMomentumComponent" ) == 1 ); + it.getAttribute("particleBoundaryParameters").get() == + vecstring(6, "without field correction")); + REQUIRE(areEqual( + it.getAttribute("mue0").get(), 2.1550322708208114e-04f)); + REQUIRE(areEqual( + it.getAttribute("eps0").get(), 4.6403017578125000e+03f)); + REQUIRE(areEqual(it.dt(), 1.)); + + REQUIRE(it.meshes.size() == 9); + REQUIRE(it.meshes.count("E") == 1); + REQUIRE(it.meshes.count("B") == 1); + REQUIRE(it.meshes.count("e_all_chargeDensity") == 1); + REQUIRE(it.meshes.count("e_all_energyDensity") == 1); + REQUIRE(it.meshes.count("e_all_particleMomentumComponent") == 1); + REQUIRE(it.meshes.count("i_all_chargeDensity") == 1); + REQUIRE(it.meshes.count("i_all_energyDensity") == 1); + REQUIRE(it.meshes.count("i_all_particleMomentumComponent") == 1); // internal PIConGPU restarting information: - REQUIRE( it.meshes.count( "picongpu_idProvider" ) == 1 ); - - Mesh E = it.meshes[ "E" ]; - REQUIRE( E.geometry() == Mesh::Geometry::cartesian ); - REQUIRE( E.dataOrder() == Mesh::DataOrder::C ); - REQUIRE( E.axisLabels() == vecstring{ "z", "y", "x" } ); - REQUIRE( areEqual( - E.gridSpacing< double >(), + REQUIRE(it.meshes.count("picongpu_idProvider") == 1); + + Mesh E = it.meshes["E"]; + REQUIRE(E.geometry() == Mesh::Geometry::cartesian); + REQUIRE(E.dataOrder() == Mesh::DataOrder::C); + REQUIRE(E.axisLabels() == vecstring{"z", "y", "x"}); + REQUIRE(areEqual( + E.gridSpacing(), vecdouble{ 1.7416797876358032e+00, 1.7416797876358032e+00, - 1.7416797876358032e+00 } ) ); - REQUIRE( areEqual( E.gridGlobalOffset(), vecdouble{ 0., 0., 0. } ) ); - REQUIRE( areEqual( E.gridUnitSI(), 5.3662849982000001e-08 ) ); - REQUIRE( E.unitDimension() == arr7{ { 1, 1, -3, -1, 0, 0, 0 } } ); - REQUIRE( areEqual( E.timeOffset< double >(), 0. ) ); - - REQUIRE( E.size() == 3 ); - REQUIRE( E.count( "x" ) == 1 ); - REQUIRE( E.count( "y" ) == 1 ); - REQUIRE( E.count( "z" ) == 1 ); - - MeshRecordComponent E_x = E[ "x" ]; - REQUIRE( E_x.unitSI() == 9.5223987717519668e+12 ); - REQUIRE( E_x.position< double >() == vecdouble{ 0.5, 0., 0. 
} ); - REQUIRE( E_x.getDatatype() == Datatype::FLOAT ); - REQUIRE( E_x.getExtent() == Extent{ 32, 96, 64 } ); - REQUIRE( E_x.getDimensionality() == 3 ); - - float E_x_data[] = { -5.4223355837166309e-03, -5.5848993360996246e-03, - -5.7896804064512253e-03, -5.5147800594568253e-03, - -5.6304289028048515e-03, -5.8255749754607677e-03, - -5.5910930968821049e-03, -5.7385643012821674e-03, - -5.8903801254928112e-03, -5.3768581710755825e-03, - -5.5543538182973862e-03, -5.7734064757823944e-03, - -5.4399720393121243e-03, -5.5731507018208504e-03, - -5.7369144633412361e-03, -5.5461097508668900e-03, - -5.6645260192453861e-03, -5.8231339789927006e-03, - -5.4240114986896515e-03, -5.5798939429223537e-03, - -5.7610240764915943e-03, -5.4240110330283642e-03, - -5.5275037884712219e-03, -5.7047260925173759e-03, - -5.5050505325198174e-03, -5.6199040263891220e-03, - -5.7577718980610371e-03 }; - auto E_x_loaded = E_x.loadChunk< float >( { 16, 32, 32 }, { 3, 3, 3 } ); + 1.7416797876358032e+00})); + REQUIRE(areEqual(E.gridGlobalOffset(), vecdouble{0., 0., 0.})); + REQUIRE(areEqual(E.gridUnitSI(), 5.3662849982000001e-08)); + REQUIRE(E.unitDimension() == arr7{{1, 1, -3, -1, 0, 0, 0}}); + REQUIRE(areEqual(E.timeOffset(), 0.)); + + REQUIRE(E.size() == 3); + REQUIRE(E.count("x") == 1); + REQUIRE(E.count("y") == 1); + REQUIRE(E.count("z") == 1); + + MeshRecordComponent E_x = E["x"]; + REQUIRE(E_x.unitSI() == 9.5223987717519668e+12); + REQUIRE(E_x.position() == vecdouble{0.5, 0., 0.}); + REQUIRE(E_x.getDatatype() == Datatype::FLOAT); + REQUIRE(E_x.getExtent() == Extent{32, 96, 64}); + REQUIRE(E_x.getDimensionality() == 3); + + float E_x_data[] = {-5.4223355837166309e-03, -5.5848993360996246e-03, + -5.7896804064512253e-03, -5.5147800594568253e-03, + -5.6304289028048515e-03, -5.8255749754607677e-03, + -5.5910930968821049e-03, -5.7385643012821674e-03, + -5.8903801254928112e-03, -5.3768581710755825e-03, + -5.5543538182973862e-03, -5.7734064757823944e-03, + -5.4399720393121243e-03, -5.5731507018208504e-03, + -5.7369144633412361e-03, -5.5461097508668900e-03, + -5.6645260192453861e-03, -5.8231339789927006e-03, + -5.4240114986896515e-03, -5.5798939429223537e-03, + -5.7610240764915943e-03, -5.4240110330283642e-03, + -5.5275037884712219e-03, -5.7047260925173759e-03, + -5.5050505325198174e-03, -5.6199040263891220e-03, + -5.7577718980610371e-03}; + auto E_x_loaded = E_x.loadChunk({16, 32, 32}, {3, 3, 3}); E_x.seriesFlush(); - for( size_t i = 0; i < 27; ++i ) - { - REQUIRE( areEqual( E_x_data[ i ], E_x_loaded.get()[ i ] ) ); - } - - MeshRecordComponent E_y = E[ "y" ]; - REQUIRE( E_y.unitSI() == 9.5223987717519668e+12 ); - REQUIRE( E_y.position< double >() == vecdouble{ 0., 0.5, 0. 
} ); - REQUIRE( E_y.getDatatype() == Datatype::FLOAT ); - REQUIRE( E_y.getExtent() == Extent{ 32, 96, 64 } ); - REQUIRE( E_y.getDimensionality() == 3 ); - float E_y_data[] = { 1.9600236555561423e-04, 1.9210868049412966e-04, - 1.1112097854493186e-04, 9.0100722445640713e-05, - 1.2735779455397278e-04, 1.2597699242178351e-04, - -4.5422813855111599e-05, 2.8805377951357514e-05, - 8.3214777987450361e-05, 1.3271786156110466e-04, - 1.0011527047026902e-04, 5.8875859394902363e-05, - 2.5147232008748688e-05, 7.1912618295755237e-05, - 6.2157545471563935e-05, -8.6973857833072543e-05, - -8.1858233897946775e-06, -2.2509128029923886e-05, - 6.0511985793709755e-05, 4.9726430006558076e-05, - -1.7196462067659013e-05, -3.0460794732789509e-05, - 5.9892886383750010e-06, -1.4382616200236953e-06, - -1.3747414050158113e-04, -8.0163808888755739e-05, - -3.5486038541421294e-05 }; - auto E_y_loaded = E_y.loadChunk< float >( { 16, 32, 32 }, { 3, 3, 3 } ); + for (size_t i = 0; i < 27; ++i) + { + REQUIRE(areEqual(E_x_data[i], E_x_loaded.get()[i])); + } + + MeshRecordComponent E_y = E["y"]; + REQUIRE(E_y.unitSI() == 9.5223987717519668e+12); + REQUIRE(E_y.position() == vecdouble{0., 0.5, 0.}); + REQUIRE(E_y.getDatatype() == Datatype::FLOAT); + REQUIRE(E_y.getExtent() == Extent{32, 96, 64}); + REQUIRE(E_y.getDimensionality() == 3); + float E_y_data[] = {1.9600236555561423e-04, 1.9210868049412966e-04, + 1.1112097854493186e-04, 9.0100722445640713e-05, + 1.2735779455397278e-04, 1.2597699242178351e-04, + -4.5422813855111599e-05, 2.8805377951357514e-05, + 8.3214777987450361e-05, 1.3271786156110466e-04, + 1.0011527047026902e-04, 5.8875859394902363e-05, + 2.5147232008748688e-05, 7.1912618295755237e-05, + 6.2157545471563935e-05, -8.6973857833072543e-05, + -8.1858233897946775e-06, -2.2509128029923886e-05, + 6.0511985793709755e-05, 4.9726430006558076e-05, + -1.7196462067659013e-05, -3.0460794732789509e-05, + 5.9892886383750010e-06, -1.4382616200236953e-06, + -1.3747414050158113e-04, -8.0163808888755739e-05, + -3.5486038541421294e-05}; + auto E_y_loaded = E_y.loadChunk({16, 32, 32}, {3, 3, 3}); E_y.seriesFlush(); - for( size_t i = 0; i < 27; ++i ) - { - REQUIRE( areEqual( E_y_data[ i ], E_y_loaded.get()[ i ] ) ); - } - - MeshRecordComponent E_z = E[ "z" ]; - REQUIRE( E_z.unitSI() == 9.5223987717519668e+12 ); - REQUIRE( E_z.position< double >() == vecdouble{ 0., 0., 0.5 } ); - REQUIRE( E_z.getDatatype() == Datatype::FLOAT ); - REQUIRE( E_z.getExtent() == Extent{ 32, 96, 64 } ); - REQUIRE( E_z.getDimensionality() == 3 ); - float E_z_data[] = { -1.3665637234225869e-03, -1.3941071229055524e-03, - -1.4618652639910579e-03, -1.4528072206303477e-03, - -1.4355779858306050e-03, -1.4925430295988917e-03, - -1.6604729462414980e-03, -1.5911811497062445e-03, - -1.6420837491750717e-03, -1.1975304223597050e-03, - -1.2183464132249355e-03, -1.3470118865370750e-03, - -1.2645993847399950e-03, -1.2775690993294120e-03, - -1.3621025718748569e-03, -1.4198675053194165e-03, - -1.3927087420597672e-03, -1.3995743356645107e-03, - -9.9509279243648052e-04, -1.0950352298095822e-03, - -1.2131386902183294e-03, -1.0829739039763808e-03, - -1.1384176323190331e-03, -1.2189601548016071e-03, - -1.2028686469420791e-03, -1.1917919619008899e-03, - -1.2309787562116981e-03 }; - auto E_z_loaded = E_z.loadChunk< float >( { 16, 32, 32 }, { 3, 3, 3 } ); + for (size_t i = 0; i < 27; ++i) + { + REQUIRE(areEqual(E_y_data[i], E_y_loaded.get()[i])); + } + + MeshRecordComponent E_z = E["z"]; + REQUIRE(E_z.unitSI() == 9.5223987717519668e+12); + REQUIRE(E_z.position() == vecdouble{0., 0., 
0.5}); + REQUIRE(E_z.getDatatype() == Datatype::FLOAT); + REQUIRE(E_z.getExtent() == Extent{32, 96, 64}); + REQUIRE(E_z.getDimensionality() == 3); + float E_z_data[] = {-1.3665637234225869e-03, -1.3941071229055524e-03, + -1.4618652639910579e-03, -1.4528072206303477e-03, + -1.4355779858306050e-03, -1.4925430295988917e-03, + -1.6604729462414980e-03, -1.5911811497062445e-03, + -1.6420837491750717e-03, -1.1975304223597050e-03, + -1.2183464132249355e-03, -1.3470118865370750e-03, + -1.2645993847399950e-03, -1.2775690993294120e-03, + -1.3621025718748569e-03, -1.4198675053194165e-03, + -1.3927087420597672e-03, -1.3995743356645107e-03, + -9.9509279243648052e-04, -1.0950352298095822e-03, + -1.2131386902183294e-03, -1.0829739039763808e-03, + -1.1384176323190331e-03, -1.2189601548016071e-03, + -1.2028686469420791e-03, -1.1917919619008899e-03, + -1.2309787562116981e-03}; + auto E_z_loaded = E_z.loadChunk({16, 32, 32}, {3, 3, 3}); E_z.seriesFlush(); - for( size_t i = 0; i < 27; ++i ) + for (size_t i = 0; i < 27; ++i) { - REQUIRE( areEqual( E_z_data[ i ], E_z_loaded.get()[ i ] ) ); + REQUIRE(areEqual(E_z_data[i], E_z_loaded.get()[i])); } - REQUIRE( it.particles.size() == 2 ); + REQUIRE(it.particles.size() == 2); - REQUIRE( it.particles.count( "e" ) == 1 ); - REQUIRE( it.particles.count( "i" ) == 1 ); + REQUIRE(it.particles.count("e") == 1); + REQUIRE(it.particles.count("i") == 1); - ParticleSpecies electrons = it.particles[ "e" ]; + ParticleSpecies electrons = it.particles["e"]; - REQUIRE( electrons.size() == 6 ); - REQUIRE( electrons.count( "charge" ) == 1 ); - REQUIRE( electrons.count( "mass" ) == 1 ); - REQUIRE( electrons.count( "momentum" ) == 1 ); - REQUIRE( electrons.count( "position" ) == 1 ); - REQUIRE( electrons.count( "positionOffset" ) == 1 ); - REQUIRE( electrons.count( "weighting" ) == 1 ); + REQUIRE(electrons.size() == 6); + REQUIRE(electrons.count("charge") == 1); + REQUIRE(electrons.count("mass") == 1); + REQUIRE(electrons.count("momentum") == 1); + REQUIRE(electrons.count("position") == 1); + REQUIRE(electrons.count("positionOffset") == 1); + REQUIRE(electrons.count("weighting") == 1); - Record charge = electrons[ "charge" ]; - REQUIRE( charge.unitDimension() == arr7{ { 0., 0., 1., 1., 0., 0., 0. 
} } ); - REQUIRE( charge.timeOffset< double >() == 0.0 ); + Record charge = electrons["charge"]; + REQUIRE(charge.unitDimension() == arr7{{0., 0., 1., 1., 0., 0., 0.}}); + REQUIRE(charge.timeOffset() == 0.0); - REQUIRE( charge.size() == 1 ); - REQUIRE( charge.count( RecordComponent::SCALAR ) == 1 ); + REQUIRE(charge.size() == 1); + REQUIRE(charge.count(RecordComponent::SCALAR) == 1); - RecordComponent & charge_scalar = charge[ RecordComponent::SCALAR ]; - REQUIRE( areEqual( charge_scalar.unitSI(), 5.2323446053125002e-17 ) ); - REQUIRE( charge_scalar.getDatatype() == Datatype::DOUBLE ); - REQUIRE( charge_scalar.getDimensionality() == 1 ); - REQUIRE( charge_scalar.getExtent() == Extent{ 96781 } ); + RecordComponent &charge_scalar = charge[RecordComponent::SCALAR]; + REQUIRE(areEqual(charge_scalar.unitSI(), 5.2323446053125002e-17)); + REQUIRE(charge_scalar.getDatatype() == Datatype::DOUBLE); + REQUIRE(charge_scalar.getDimensionality() == 1); + REQUIRE(charge_scalar.getExtent() == Extent{96781}); double const charge_value = -3.0620612669736147e-3; - REQUIRE( charge_scalar.getAttribute("value").get< double >() == charge_value ); + REQUIRE(charge_scalar.getAttribute("value").get() == charge_value); - Record & mass = electrons[ "mass" ]; - REQUIRE( mass.unitDimension() == arr7{ { 0., 1., 0., 0., 0., 0., 0. } } ); - REQUIRE( mass.timeOffset< double >() == 0.0 ); + Record &mass = electrons["mass"]; + REQUIRE(mass.unitDimension() == arr7{{0., 1., 0., 0., 0., 0., 0.}}); + REQUIRE(mass.timeOffset() == 0.0); - REQUIRE( mass.size() == 1 ); - REQUIRE( mass.count( RecordComponent::SCALAR ) == 1 ); + REQUIRE(mass.size() == 1); + REQUIRE(mass.count(RecordComponent::SCALAR) == 1); - RecordComponent & mass_scalar = mass[ RecordComponent::SCALAR ]; - REQUIRE( areEqual( mass_scalar.unitSI(), 2.9749182215581054e-28 ) ); - REQUIRE( mass_scalar.getDatatype() == Datatype::DOUBLE ); - REQUIRE( mass_scalar.getDimensionality() == 1 ); - REQUIRE( mass_scalar.getExtent() == Extent{ 96781 } ); + RecordComponent &mass_scalar = mass[RecordComponent::SCALAR]; + REQUIRE(areEqual(mass_scalar.unitSI(), 2.9749182215581054e-28)); + REQUIRE(mass_scalar.getDatatype() == Datatype::DOUBLE); + REQUIRE(mass_scalar.getDimensionality() == 1); + REQUIRE(mass_scalar.getExtent() == Extent{96781}); double const mass_value = 3.0620612669736147e-3; - REQUIRE( mass_scalar.getAttribute("value").get< double >() == mass_value ); + REQUIRE(mass_scalar.getAttribute("value").get() == mass_value); float position_x_data[] = { 5.4244494438171387e-01, @@ -4130,20 +4605,17 @@ TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" ) 3.0412188172340393e-01, 5.9818041324615479e-01, 8.8785779476165771e-01, - 4.2273962497711182e-01 }; + 4.2273962497711182e-01}; auto position_x_loaded = - electrons[ "position" ][ "x" ].loadChunk< float >( { 32 }, { 9 } ); - auto charge_loaded = - charge_scalar.loadChunk< double >( { 32 }, { 9 } ); - auto mass_loaded = - mass_scalar.loadChunk< double >( { 32 }, { 9 } ); + electrons["position"]["x"].loadChunk({32}, {9}); + auto charge_loaded = charge_scalar.loadChunk({32}, {9}); + auto mass_loaded = mass_scalar.loadChunk({32}, {9}); electrons.seriesFlush(); - for( size_t i = 0; i < 9; ++i ) + for (size_t i = 0; i < 9; ++i) { - REQUIRE( - areEqual( position_x_data[ i ], position_x_loaded.get()[ i ] ) ); - REQUIRE( areEqual( charge_value, charge_loaded.get()[ i ] ) ); - REQUIRE( areEqual( mass_value, mass_loaded.get()[ i ] ) ); + REQUIRE(areEqual(position_x_data[i], position_x_loaded.get()[i])); + REQUIRE(areEqual(charge_value, 
charge_loaded.get()[i])); + REQUIRE(areEqual(mass_value, mass_loaded.get()[i])); } float position_y_data[] = { @@ -4155,14 +4627,13 @@ TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" ) 2.6738378405570984e-01, 8.2502347230911255e-01, 9.2121642827987671e-01, - 9.0402549505233765e-01 }; + 9.0402549505233765e-01}; auto position_y_loaded = - electrons[ "position" ][ "y" ].loadChunk< float >( { 32 }, { 9 } ); + electrons["position"]["y"].loadChunk({32}, {9}); electrons.seriesFlush(); - for( size_t i = 0; i < 9; ++i ) + for (size_t i = 0; i < 9; ++i) { - REQUIRE( - areEqual( position_y_data[ i ], position_y_loaded.get()[ i ] ) ); + REQUIRE(areEqual(position_y_data[i], position_y_loaded.get()[i])); } float position_z_data[] = { @@ -4174,126 +4645,124 @@ TEST_CASE( "git_adios2_sample_test", "[serial][adios2]" ) 7.4185878038406372e-01, 4.5986607670783997e-01, 2.2350004315376282e-01, - 5.4723143577575684e-01 }; + 5.4723143577575684e-01}; auto position_z_loaded = - electrons[ "position" ][ "z" ].loadChunk< float >( { 32 }, { 9 } ); + electrons["position"]["z"].loadChunk({32}, {9}); electrons.seriesFlush(); - for( size_t i = 0; i < 9; ++i ) + for (size_t i = 0; i < 9; ++i) { - REQUIRE( - areEqual( position_z_data[ i ], position_z_loaded.get()[ i ] ) ); + REQUIRE(areEqual(position_z_data[i], position_z_loaded.get()[i])); } } -void variableBasedSeries( std::string const & file ) +void variableBasedSeries(std::string const &file) { std::string selectADIOS2 = R"({"backend": "adios2"})"; constexpr Extent::value_type extent = 1000; { - Series writeSeries( file, Access::CREATE, selectADIOS2 ); - writeSeries.setIterationEncoding( IterationEncoding::variableBased ); + Series writeSeries(file, Access::CREATE, selectADIOS2); + writeSeries.setIterationEncoding(IterationEncoding::variableBased); REQUIRE( - writeSeries.iterationEncoding() == IterationEncoding::variableBased ); + writeSeries.iterationEncoding() == + IterationEncoding::variableBased); auto iterations = writeSeries.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; - E_x.resetDataset( { openPMD::Datatype::INT, { 1000 } } ); - std::vector< int > data( 1000, i ); - E_x.storeChunk( data, { 0 }, { 1000 } ); + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; + E_x.resetDataset({openPMD::Datatype::INT, {1000}}); + std::vector data(1000, i); + E_x.storeChunk(data, {0}, {1000}); // this tests changing extents and dimensionalities // across iterations - auto E_y = iteration.meshes[ "E" ][ "y" ]; + auto E_y = iteration.meshes["E"]["y"]; unsigned dimensionality = i % 3 + 1; unsigned len = i + 1; - Extent changingExtent( dimensionality, len ); - E_y.resetDataset( { openPMD::Datatype::INT, changingExtent } ); - std::vector< int > changingData( - std::pow( len, dimensionality ), dimensionality ); + Extent changingExtent(dimensionality, len); + E_y.resetDataset({openPMD::Datatype::INT, changingExtent}); + std::vector changingData( + std::pow(len, dimensionality), dimensionality); E_y.storeChunk( - changingData, Offset( dimensionality, 0 ), changingExtent ); + changingData, Offset(dimensionality, 0), changingExtent); // this tests datasets that are present in one iteration, but not // in others - auto E_z = iteration.meshes[ "E" ][ std::to_string( i ) ]; - E_z.resetDataset( { Datatype::INT, { 1 } } ); - E_z.makeConstant( i ); + auto E_z = iteration.meshes["E"][std::to_string(i)]; + E_z.resetDataset({Datatype::INT, 
{1}}); + E_z.makeConstant(i); // this tests attributes that are present in one iteration, but not // in others - iteration.meshes[ "E" ].setAttribute( - "attr_" + std::to_string( i ), i ); + iteration.meshes["E"].setAttribute("attr_" + std::to_string(i), i); iteration.close(); } } - REQUIRE( auxiliary::directory_exists( file ) ); + REQUIRE(auxiliary::directory_exists(file)); - auto testRead = [ &file, &extent, &selectADIOS2 ]( - std::string const & jsonConfig ) { + auto testRead = [&file, &extent, &selectADIOS2]( + std::string const &jsonConfig) { Series readSeries( - file, Access::READ_ONLY, json::merge( selectADIOS2, jsonConfig ) ); + file, Access::READ_ONLY, json::merge(selectADIOS2, jsonConfig)); size_t last_iteration_index = 0; - for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 1 ); - REQUIRE( E_x.getExtent()[ 0 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0 }, { extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 1); + REQUIRE(E_x.getExtent()[0] == extent); + auto chunk = E_x.loadChunk({0}, {extent}); iteration.close(); - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int( iteration.iterationIndex ) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); } - auto E_y = iteration.meshes[ "E" ][ "y" ]; + auto E_y = iteration.meshes["E"]["y"]; unsigned dimensionality = iteration.iterationIndex % 3 + 1; unsigned len = iteration.iterationIndex + 1; - Extent changingExtent( dimensionality, len ); - REQUIRE( E_y.getExtent() == changingExtent ); + Extent changingExtent(dimensionality, len); + REQUIRE(E_y.getExtent() == changingExtent); // this loop ensures that only the recordcomponent ["E"]["i"] is // present where i == iteration.iterationIndex - for( uint64_t otherIteration = 0; otherIteration < 10; - ++otherIteration ) + for (uint64_t otherIteration = 0; otherIteration < 10; + ++otherIteration) { // component is present <=> (otherIteration == i) REQUIRE( - iteration.meshes[ "E" ].contains( - std::to_string( otherIteration ) ) == - ( otherIteration == iteration.iterationIndex ) ); + iteration.meshes["E"].contains( + std::to_string(otherIteration)) == + (otherIteration == iteration.iterationIndex)); REQUIRE( - iteration.meshes[ "E" ].containsAttribute( - "attr_" + std::to_string( otherIteration ) ) == - ( otherIteration == iteration.iterationIndex ) ); + iteration.meshes["E"].containsAttribute( + "attr_" + std::to_string(otherIteration)) == + (otherIteration == iteration.iterationIndex)); } REQUIRE( - iteration - .meshes[ "E" ][ std::to_string( iteration.iterationIndex ) ] - .getAttribute( "value" ) - .get< int >() == int( iteration.iterationIndex ) ); + iteration.meshes["E"][std::to_string(iteration.iterationIndex)] + .getAttribute("value") + .get() == int(iteration.iterationIndex)); REQUIRE( - iteration.meshes[ "E" ] + iteration.meshes["E"] .getAttribute( - "attr_" + std::to_string( iteration.iterationIndex ) ) - .get< int >() == int( iteration.iterationIndex ) ); + "attr_" + std::to_string(iteration.iterationIndex)) + .get() == int(iteration.iterationIndex)); last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); }; - testRead( "{\"defer_iteration_parsing\": true}" ); - testRead( "{\"defer_iteration_parsing\": false}" ); + testRead("{\"defer_iteration_parsing\": 
true}"); + testRead("{\"defer_iteration_parsing\": false}"); } #if openPMD_HAVE_ADIOS2 -TEST_CASE( "variableBasedSeries", "[serial][adios2]" ) +TEST_CASE("variableBasedSeries", "[serial][adios2]") { - variableBasedSeries( "../samples/variableBasedSeries.bp" ); + variableBasedSeries("../samples/variableBasedSeries.bp"); } #endif @@ -4305,29 +4774,28 @@ void variableBasedParticleData() { // open file for writing Series series = - Series( "../samples/variableBasedParticles.bp", Access::CREATE ); - series.setIterationEncoding( IterationEncoding::variableBased ); + Series("../samples/variableBasedParticles.bp", Access::CREATE); + series.setIterationEncoding(IterationEncoding::variableBased); - Datatype datatype = determineDatatype< position_t >(); - Extent global_extent = { length }; - Dataset dataset = Dataset( datatype, global_extent ); - std::shared_ptr< position_t > local_data( - new position_t[ length ], - []( position_t const * ptr ) { delete[] ptr; } ); + Datatype datatype = determineDatatype(); + Extent global_extent = {length}; + Dataset dataset = Dataset(datatype, global_extent); + std::shared_ptr local_data( + new position_t[length], + [](position_t const *ptr) { delete[] ptr; }); WriteIterations iterations = series.writeIterations(); - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - Iteration iteration = iterations[ i ]; - Record electronPositions = iteration.particles[ "e" ][ "position" ]; + Iteration iteration = iterations[i]; + Record electronPositions = iteration.particles["e"]["position"]; - std::iota( - local_data.get(), local_data.get() + length, i * length ); - for( auto const & dim : { "x", "y", "z" } ) + std::iota(local_data.get(), local_data.get() + length, i * length); + for (auto const &dim : {"x", "y", "z"}) { - RecordComponent pos = electronPositions[ dim ]; - pos.resetDataset( dataset ); - pos.storeChunk( local_data, Offset{ 0 }, global_extent ); + RecordComponent pos = electronPositions[dim]; + pos.resetDataset(dataset); + pos.storeChunk(local_data, Offset{0}, global_extent); } iteration.close(); } @@ -4336,43 +4804,43 @@ void variableBasedParticleData() { // open file for reading Series series = - Series( "../samples/variableBasedParticles.bp", Access::READ_ONLY ); + Series("../samples/variableBasedParticles.bp", Access::READ_ONLY); - for( IndexedIteration iteration : series.readIterations() ) + for (IndexedIteration iteration : series.readIterations()) { - Record electronPositions = iteration.particles[ "e" ][ "position" ]; - std::array< std::shared_ptr< position_t >, 3 > loadedChunks; - std::array< Extent, 3 > extents; - std::array< std::string, 3 > const dimensions{ { "x", "y", "z" } }; + Record electronPositions = iteration.particles["e"]["position"]; + std::array, 3> loadedChunks; + std::array extents; + std::array const dimensions{{"x", "y", "z"}}; - for( size_t i = 0; i < 3; ++i ) + for (size_t i = 0; i < 3; ++i) { - std::string dim = dimensions[ i ]; - RecordComponent rc = electronPositions[ dim ]; - loadedChunks[ i ] = rc.loadChunk< position_t >( - Offset( rc.getDimensionality(), 0 ), rc.getExtent() ); - extents[ i ] = rc.getExtent(); + std::string dim = dimensions[i]; + RecordComponent rc = electronPositions[dim]; + loadedChunks[i] = rc.loadChunk( + Offset(rc.getDimensionality(), 0), rc.getExtent()); + extents[i] = rc.getExtent(); } iteration.close(); - for( size_t i = 0; i < 3; ++i ) + for (size_t i = 0; i < 3; ++i) { - std::string dim = dimensions[ i ]; - Extent const & extent = extents[ i ]; - auto chunk = loadedChunks[ i ]; - 
for( size_t j = 0; j < extent[ 0 ]; ++j ) + std::string dim = dimensions[i]; + Extent const &extent = extents[i]; + auto chunk = loadedChunks[i]; + for (size_t j = 0; j < extent[0]; ++j) { REQUIRE( - chunk.get()[ j ] == - iteration.iterationIndex * length + j ); + chunk.get()[j] == + iteration.iterationIndex * length + j); } } } } } -TEST_CASE( "variableBasedParticleData", "[serial][adios2]" ) +TEST_CASE("variableBasedParticleData", "[serial][adios2]") { variableBasedParticleData(); } @@ -4380,17 +4848,17 @@ TEST_CASE( "variableBasedParticleData", "[serial][adios2]" ) #if openPMD_HAVE_ADIOS2 #ifdef ADIOS2_HAVE_BZIP2 -TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) +TEST_CASE("automatically_deactivate_span", "[serial][adios2]") { // automatically (de)activate span-based storeChunking { - Series write( "../samples/span_based.bp", Access::CREATE ); - auto E_uncompressed = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - auto E_compressed = write.iterations[ 0 ].meshes[ "E" ][ "y" ]; + Series write("../samples/span_based.bp", Access::CREATE); + auto E_uncompressed = write.iterations[0].meshes["E"]["x"]; + auto E_compressed = write.iterations[0].meshes["E"]["y"]; - Dataset ds{ Datatype::INT, { 10 } }; + Dataset ds{Datatype::INT, {10}}; - E_uncompressed.resetDataset( ds ); + E_uncompressed.resetDataset(ds); std::string compression = R"END( { @@ -4406,26 +4874,25 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) })END"; ds.options = compression; - E_compressed.resetDataset( ds ); + E_compressed.resetDataset(ds); bool spanWorkaround = false; - E_uncompressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { + E_uncompressed.storeChunk( + {0}, {10}, [&spanWorkaround](size_t size) { spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( !spanWorkaround ); + REQUIRE(!spanWorkaround); - E_compressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { - spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + E_compressed.storeChunk({0}, {10}, [&spanWorkaround](size_t size) { + spanWorkaround = true; + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( spanWorkaround ); + REQUIRE(spanWorkaround); } // enable span-based API indiscriminately @@ -4436,13 +4903,13 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) "use_span_based_put": true } })END"; - Series write( "../samples/span_based.bp", Access::CREATE, enable ); - auto E_uncompressed = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - auto E_compressed = write.iterations[ 0 ].meshes[ "E" ][ "y" ]; + Series write("../samples/span_based.bp", Access::CREATE, enable); + auto E_uncompressed = write.iterations[0].meshes["E"]["x"]; + auto E_compressed = write.iterations[0].meshes["E"]["y"]; - Dataset ds{ Datatype::INT, { 10 } }; + Dataset ds{Datatype::INT, {10}}; - E_uncompressed.resetDataset( ds ); + E_uncompressed.resetDataset(ds); std::string compression = R"END( { @@ -4458,38 +4925,38 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) })END"; ds.options = compression; - E_compressed.resetDataset( ds ); + E_compressed.resetDataset(ds); bool spanWorkaround = false; - E_uncompressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { + E_uncompressed.storeChunk( 
+ {0}, {10}, [&spanWorkaround](size_t size) { spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( !spanWorkaround ); + REQUIRE(!spanWorkaround); try { - E_compressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { + E_compressed.storeChunk( + {0}, {10}, [&spanWorkaround](size_t size) { spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); } - catch( std::invalid_argument const & e ) + catch (std::invalid_argument const &e) { /* - * Using the span-based API in combination with compression is - * unsupported in ADIOS2. - * In newer versions of ADIOS2, an error is thrown. - */ + * Using the span-based API in combination with compression is + * unsupported in ADIOS2. + * In newer versions of ADIOS2, an error is thrown. + */ std::cerr << "Ignoring expected error: " << e.what() << std::endl; } - REQUIRE( !spanWorkaround ); + REQUIRE(!spanWorkaround); } // disable span-based API indiscriminately @@ -4500,13 +4967,13 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) "use_span_based_put": false } })END"; - Series write( "../samples/span_based.bp", Access::CREATE, disable ); - auto E_uncompressed = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - auto E_compressed = write.iterations[ 0 ].meshes[ "E" ][ "y" ]; + Series write("../samples/span_based.bp", Access::CREATE, disable); + auto E_uncompressed = write.iterations[0].meshes["E"]["x"]; + auto E_compressed = write.iterations[0].meshes["E"]["y"]; - Dataset ds{ Datatype::INT, { 10 } }; + Dataset ds{Datatype::INT, {10}}; - E_uncompressed.resetDataset( ds ); + E_uncompressed.resetDataset(ds); std::string compression = R"END( { @@ -4522,27 +4989,26 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) })END"; ds.options = compression; - E_compressed.resetDataset( ds ); + E_compressed.resetDataset(ds); bool spanWorkaround = false; - E_uncompressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { + E_uncompressed.storeChunk( + {0}, {10}, [&spanWorkaround](size_t size) { spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( spanWorkaround ); + REQUIRE(spanWorkaround); spanWorkaround = false; - E_compressed.storeChunk< int >( - { 0 }, { 10 }, [ &spanWorkaround ]( size_t size ) { - spanWorkaround = true; - return std::shared_ptr< int >( - new int[ size ]{}, []( auto * ptr ) { delete[] ptr; } ); - } ); + E_compressed.storeChunk({0}, {10}, [&spanWorkaround](size_t size) { + spanWorkaround = true; + return std::shared_ptr( + new int[size]{}, [](auto *ptr) { delete[] ptr; }); + }); - REQUIRE( spanWorkaround ); + REQUIRE(spanWorkaround); } } #endif @@ -4550,57 +5016,55 @@ TEST_CASE( "automatically_deactivate_span", "[serial][adios2]" ) // @todo Upon switching to ADIOS2 2.7.0, test this the other way around also void iterate_nonstreaming_series( - std::string const & file, bool variableBasedLayout, std::string jsonConfig ) + std::string const &file, bool variableBasedLayout, std::string jsonConfig) { constexpr size_t extent = 100; { - Series writeSeries( file, Access::CREATE, jsonConfig ); - if( 
variableBasedLayout ) + Series writeSeries(file, Access::CREATE, jsonConfig); + if (variableBasedLayout) { - writeSeries.setIterationEncoding( - IterationEncoding::variableBased ); + writeSeries.setIterationEncoding(IterationEncoding::variableBased); } // use conventional API to write iterations auto iterations = writeSeries.iterations; - for( size_t i = 0; i < 10; ++i ) + for (size_t i = 0; i < 10; ++i) { - auto iteration = iterations[ i ]; - auto E_x = iteration.meshes[ "E" ][ "x" ]; + auto iteration = iterations[i]; + auto E_x = iteration.meshes["E"]["x"]; E_x.resetDataset( - openPMD::Dataset( openPMD::Datatype::INT, { 2, extent } ) ); - std::vector< int > data( extent, i ); - E_x.storeChunk( data, { 0, 0 }, { 1, extent } ); + openPMD::Dataset(openPMD::Datatype::INT, {2, extent})); + std::vector data(extent, i); + E_x.storeChunk(data, {0, 0}, {1, extent}); bool taskSupportedByBackend = true; - DynamicMemoryView< int > memoryView; + DynamicMemoryView memoryView; { auto currentBuffer = memoryView.currentBuffer(); - REQUIRE( currentBuffer.data() == nullptr ); - REQUIRE( currentBuffer.size() == 0 ); + REQUIRE(currentBuffer.data() == nullptr); + REQUIRE(currentBuffer.size() == 0); } - memoryView = E_x.storeChunk< int >( - { 1, 0 }, - { 1, extent }, + memoryView = E_x.storeChunk( + {1, 0}, + {1, extent}, /* * Hijack the functor that is called for buffer creation. * This allows us to check if the backend has explicit support * for buffer creation or if the fallback implementation is * used. */ - [ &taskSupportedByBackend ]( size_t size ) - { + [&taskSupportedByBackend](size_t size) { taskSupportedByBackend = false; - return std::shared_ptr< int >{ - new int[ size ], []( auto * ptr ) { delete[] ptr; } }; - } ); - if( writeSeries.backend() == "ADIOS2" ) + return std::shared_ptr{ + new int[size], [](auto *ptr) { delete[] ptr; }}; + }); + if (writeSeries.backend() == "ADIOS2") { // that backend must support span creation - REQUIRE( taskSupportedByBackend ); + REQUIRE(taskSupportedByBackend); } auto span = memoryView.currentBuffer(); - for( size_t j = 0; j < span.size(); ++j ) + for (size_t j = 0; j < span.size(); ++j) { - span[ j ] = j; + span[j] = j; } /* @@ -4609,17 +5073,17 @@ void iterate_nonstreaming_series( */ auto scalarMesh = iteration - .meshes[ "i_energyDensity" ][ MeshRecordComponent::SCALAR ]; - scalarMesh.resetDataset( Dataset( Datatype::INT, { 5 } ) ); + .meshes["i_energyDensity"][MeshRecordComponent::SCALAR]; + scalarMesh.resetDataset(Dataset(Datatype::INT, {5})); auto scalarSpan = - scalarMesh.storeChunk< int >( { 0 }, { 5 } ).currentBuffer(); - for( size_t j = 0; j < scalarSpan.size(); ++j ) + scalarMesh.storeChunk({0}, {5}).currentBuffer(); + for (size_t j = 0; j < scalarSpan.size(); ++j) { - scalarSpan[ j ] = j; + scalarSpan[j] = j; } // we encourage manually closing iterations, but it should not // matter so let's do the switcharoo for this test - if( i % 2 == 0 ) + if (i % 2 == 0) { writeSeries.flush(); } @@ -4633,22 +5097,22 @@ void iterate_nonstreaming_series( Series readSeries( file, Access::READ_ONLY, - json::merge( jsonConfig, R"({"defer_iteration_parsing": true})" ) ); + json::merge(jsonConfig, R"({"defer_iteration_parsing": true})")); size_t last_iteration_index = 0; // conventionally written Series must be readable with streaming-aware API! 
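The comment above refers to openPMD-api's streaming-aware read API. As a minimal standalone sketch of that read pattern (illustrative only: the file name and the integer-typed mesh record component "E"/"x" are assumptions, not taken from this patch):

    #include <openPMD/openPMD.hpp>

    #include <iostream>

    int main()
    {
        using namespace openPMD;
        // illustrative file name; group-, file- and variable-based Series
        // can all be read through readIterations()
        Series series("../samples/conventional_%T.h5", Access::READ_ONLY);
        for (IndexedIteration iteration : series.readIterations())
        {
            auto E_x = iteration.meshes["E"]["x"];
            // load the whole record component; INT payload is an assumption
            auto chunk = E_x.loadChunk<int>(
                Offset(E_x.getDimensionality(), 0), E_x.getExtent());
            // closing the iteration flushes the pending load
            iteration.close();
            std::cout << "iteration " << iteration.iterationIndex
                      << ": first value " << chunk.get()[0] << '\n';
        }
        return 0;
    }

The test below exercises exactly this: a Series written with the conventional iteration API must remain readable through readIterations().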
- for( auto iteration : readSeries.readIterations() ) + for (auto iteration : readSeries.readIterations()) { // ReadIterations takes care of Iteration::open()ing iterations - auto E_x = iteration.meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getDimensionality() == 2 ); - REQUIRE( E_x.getExtent()[ 0 ] == 2 ); - REQUIRE( E_x.getExtent()[ 1 ] == extent ); - auto chunk = E_x.loadChunk< int >( { 0, 0 }, { 1, extent } ); - auto chunk2 = E_x.loadChunk< int >( { 1, 0 }, { 1, extent } ); + auto E_x = iteration.meshes["E"]["x"]; + REQUIRE(E_x.getDimensionality() == 2); + REQUIRE(E_x.getExtent()[0] == 2); + REQUIRE(E_x.getExtent()[1] == extent); + auto chunk = E_x.loadChunk({0, 0}, {1, extent}); + auto chunk2 = E_x.loadChunk({1, 0}, {1, extent}); // we encourage manually closing iterations, but it should not matter // so let's do the switcharoo for this test - if( last_iteration_index % 2 == 0 ) + if (last_iteration_index % 2 == 0) { readSeries.flush(); } @@ -4657,209 +5121,209 @@ void iterate_nonstreaming_series( iteration.close(); } - for( size_t i = 0; i < extent; ++i ) + for (size_t i = 0; i < extent; ++i) { - REQUIRE( chunk.get()[ i ] == int(iteration.iterationIndex) ); - REQUIRE( chunk2.get()[ i ] == int(i) ); + REQUIRE(chunk.get()[i] == int(iteration.iterationIndex)); + REQUIRE(chunk2.get()[i] == int(i)); } last_iteration_index = iteration.iterationIndex; } - REQUIRE( last_iteration_index == 9 ); + REQUIRE(last_iteration_index == 9); } -TEST_CASE( "iterate_nonstreaming_series", "[serial][adios2]" ) +TEST_CASE("iterate_nonstreaming_series", "[serial][adios2]") { - for( auto const & backend : testedBackends() ) + for (auto const &backend : testedBackends()) { iterate_nonstreaming_series( "../samples/iterate_nonstreaming_series_filebased_%T." + backend.extension, false, - backend.jsonBaseConfig() ); + backend.jsonBaseConfig()); iterate_nonstreaming_series( "../samples/iterate_nonstreaming_series_groupbased." + backend.extension, false, - backend.jsonBaseConfig() ); + backend.jsonBaseConfig()); } #if openPMD_HAVE_ADIOS2 iterate_nonstreaming_series( "../samples/iterate_nonstreaming_series_variablebased.bp", true, - R"({"backend": "adios2"})" ); + R"({"backend": "adios2"})"); #endif } -void extendDataset( std::string const & ext, std::string const & jsonConfig ) +void extendDataset(std::string const &ext, std::string const &jsonConfig) { std::string filename = "../samples/extendDataset." 
+ ext; - std::vector< int > data1( 25 ); - std::vector< int > data2( 25 ); - std::iota( data1.begin(), data1.end(), 0 ); - std::iota( data2.begin(), data2.end(), 25 ); + std::vector data1(25); + std::vector data2(25); + std::iota(data1.begin(), data1.end(), 0); + std::iota(data2.begin(), data2.end(), 25); { - Series write( filename, Access::CREATE, jsonConfig ); + Series write(filename, Access::CREATE, jsonConfig); // only one iteration written anyway - write.setIterationEncoding( IterationEncoding::variableBased ); + write.setIterationEncoding(IterationEncoding::variableBased); Dataset ds1{ Datatype::INT, - { 5, 5 }, - R"({ "resizable": true, "resizeble": "typo" })" }; - Dataset ds2{ Datatype::INT, { 10, 5 } }; + {5, 5}, + R"({ "resizable": true, "resizeble": "typo" })"}; + Dataset ds2{Datatype::INT, {10, 5}}; // array record component -> array record component // should work - auto E_x = write.iterations[ 0 ].meshes[ "E" ][ "x" ]; - E_x.resetDataset( ds1 ); - E_x.storeChunk( data1, { 0, 0 }, { 5, 5 } ); + auto E_x = write.iterations[0].meshes["E"]["x"]; + E_x.resetDataset(ds1); + E_x.storeChunk(data1, {0, 0}, {5, 5}); write.flush(); - E_x.resetDataset( ds2 ); - E_x.storeChunk( data2, { 5, 0 }, { 5, 5 } ); + E_x.resetDataset(ds2); + E_x.storeChunk(data2, {5, 0}, {5, 5}); // constant record component -> constant record component // should work - auto E_y = write.iterations[ 0 ].meshes[ "E" ][ "y" ]; - E_y.resetDataset( ds1 ); - E_y.makeConstant( 10 ); + auto E_y = write.iterations[0].meshes["E"]["y"]; + E_y.resetDataset(ds1); + E_y.makeConstant(10); write.flush(); - E_y.resetDataset( ds2 ); + E_y.resetDataset(ds2); write.flush(); // empty record component -> empty record component // should work // this does not make a lot of sense since we don't allow shrinking, // but let's just reset it to itself - auto E_z = write.iterations[ 0 ].meshes[ "E" ][ "z" ]; - E_z.makeEmpty< int >( 3 ); + auto E_z = write.iterations[0].meshes["E"]["z"]; + E_z.makeEmpty(3); write.flush(); - E_z.makeEmpty< int >( 3 ); + E_z.makeEmpty(3); write.flush(); // empty record component -> empty record component // (created by resetDataset) // should work - auto E_a = write.iterations[ 0 ].meshes[ "E" ][ "a" ]; - E_a.makeEmpty< int >( 3 ); + auto E_a = write.iterations[0].meshes["E"]["a"]; + E_a.makeEmpty(3); write.flush(); - E_a.resetDataset( Dataset( Datatype::UNDEFINED, { 0, 1, 2 } ) ); + E_a.resetDataset(Dataset(Datatype::UNDEFINED, {0, 1, 2})); write.flush(); // constant record component -> empty record component // should fail, since this implies shrinking - auto E_b = write.iterations[ 0 ].meshes[ "E" ][ "b" ]; - E_b.resetDataset( ds1 ); - E_b.makeConstant( 10 ); + auto E_b = write.iterations[0].meshes["E"]["b"]; + E_b.resetDataset(ds1); + E_b.makeConstant(10); write.flush(); - REQUIRE_THROWS( E_b.makeEmpty< int >( 2 ) ); + REQUIRE_THROWS(E_b.makeEmpty(2)); // empty record component -> constant record component // should work - auto E_c = write.iterations[ 0 ].meshes[ "E" ][ "c" ]; - E_c.makeEmpty< int >( 3 ); + auto E_c = write.iterations[0].meshes["E"]["c"]; + E_c.makeEmpty(3); write.flush(); - E_c.resetDataset( Dataset( { 1, 1, 2 } ) ); + E_c.resetDataset(Dataset({1, 1, 2})); write.flush(); // array record component -> constant record component // should fail - auto E_d = write.iterations[ 0 ].meshes[ "E" ][ "d" ]; - E_d.resetDataset( ds1 ); - E_d.storeChunk( data1, { 0, 0 }, { 5, 5 } ); + auto E_d = write.iterations[0].meshes["E"]["d"]; + E_d.resetDataset(ds1); + E_d.storeChunk(data1, {0, 0}, {5, 5}); 
write.flush(); - REQUIRE_THROWS( E_d.makeConstant( 5 ) ); + REQUIRE_THROWS(E_d.makeConstant(5)); // array record component -> empty record component // should fail - auto E_e = write.iterations[ 0 ].meshes[ "E" ][ "e" ]; - E_e.resetDataset( ds1 ); - E_e.storeChunk( data1, { 0, 0 }, { 5, 5 } ); + auto E_e = write.iterations[0].meshes["E"]["e"]; + E_e.resetDataset(ds1); + E_e.storeChunk(data1, {0, 0}, {5, 5}); write.flush(); - REQUIRE_THROWS( E_e.makeEmpty< int >( 5 ) ); + REQUIRE_THROWS(E_e.makeEmpty(5)); } { - Series read( filename, Access::READ_ONLY, jsonConfig ); - auto E_x = read.iterations[ 0 ].meshes[ "E" ][ "x" ]; - REQUIRE( E_x.getExtent() == Extent{ 10, 5 } ); - auto chunk = E_x.loadChunk< int >( { 0, 0 }, { 10, 5 } ); + Series read(filename, Access::READ_ONLY, jsonConfig); + auto E_x = read.iterations[0].meshes["E"]["x"]; + REQUIRE(E_x.getExtent() == Extent{10, 5}); + auto chunk = E_x.loadChunk({0, 0}, {10, 5}); read.flush(); - for( size_t i = 0; i < 50; ++i ) + for (size_t i = 0; i < 50; ++i) { - REQUIRE( chunk.get()[ i ] == int( i ) ); + REQUIRE(chunk.get()[i] == int(i)); } - auto E_y = read.iterations[ 0 ].meshes[ "E" ][ "y" ]; - REQUIRE( E_y.getExtent() == Extent{ 10, 5 } ); + auto E_y = read.iterations[0].meshes["E"]["y"]; + REQUIRE(E_y.getExtent() == Extent{10, 5}); - auto E_z = read.iterations[ 0 ].meshes[ "E" ][ "z" ]; - REQUIRE( E_z.getExtent() == Extent{ 0, 0, 0 } ); + auto E_z = read.iterations[0].meshes["E"]["z"]; + REQUIRE(E_z.getExtent() == Extent{0, 0, 0}); - auto E_a = read.iterations[ 0 ].meshes[ "E" ][ "a" ]; - REQUIRE( E_a.getExtent() == Extent{ 0, 1, 2 } ); + auto E_a = read.iterations[0].meshes["E"]["a"]; + REQUIRE(E_a.getExtent() == Extent{0, 1, 2}); // E_b could not be changed - auto E_c = read.iterations[ 0 ].meshes[ "E" ][ "c" ]; - REQUIRE( E_c.getExtent() == Extent{ 1, 1, 2 } ); - REQUIRE( !E_c.empty() ); + auto E_c = read.iterations[0].meshes["E"]["c"]; + REQUIRE(E_c.getExtent() == Extent{1, 1, 2}); + REQUIRE(!E_c.empty()); } } -TEST_CASE( "extend_dataset", "[serial]" ) +TEST_CASE("extend_dataset", "[serial]") { - extendDataset( "json", R"({"backend": "json"})" ); + extendDataset("json", R"({"backend": "json"})"); #if openPMD_HAVE_ADIOS2 - extendDataset( "bp", R"({"backend": "adios2"})" ); + extendDataset("bp", R"({"backend": "adios2"})"); #endif #if openPMD_HAVE_HDF5 // extensible datasets require chunking // skip this test for if chunking is disabled - if( auxiliary::getEnvString( "OPENPMD_HDF5_CHUNKS", "auto" ) != "none" ) + if (auxiliary::getEnvString("OPENPMD_HDF5_CHUNKS", "auto") != "none") { - extendDataset( "h5", R"({"backend": "hdf5"})" ); + extendDataset("h5", R"({"backend": "hdf5"})"); } #endif } -void deferred_parsing( std::string const & extension ) +void deferred_parsing(std::string const &extension) { - if( auxiliary::directory_exists( "../samples/lazy_parsing" ) ) - auxiliary::remove_directory( "../samples/lazy_parsing" ); + if (auxiliary::directory_exists("../samples/lazy_parsing")) + auxiliary::remove_directory("../samples/lazy_parsing"); std::string basename = "../samples/lazy_parsing/lazy_parsing_"; // create a single iteration { - Series series( basename + "%06T." + extension, Access::CREATE ); - std::vector< float > buffer( 20 ); - std::iota( buffer.begin(), buffer.end(), 0.f ); - auto dataset = series.iterations[ 1000 ].meshes[ "E" ][ "x" ]; - dataset.resetDataset( { Datatype::FLOAT, { 20 } } ); - dataset.storeChunk( buffer, { 0 }, { 20 } ); + Series series(basename + "%06T." 
+ extension, Access::CREATE); + std::vector buffer(20); + std::iota(buffer.begin(), buffer.end(), 0.f); + auto dataset = series.iterations[1000].meshes["E"]["x"]; + dataset.resetDataset({Datatype::FLOAT, {20}}); + dataset.storeChunk(buffer, {0}, {20}); series.flush(); } // create some empty pseudo files // if the reader tries accessing them it's game over { - for( size_t i = 0; i < 1000; i += 100 ) + for (size_t i = 0; i < 1000; i += 100) { - std::string infix = std::to_string( i ); + std::string infix = std::to_string(i); std::string padding; - for( size_t j = 0; j < 6 - infix.size(); ++j ) + for (size_t j = 0; j < 6 - infix.size(); ++j) { padding += "0"; } infix = padding + infix; std::ofstream file; - file.open( basename + infix + "." + extension ); + file.open(basename + infix + "." + extension); file.close(); } } @@ -4867,76 +5331,72 @@ void deferred_parsing( std::string const & extension ) Series series( basename + "%06T." + extension, Access::READ_ONLY, - "{\"defer_iteration_parsing\": true}" ); - auto dataset = series.iterations[ 1000 ] - .open() - .meshes[ "E" ][ "x" ] - .loadChunk< float >( { 0 }, { 20 } ); + "{\"defer_iteration_parsing\": true}"); + auto dataset = + series.iterations[1000].open().meshes["E"]["x"].loadChunk( + {0}, {20}); series.flush(); - for( size_t i = 0; i < 20; ++i ) + for (size_t i = 0; i < 20; ++i) { REQUIRE( - std::abs( dataset.get()[ i ] - float( i ) ) <= - std::numeric_limits< float >::epsilon() ); + std::abs(dataset.get()[i] - float(i)) <= + std::numeric_limits::epsilon()); } } { Series series( basename + "%06T." + extension, Access::READ_WRITE, - "{\"defer_iteration_parsing\": true}" ); - auto dataset = series.iterations[ 1000 ] - .open() - .meshes[ "E" ][ "x" ] - .loadChunk< float >( { 0 }, { 20 } ); + "{\"defer_iteration_parsing\": true}"); + auto dataset = + series.iterations[1000].open().meshes["E"]["x"].loadChunk( + {0}, {20}); series.flush(); - for( size_t i = 0; i < 20; ++i ) + for (size_t i = 0; i < 20; ++i) { REQUIRE( - std::abs( dataset.get()[ i ] - float( i ) ) <= - std::numeric_limits< float >::epsilon() ); + std::abs(dataset.get()[i] - float(i)) <= + std::numeric_limits::epsilon()); } // create a new iteration - std::vector< float > buffer( 20 ); - std::iota( buffer.begin(), buffer.end(), 0.f ); - auto writeDataset = series.iterations[ 1001 ].meshes[ "E" ][ "x" ]; - writeDataset.resetDataset( { Datatype::FLOAT, { 20 } } ); - writeDataset.storeChunk( buffer, { 0 }, { 20 } ); + std::vector buffer(20); + std::iota(buffer.begin(), buffer.end(), 0.f); + auto writeDataset = series.iterations[1001].meshes["E"]["x"]; + writeDataset.resetDataset({Datatype::FLOAT, {20}}); + writeDataset.storeChunk(buffer, {0}, {20}); series.flush(); } { Series series( basename + "%06T." + extension, Access::READ_ONLY, - "{\"defer_iteration_parsing\": true}" ); - auto dataset = series.iterations[ 1001 ] - .open() - .meshes[ "E" ][ "x" ] - .loadChunk< float >( { 0 }, { 20 } ); + "{\"defer_iteration_parsing\": true}"); + auto dataset = + series.iterations[1001].open().meshes["E"]["x"].loadChunk( + {0}, {20}); series.flush(); - for( size_t i = 0; i < 20; ++i ) + for (size_t i = 0; i < 20; ++i) { REQUIRE( - std::abs( dataset.get()[ i ] - float( i ) ) <= - std::numeric_limits< float >::epsilon() ); + std::abs(dataset.get()[i] - float(i)) <= + std::numeric_limits::epsilon()); } } basename += "groupbased"; { - Series series( basename + "." 
+ extension, Access::CREATE ); - std::vector< float > buffer( 20 ); - std::iota( buffer.begin(), buffer.end(), 0.f ); - for( unsigned i = 0; i < 10; ++i ) + Series series(basename + "." + extension, Access::CREATE); + std::vector buffer(20); + std::iota(buffer.begin(), buffer.end(), 0.f); + for (unsigned i = 0; i < 10; ++i) { - auto dataset = series.iterations[ i ].meshes[ "E" ][ "x" ]; - dataset.resetDataset( { Datatype::FLOAT, { 20 } } ); - dataset.storeChunk( buffer, { 0 }, { 20 } ); + auto dataset = series.iterations[i].meshes["E"]["x"]; + dataset.resetDataset({Datatype::FLOAT, {20}}); + dataset.storeChunk(buffer, {0}, {20}); } - series.iterations[ 9 ].setAttribute( - "time", "this is deliberately wrong" ); + series.iterations[9].setAttribute("time", "this is deliberately wrong"); series.flush(); } @@ -4944,19 +5404,19 @@ void deferred_parsing( std::string const & extension ) Series series( basename + "." + extension, Access::READ_ONLY, - "{\"defer_iteration_parsing\": true}" ); - for( auto iteration : series.readIterations() ) + "{\"defer_iteration_parsing\": true}"); + for (auto iteration : series.readIterations()) { - auto dataset = iteration.meshes[ "E" ][ "x" ].loadChunk< float >( - { 0 }, { 20 } ); + auto dataset = + iteration.meshes["E"]["x"].loadChunk({0}, {20}); iteration.close(); - for( size_t i = 0; i < 20; ++i ) + for (size_t i = 0; i < 20; ++i) { REQUIRE( - std::abs( dataset.get()[ i ] - float( i ) ) <= - std::numeric_limits< float >::epsilon() ); + std::abs(dataset.get()[i] - float(i)) <= + std::numeric_limits::epsilon()); } - if( iteration.iterationIndex == 8 ) + if (iteration.iterationIndex == 8) { // reading up until iteration 8 should work break; @@ -4965,19 +5425,19 @@ void deferred_parsing( std::string const & extension ) } } -TEST_CASE( "deferred_parsing", "[serial]" ) +TEST_CASE("deferred_parsing", "[serial]") { - for( auto const & t : testedFileExtensions() ) + for (auto const &t : testedFileExtensions()) { - deferred_parsing( t ); + deferred_parsing(t); } } // @todo merge this back with the chaotic_stream test of PR #949 // (bug noticed while working on that branch) -void no_explicit_flush( std::string filename ) +void no_explicit_flush(std::string filename) { - std::vector< uint64_t > sampleData{ 5, 9, 1, 3, 4, 6, 7, 8, 2, 0 }; + std::vector sampleData{5, 9, 1, 3, 4, 6, 7, 8, 2, 0}; std::string jsonConfig = R"( { "adios2": { @@ -4990,85 +5450,82 @@ void no_explicit_flush( std::string filename ) })"; { - Series series( filename, Access::CREATE, jsonConfig ); - for( uint64_t currentIteration = 0; currentIteration < 10; - ++currentIteration ) + Series series(filename, Access::CREATE, jsonConfig); + for (uint64_t currentIteration = 0; currentIteration < 10; + ++currentIteration) { auto dataset = - series.writeIterations()[ currentIteration ] - .meshes[ "iterationOrder" ][ MeshRecordComponent::SCALAR ]; - dataset.resetDataset( { determineDatatype< uint64_t >(), { 10 } } ); - dataset.storeChunk( sampleData, { 0 }, { 10 } ); + series.writeIterations()[currentIteration] + .meshes["iterationOrder"][MeshRecordComponent::SCALAR]; + dataset.resetDataset({determineDatatype(), {10}}); + dataset.storeChunk(sampleData, {0}, {10}); // series.writeIterations()[ currentIteration ].close(); } } { - Series series( filename, Access::READ_ONLY ); + Series series(filename, Access::READ_ONLY); size_t index = 0; - for( const auto& iteration : series.readIterations() ) + for (const auto &iteration : series.readIterations()) { - REQUIRE( iteration.iterationIndex == index ); + 
REQUIRE(iteration.iterationIndex == index);
             ++index;
         }
-        REQUIRE( index == 10 );
+        REQUIRE(index == 10);
     }
 }

-TEST_CASE( "no_explicit_flush", "[serial]" )
+TEST_CASE("no_explicit_flush", "[serial]")
 {
-    for( auto const & t : testedFileExtensions() )
+    for (auto const &t : testedFileExtensions())
     {
-        no_explicit_flush( "../samples/no_explicit_flush_filebased_%T." + t );
-        no_explicit_flush( "../samples/no_explicit_flush." + t );
+        no_explicit_flush("../samples/no_explicit_flush_filebased_%T." + t);
+        no_explicit_flush("../samples/no_explicit_flush." + t);
     }
 }

-TEST_CASE( "late_setting_of_iterationencoding", "[serial]" )
+TEST_CASE("late_setting_of_iterationencoding", "[serial]")
 {
     {
         ::openPMD::Series series = ::openPMD::Series(
-            "../samples/error.json", ::openPMD::Access::CREATE );
-        series.iterations[ 10 ];
+            "../samples/error.json", ::openPMD::Access::CREATE);
+        series.iterations[10];
         REQUIRE_THROWS_WITH(
             series.setIterationEncoding(
-                ::openPMD::IterationEncoding::fileBased ),
-            Catch::Equals( "Wrong API usage: For fileBased formats the " "iteration expansion pattern %T must " "be included in the file name" ) );
+                ::openPMD::IterationEncoding::fileBased),
+            Catch::Equals("Wrong API usage: For fileBased formats the " "iteration expansion pattern %T must " "be included in the file name"));
         series.flush();
     }
     {
         ::openPMD::Series series = ::openPMD::Series(
-            "../samples/asdf_%T.json",
-            ::openPMD::Access::CREATE );
-        series.iterations[ 10 ];
-        series.setName(
-            "change_name_%T" );
+            "../samples/asdf_%T.json", ::openPMD::Access::CREATE);
+        series.iterations[10];
+        series.setName("change_name_%T");
         series.flush();
     }
     {
         ::openPMD::Series series = ::openPMD::Series(
             "../samples/change_name_keep_filename_%T.json",
-            ::openPMD::Access::CREATE );
-        series.iterations[ 10 ];
+            ::openPMD::Access::CREATE);
+        series.iterations[10];
         series.setName(
-            "expansion_pattern_was_specified_previously_filename_will_stay" );
+            "expansion_pattern_was_specified_previously_filename_will_stay");
         series.flush();
     }
     {
         ::openPMD::Series series = ::openPMD::Series(
-            "../samples/asdf.json", ::openPMD::Access::CREATE );
-        series.iterations[ 10 ];
-        series.setName( "change_name_and_encoding_%T" );
-        series.setIterationEncoding( IterationEncoding::fileBased );
+            "../samples/asdf.json", ::openPMD::Access::CREATE);
+        series.iterations[10];
+        series.setName("change_name_and_encoding_%T");
+        series.setIterationEncoding(IterationEncoding::fileBased);
         series.flush();
     }
-    REQUIRE( auxiliary::file_exists(
-        "../samples/change_name_10.json" ) );
-    REQUIRE( auxiliary::file_exists(
-        "../samples/change_name_keep_filename_10.json" ) );
-    REQUIRE( auxiliary::file_exists(
-        "../samples/change_name_and_encoding_10.json" ) );
+    REQUIRE(auxiliary::file_exists("../samples/change_name_10.json"));
+    REQUIRE(
+        auxiliary::file_exists("../samples/change_name_keep_filename_10.json"));
+    REQUIRE(
+        auxiliary::file_exists("../samples/change_name_and_encoding_10.json"));
 }
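A usage note on the last test case: switching a Series to fileBased iteration encoding after creation only works if the Series name, possibly changed later via setName(), contains the %T expansion pattern. A minimal sketch of that pattern under the same assumption as the test (JSON backend; the file name below is illustrative, not taken from the patch):

    #include <openPMD/openPMD.hpp>

    int main()
    {
        using namespace openPMD;
        // created without %T, so fileBased encoding cannot be selected yet
        Series series("../samples/late_encoding.json", Access::CREATE);
        series.iterations[10]; // define one (still empty) iteration
        // rename with a %T pattern, then switch the encoding
        series.setName("late_encoding_%T");
        series.setIterationEncoding(IterationEncoding::fileBased);
        // per the checks above, this is expected to materialize one file per
        // iteration, here late_encoding_10.json
        series.flush();
        return 0;
    }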