Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions docs/source/usage/plugins/openPMD.rst
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,9 @@ Also see :ref:`common patterns of defining particle filters <usage-workflows-par
.cfg file
^^^^^^^^^

Note that all the following command line parameters can *alternatively* be specified in a ``.toml`` configuration file.
See the next section for further information: `Configuring the openPMD plugin with a TOML configuration file`_

You can use ``--openPMD.period`` to specify the output period.
The base filename is specified via ``--openPMD.file``.
The openPMD API will parse the file name to decide the chosen backend and iteration layout:
Expand Down Expand Up @@ -199,6 +202,18 @@ Performance
On the Summit compute system, specifying ``export IBM_largeblock_io=true`` disables data shipping, which leads to reduced overhead for large block write operations.
This setting is applied in the Summit templates found in ``etc/picongpu/summit-ornl``.

Configuring the openPMD plugin with a TOML configuration file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The openPMD plugin can alternatively be configured by using a ``.toml`` configuration file.
Note the inline comments for a description of the schema in use:

.. literalinclude:: openPMD.toml

The location of the ``.toml`` file on the filesystem is specified via ``--openPMD.toml``.
If this parameter is used, no other command line parameters may be passed to the openPMD plugin.
If any other parameter is specified nevertheless, the openPMD plugin will detect the conflict and abort.


Memory Complexity
^^^^^^^^^^^^^^^^^
Expand Down
30 changes: 30 additions & 0 deletions docs/source/usage/plugins/openPMD.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# The following parameters need not be specified
# If a parameter is left unspecified, it falls back to its default value
file = "simData" # replaces --openPMD.file,
# given value is the default
infix = "" # replaces --openPMD.infix,
# default is "%06T"
ext = "bp" # replaces --openPMD.ext,
# given value is the default
backend_config = "@./adios_config.json" # replaces --openPMD.json,
# default is "{}"
data_preparation_strategy = "mappedMemory" # replaces --openPMD.dataPreparationStrategy,
# default is "doubleBuffer"


# Periods and data sources are specified independently per reading application
# The application names can be arbitrary and are not interpreted, except
# potentially for logging and other messages.
[sink.saxs_scattering.period]
# Each entry here denotes a periodicity combined with data sources requested
# by the reading code from PIConGPU at the specified periodicity
500 = "species_all"

# A second data sink needs other output data
# All reading requests are merged into one single instance of the openPMD plugin
# Overlapping requests are no problem
[sink.some_other_name.period]
# Data sources can be specified as a list if needed
"400:400" = ["E", "B"]
# Time slice syntax is equivalent to that used by --openPMD.period
"100:300:200,444:444" = "fields_all"
25 changes: 24 additions & 1 deletion include/picongpu/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,23 @@ if(PIC_SEARCH_openPMD)
else()
find_package(nlohmann_json 3.9.1 CONFIG REQUIRED)
message(STATUS "nlohmann-json: Found version '${nlohmann_json_VERSION}'")
PIC_dependency_set_status(openPMD FALSE)
endif()

# TOML
# Select whether to use the toml11 library shipped in thirdParty/ or an
# installation found on the system (mirrors the nlohmann_json handling above).
set(
    PIC_toml11_PROVIDER "intern" CACHE
    STRING "Use internally shipped or external toml11 library.")
set_property(
    CACHE PIC_toml11_PROVIDER
    PROPERTY STRINGS "intern;extern")
mark_as_advanced(PIC_toml11_PROVIDER)
if(${PIC_toml11_PROVIDER} STREQUAL "intern")
    # Bundled copy: build it in a dedicated binary dir to avoid clashes.
    add_subdirectory(
        "${PIConGPUapp_SOURCE_DIR}/../../thirdParty/toml11"
        "${CMAKE_CURRENT_BINARY_DIR}/build_toml11")
else()
    # System copy: require at least the version the bundled copy provides.
    find_package(toml11 3.7.0 CONFIG REQUIRED)
    message(STATUS "toml11: Found version '${toml11_VERSION}'")
endif()
set(HOST_LIBS ${HOST_LIBS} openPMD::openPMD)
else()
Expand Down Expand Up @@ -500,6 +516,13 @@ if(PIC_HAVE_openPMD)
SYSTEM PRIVATE
$<TARGET_PROPERTY:nlohmann_json::nlohmann_json,INTERFACE_INCLUDE_DIRECTORIES>)
target_link_libraries(picongpu-hostonly PRIVATE nlohmann_json::nlohmann_json)

# same tricks for toml11
target_include_directories(
picongpu-hostonly
SYSTEM PRIVATE
$<TARGET_PROPERTY:toml11::toml11,INTERFACE_INCLUDE_DIRECTORIES>)
target_link_libraries(picongpu-hostonly PRIVATE toml11::toml11)
endif()


Expand Down
86 changes: 86 additions & 0 deletions include/picongpu/plugins/common/MPIHelpers.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,86 @@
/* Copyright 2021 Franz Poeschel
*
* This file is part of PIConGPU.
*
* PIConGPU is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PIConGPU is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PIConGPU.
* If not, see <http://www.gnu.org/licenses/>.
*/

#include "picongpu/plugins/common/MPIHelpers.hpp"

#include <pmacc/communication/manager_common.hpp>

#include <algorithm>
#include <cstddef>
#include <fstream>
#include <numeric>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>


namespace picongpu
{
    /**
     * @brief Read a file in MPI-collective manner.
     *
     * The file is read on rank 0 and its contents subsequently distributed
     * to all other ranks via two broadcasts (length, then content).
     *
     * @param path Path for the file to read.
     * @param comm MPI communicator.
     * @return std::string Full file content.
     * @throws std::runtime_error If the file cannot be read on rank 0 or
     *         if either broadcast fails.
     */
    std::string collective_file_read(std::string const& path, MPI_Comm comm)
    {
        int rank;
        MPI_CHECK(MPI_Comm_rank(comm, &rank));

        std::string res;
        size_t stringLength = 0;
        if(rank == 0)
        {
            // Slurp the whole file through the stream buffer.
            std::fstream handle;
            handle.open(path, std::ios_base::in);
            std::stringstream stream;
            stream << handle.rdbuf();
            res = stream.str();
            if(!handle.good())
            {
                // Generic helper: it reads any text file (JSON, TOML, ...),
                // so the error message must not claim a specific format.
                throw std::runtime_error("Failed reading config from file " + path + ".");
            }
            // Broadcast the terminating null byte as well, so receiving
            // ranks can construct the string directly from the buffer.
            stringLength = res.size() + 1;
        }
        // size_t has no fixed MPI datatype; MPI_Types maps it portably.
        MPI_Datatype datatype = MPI_Types<size_t>{}.value;
        int err = MPI_Bcast(&stringLength, 1, datatype, 0, comm);
        if(err != MPI_SUCCESS)
        {
            throw std::runtime_error("[collective_file_read] MPI_Bcast stringLength failure.");
        }
        std::vector<char> recvbuf(stringLength, 0);
        if(rank == 0)
        {
            std::copy_n(res.c_str(), stringLength, recvbuf.data());
        }
        // MPI_Bcast takes an int count; make the narrowing explicit.
        err = MPI_Bcast(recvbuf.data(), static_cast<int>(stringLength), MPI_CHAR, 0, comm);
        if(err != MPI_SUCCESS)
        {
            throw std::runtime_error("[collective_file_read] MPI_Bcast file content failure.");
        }
        if(rank != 0)
        {
            // Buffer is null-terminated (see above); build from C string.
            res = recvbuf.data();
        }
        return res;
    }
} // namespace picongpu
67 changes: 67 additions & 0 deletions include/picongpu/plugins/common/MPIHelpers.hpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
/* Copyright 2021 Franz Poeschel
*
* This file is part of PIConGPU.
*
* PIConGPU is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* PIConGPU is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with PIConGPU.
* If not, see <http://www.gnu.org/licenses/>.
*/

#pragma once

#include <string>
#include <vector>

#include <mpi.h>

namespace picongpu
{
    /**
     * @brief Helper class to help figure out a platform-independent
     * MPI_Datatype for size_t.
     *
     * size_t aliases a different unsigned fundamental type depending on
     * platform/ABI, so no single MPI datatype constant matches it
     * everywhere. Usage: MPI_Types<size_t>{}.value selects the correct
     * specialization at compile time. The primary template is left
     * undefined on purpose: requesting an unsupported type is a
     * compile-time error.
     */
    template<typename>
    struct MPI_Types;

    template<>
    struct MPI_Types<unsigned long>
    {
        // can't make this constexpr due to MPI
        // so, make this non-static for simplicity
        MPI_Datatype value = MPI_UNSIGNED_LONG;
    };

    template<>
    struct MPI_Types<unsigned long long>
    {
        // non-static for the same reason as above
        MPI_Datatype value = MPI_UNSIGNED_LONG_LONG;
    };

    template<>
    struct MPI_Types<unsigned>
    {
        // non-static for the same reason as above
        MPI_Datatype value = MPI_UNSIGNED;
    };

    /**
     * @brief Read a file in MPI-collective manner.
     *
     * The file is read on rank 0 and its contents subsequently distributed
     * to all other ranks.
     *
     * @param path Path for the file to read.
     * @param comm MPI communicator.
     * @return std::string Full file content.
     */
    std::string collective_file_read(std::string const& path, MPI_Comm comm);
} // namespace picongpu
56 changes: 0 additions & 56 deletions include/picongpu/plugins/openPMD/Json.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,6 @@

# include <algorithm> // std::copy_n, std::find
# include <cctype> // std::isspace
# include <fstream>
# include <sstream>

/*
* Note:
Expand Down Expand Up @@ -239,60 +237,6 @@ namespace
}
}

/**
 * @brief Read a file in MPI-collective manner.
 *
 * The file is read on rank 0 and its contents subsequently distributed
 * to all other ranks.
 *
 * NOTE(review): this copy is superseded by the shared helper in
 * picongpu/plugins/common/MPIHelpers.cpp and is being removed here.
 *
 * @param path Path for the file to read.
 * @param comm MPI communicator.
 * @return std::string Full file content.
 */
std::string collective_file_read(std::string const& path, MPI_Comm comm)
{
    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    std::string res;
    size_t stringLength = 0;
    if(rank == 0)
    {
        // Slurp the whole file through the stream buffer.
        std::fstream handle;
        handle.open(path, std::ios_base::in);
        std::stringstream stream;
        stream << handle.rdbuf();
        res = stream.str();
        if(!handle.good())
        {
            throw std::runtime_error("Failed reading JSON config from file " + path + ".");
        }
        // Include the terminating null byte in the broadcast length so
        // receivers can construct the string directly from the buffer.
        stringLength = res.size() + 1;
    }
    // size_t has no fixed MPI datatype; MPI_Types maps it portably.
    MPI_Datatype datatype = MPI_Types<size_t>{}.value;
    int err = MPI_Bcast(&stringLength, 1, datatype, 0, comm);
    if(err)
    {
        throw std::runtime_error("[collective_file_read] MPI_Bcast stringLength failure.");
    }
    std::vector<char> recvbuf(stringLength, 0);
    if(rank == 0)
    {
        std::copy_n(res.c_str(), stringLength, recvbuf.data());
    }
    err = MPI_Bcast(recvbuf.data(), stringLength, MPI_CHAR, 0, comm);
    if(err)
    {
        throw std::runtime_error("[collective_file_read] MPI_Bcast file content failure.");
    }
    if(rank != 0)
    {
        // Non-root ranks build their copy from the null-terminated buffer.
        res = recvbuf.data();
    }
    return res;
}

KindOfConfig readPattern(
std::vector<picongpu::json::Pattern>& patterns,
nlohmann::json& defaultConfig,
Expand Down
41 changes: 1 addition & 40 deletions include/picongpu/plugins/openPMD/Json_private.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -19,13 +19,13 @@

#pragma once

#include "picongpu/plugins/common/MPIHelpers.hpp"
#include "picongpu/plugins/openPMD/Json.hpp"

#include <regex>
#include <string>
#include <vector>

#include <mpi.h>
#include <nlohmann/json.hpp>

/*
Expand Down Expand Up @@ -208,45 +208,6 @@ namespace
*/
std::string extractFilename(std::string const& unparsed);

/**
 * @brief Helper class to help figure out a platform-independent
 * MPI_Datatype for size_t.
 *
 * NOTE(review): superseded by the shared declaration in
 * picongpu/plugins/common/MPIHelpers.hpp and being removed here.
 * The primary template is intentionally undefined; only the unsigned
 * integer types that size_t may alias are specialized below.
 */
template<typename>
struct MPI_Types;

template<>
struct MPI_Types<unsigned long>
{
    // can't make this constexpr due to MPI
    // so, make this non-static for simplicity
    MPI_Datatype value = MPI_UNSIGNED_LONG;
};

template<>
struct MPI_Types<unsigned long long>
{
    // non-static for the same reason as above
    MPI_Datatype value = MPI_UNSIGNED_LONG_LONG;
};

template<>
struct MPI_Types<unsigned>
{
    // non-static for the same reason as above
    MPI_Datatype value = MPI_UNSIGNED;
};

/**
 * @brief Read a file in MPI-collective manner.
 *
 * The file is read on rank 0 and its contents subsequently distributed
 * to all other ranks.
 *
 * @param path Path for the file to read.
 * @param comm MPI communicator.
 * @return std::string Full file content.
 */
std::string collective_file_read(std::string const& path, MPI_Comm comm);

enum class KindOfConfig : char
{
Pattern,
Expand Down
Loading