diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 3774065f12..7c368b2ff6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -56,16 +56,17 @@ repos: # clang-format # Python Formatting -#- repo: https://github.com/psf/black -# rev: 21.10b0 # Keep in sync with blacken-docs -# hooks: -# - id: black -#- repo: https://github.com/asottile/blacken-docs -# rev: v1.11.0 -# hooks: -# - id: blacken-docs -# additional_dependencies: -# - black==21.10b0 # keep in sync with black hook +- repo: https://github.com/psf/black + rev: 21.12b0 # Keep in sync with blacken-docs + hooks: + - id: black +- repo: https://github.com/asottile/blacken-docs + rev: v1.11.0 + hooks: + - id: blacken-docs + additional_dependencies: + - black==21.12b0 # keep in sync with black hook + - openpmd-api # Checks the manifest for missing files (native support) - repo: https://github.com/mgedmin/check-manifest diff --git a/NEWS.rst b/NEWS.rst index cf23818ae5..dbb9291965 100644 --- a/NEWS.rst +++ b/NEWS.rst @@ -43,9 +43,9 @@ The old setter function (``set_data_order``) and read-only property (``data_orde series = io.Series("data%T.h5", io.Access.read_only) rho = series.iterations[0].meshes["rho"] - rho.data_order = 'C' # or 'F' + rho.data_order = "C" # or 'F' - print(rho.data_order == 'C') # True + print(rho.data_order == "C") # True Note: we recommend using ``'C'`` order since version 2 of the openPMD-standard will simplify this option to ``'C'``, too. For Fortran-ordered indices, please just invert the attributes ``axis_labels``, ``grid_spacing`` and ``grid_global_offset`` accordingly. @@ -163,7 +163,13 @@ The new order allows to make use of defaults in many cases in order reduce compl electrons["position"]["x"].reset_dataset(d) # old code - electrons["position"]["x"].store_chunk([0, ], particlePos_x.shape, particlePos_x) + electrons["position"]["x"].store_chunk( + [ + 0, + ], + particlePos_x.shape, + particlePos_x, + ) # new code electrons["position"]["x"].store_chunk(particlePos_x) diff --git a/docs/source/citation.rst b/docs/source/citation.rst index 4a049e3c54..fed7e96763 100644 --- a/docs/source/citation.rst +++ b/docs/source/citation.rst @@ -126,7 +126,5 @@ Python import openpmd_api as io - print("openPMD-api: {}" - .format(io.__version__)) - print("openPMD-api backend variants: {}" - .format(io.variants)) + print("openPMD-api: {}".format(io.__version__)) + print("openPMD-api backend variants: {}".format(io.variants)) diff --git a/docs/source/conf.py b/docs/source/conf.py index 7a55e977e8..90562adea9 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -16,6 +16,7 @@ import os import subprocess from recommonmark.parser import CommonMarkParser + # import sys # sys.path.insert(0, os.path.abspath('.')) @@ -23,7 +24,7 @@ # -- General configuration ------------------------------------------------ # RTD -on_rtd = os.environ.get('READTHEDOCS', None) == 'True' +on_rtd = os.environ.get("READTHEDOCS", None) == "True" show_authors = True @@ -34,58 +35,63 @@ # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. 
-extensions = ['sphinx.ext.mathjax', - 'breathe', - 'sphinxcontrib.programoutput', - 'sphinxcontrib.rsvgconverter', - 'matplotlib.sphinxext.plot_directive'] +extensions = [ + "sphinx.ext.mathjax", + "breathe", + "sphinxcontrib.programoutput", + "sphinxcontrib.rsvgconverter", + "matplotlib.sphinxext.plot_directive", +] if not on_rtd: - extensions.append('sphinx.ext.githubpages') + extensions.append("sphinx.ext.githubpages") # breathe config -breathe_projects = {'openPMD-api': '../xml'} -breathe_default_project = 'openPMD-api' +breathe_projects = {"openPMD-api": "../xml"} +breathe_default_project = "openPMD-api" -subprocess.call('cd ..; doxygen;' - 'mkdir -p source/_static;' - 'cp -r doxyhtml source/_static/;' - 'cp openpmd-api-doxygen-web.tag.xml source/_static/doxyhtml/', - shell=True) +subprocess.call( + "cd ..; doxygen;" + "mkdir -p source/_static;" + "cp -r doxyhtml source/_static/;" + "cp openpmd-api-doxygen-web.tag.xml source/_static/doxyhtml/", + shell=True, +) if not on_rtd: import sphinx_rtd_theme + html_theme = "sphinx_rtd_theme" html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] +templates_path = ["_templates"] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_parsers = { - '.md': CommonMarkParser, + ".md": CommonMarkParser, } -source_suffix = ['.rst', '.md'] +source_suffix = [".rst", ".md"] # The master toctree document. -master_doc = 'index' +master_doc = "index" # General information about the project. -project = u'openPMD-api' -copyright = u'Documentation under CC-BY 4.0, The openPMD Community' -author = u'The openPMD Community' +project = u"openPMD-api" +copyright = u"Documentation under CC-BY 4.0, The openPMD Community" +author = u"The openPMD Community" # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. -version = u'0.15.0' +version = u"0.15.0" # The full version, including alpha/beta/rc tags. -release = u'0.15.0-dev' +release = u"0.15.0-dev" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. @@ -100,7 +106,7 @@ exclude_patterns = [] # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'default' +pygments_style = "default" # If true, `todo` and `todoList` produce output, else they produce nothing. todo_include_todos = False @@ -108,7 +114,7 @@ # -- Options for HTML output ---------------------------------------------- -html_logo = 'openPMD.png' +html_logo = "openPMD.png" # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. @@ -124,34 +130,31 @@ # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] +html_static_path = ["_static"] # -- Options for HTMLHelp output ------------------------------------------ # Output file base name for HTML help builder. -htmlhelp_basename = 'openPMD-apidoc' +htmlhelp_basename = "openPMD-apidoc" # -- Options for LaTeX output --------------------------------------------- -latex_logo = 'openPMD.png' +latex_logo = "openPMD.png" latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# # 'papersize': 'letterpaper', - 'papersize': 'a4paper', - + "papersize": "a4paper", # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', - # Additional stuff for the LaTeX preamble. # # 'preamble': '', - 'preamble': r'\setcounter{tocdepth}{2}', - + "preamble": r"\setcounter{tocdepth}{2}", # Latex figure (float) alignment # # 'figure_align': 'htbp', @@ -161,8 +164,13 @@ # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ - (master_doc, 'openPMD-api.tex', u'openPMD-api Documentation', - u'The openPMD Community', 'manual'), + ( + master_doc, + "openPMD-api.tex", + u"openPMD-api Documentation", + u"The openPMD Community", + "manual", + ), ] @@ -170,10 +178,7 @@ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [ - (master_doc, 'openPMD-api', u'openPMD-api Documentation', - [author], 1) -] +man_pages = [(master_doc, "openPMD-api", u"openPMD-api Documentation", [author], 1)] # -- Options for Texinfo output ------------------------------------------- @@ -182,14 +187,19 @@ # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ - (master_doc, 'openPMD-api', u'openPMD-api Documentation', - author, 'openPMD-api', - 'C++ and Python APIs for the openPMD meta-standard', - """ + ( + master_doc, + "openPMD-api", + u"openPMD-api Documentation", + author, + "openPMD-api", + "C++ and Python APIs for the openPMD meta-standard", + """ The openPMD standard, short for open standard for particle-mesh data files is not a file format per se. It is a standard for meta data and naming schemes. openPMD provides naming and attribute conventions that allow to exchange particle and mesh based data from scientific simulations and experiments. - """), + """, + ), ] diff --git a/docs/source/usage/firstread.rst b/docs/source/usage/firstread.rst index cea43a934b..17a0cee193 100644 --- a/docs/source/usage/firstread.rst +++ b/docs/source/usage/firstread.rst @@ -95,9 +95,7 @@ Python .. code-block:: python3 - series = io.Series( - "data%T.h5", - io.Access.read_only) + series = io.Series("data%T.h5", io.Access.read_only) Iteration --------- @@ -142,12 +140,10 @@ Python .. code-block:: python3 - print("openPMD version: ", - series.openPMD) + print("openPMD version: ", series.openPMD) if series.contains_attribute("author"): - print("Author: ", - series.author) + print("Author: ", series.author) Record ------ @@ -328,10 +324,7 @@ Python extent = E_x.shape - print( - "First values in E_x " - "of shape: ", - extent) + print("First values in E_x " "of shape: ", extent) print(x_data[0, 0, :5]) diff --git a/docs/source/usage/firstwrite.rst b/docs/source/usage/firstwrite.rst index 48a37793fe..e13d33c52f 100644 --- a/docs/source/usage/firstwrite.rst +++ b/docs/source/usage/firstwrite.rst @@ -93,9 +93,7 @@ Python .. code-block:: python3 - series = io.Series( - "myOutput/data_%05T.h5", - io.Access.create) + series = io.Series("myOutput/data_%05T.h5", io.Access.create) Iteration --------- @@ -141,13 +139,10 @@ Python .. code-block:: python3 - series.author = \ - "Axel Huebl " + series.author = "Axel Huebl " series.machine = "Hall Probe 5000, Model 3" - series.set_attribute( - "dinner", "Pizza and Coke") - i.set_attribute( - "vacuum", True) + series.set_attribute("dinner", "Pizza and Coke") + i.set_attribute("vacuum", True) Data ---- @@ -178,16 +173,12 @@ Python .. 
code-block:: python3 - x_data = np.arange( - 150 * 300, - dtype=np.float - ).reshape(150, 300) - + x_data = np.arange(150 * 300, dtype=np.float).reshape(150, 300) - y_data = 4. + y_data = 4.0 - z_data = x_data.copy() - 8000. + z_data = x_data.copy() - 8000.0 Record ------ @@ -228,9 +219,7 @@ Python B_y = B["y"] B_z = B["z"] - dataset = io.Dataset( - x_data.dtype, - x_data.shape) + dataset = io.Dataset(x_data.dtype, x_data.shape) B_x.reset_dataset(dataset) B_y.reset_dataset(dataset) B_z.reset_dataset(dataset) @@ -268,15 +257,15 @@ Python # unit system agnostic dimension B.unit_dimension = { - io.Unit_Dimension.M: 1, + io.Unit_Dimension.M: 1, io.Unit_Dimension.I: -1, - io.Unit_Dimension.T: -2 + io.Unit_Dimension.T: -2, } # conversion to SI - B_x.unit_SI = 1.e-4 - B_y.unit_SI = 1.e-4 - B_z.unit_SI = 1.e-4 + B_x.unit_SI = 1.0e-4 + B_y.unit_SI = 1.0e-4 + B_z.unit_SI = 1.0e-4 .. tip:: @@ -316,7 +305,6 @@ Python B_z.store_chunk(z_data) - B_y.make_constant(y_data) .. attention:: diff --git a/examples/10_streaming_read.py b/examples/10_streaming_read.py index 4169f541f5..be353da38d 100755 --- a/examples/10_streaming_read.py +++ b/examples/10_streaming_read.py @@ -5,9 +5,9 @@ # pass-through for ADIOS2 engine parameters # https://adios2.readthedocs.io/en/latest/engines/engines.html -config = {'adios2': {'engine': {}, 'dataset': {}}} -config['adios2']['engine'] = {'parameters': {'Threads': '4'}} -config['adios2']['dataset'] = {'operators': [{'type': 'bzip2'}]} +config = {"adios2": {"engine": {}, "dataset": {}}} +config["adios2"]["engine"] = {"parameters": {"Threads": "4"}} +config["adios2"]["dataset"] = {"operators": [{"type": "bzip2"}]} if __name__ == "__main__": # this block is for our CI, SST engine is not present on all systems @@ -16,8 +16,7 @@ print("SST engine not available in ADIOS2.") sys.exit(0) - series = io.Series("simData.sst", io.Access_Type.read_only, - json.dumps(config)) + series = io.Series("simData.sst", io.Access_Type.read_only, json.dumps(config)) # Read all available iterations and print electron position data. 
# Use `series.read_iterations()` instead of `series.iterations` diff --git a/examples/10_streaming_write.py b/examples/10_streaming_write.py index c6ead08c47..d256c81d3c 100755 --- a/examples/10_streaming_write.py +++ b/examples/10_streaming_write.py @@ -6,9 +6,9 @@ # pass-through for ADIOS2 engine parameters # https://adios2.readthedocs.io/en/latest/engines/engines.html -config = {'adios2': {'engine': {}, 'dataset': {}}} -config['adios2']['engine'] = {'parameters': {'Threads': '4'}} -config['adios2']['dataset'] = {'operators': [{'type': 'bzip2'}]} +config = {"adios2": {"engine": {}, "dataset": {}}} +config["adios2"]["engine"] = {"parameters": {"Threads": "4"}} +config["adios2"]["dataset"] = {"operators": [{"type": "bzip2"}]} if __name__ == "__main__": # this block is for our CI, SST engine is not present on all systems @@ -19,8 +19,7 @@ # create a series and specify some global metadata # change the file extension to .json, .h5 or .bp for regular file writing - series = io.Series("simData.sst", io.Access_Type.create, - json.dumps(config)) + series = io.Series("simData.sst", io.Access_Type.create, json.dumps(config)) series.set_author("Franz Poeschel ") series.set_software("openPMD-api-python-examples") @@ -48,8 +47,7 @@ electronPositions.set_attribute("comment", "I'm a comment") length = 10 - local_data = np.arange(i * length, (i + 1) * length, - dtype=np.dtype("double")) + local_data = np.arange(i * length, (i + 1) * length, dtype=np.dtype("double")) for dim in ["x", "y", "z"]: pos = electronPositions[dim] pos.reset_dataset(io.Dataset(local_data.dtype, [length])) @@ -65,12 +63,11 @@ temperature = iteration.meshes["temperature"] temperature.unit_dimension = {io.Unit_Dimension.theta: 1.0} temperature.axis_labels = ["x", "y"] - temperature.grid_spacing = [1., 1.] + temperature.grid_spacing = [1.0, 1.0] # temperature has no x,y,z components, so skip the last layer: temperature_dataset = temperature[io.Mesh_Record_Component.SCALAR] # let's say we are in a 3x3 mesh - temperature_dataset.reset_dataset( - io.Dataset(np.dtype("double"), [3, 3])) + temperature_dataset.reset_dataset(io.Dataset(np.dtype("double"), [3, 3])) # temperature is constant temperature_dataset.make_constant(273.15) diff --git a/examples/11_particle_dataframe.py b/examples/11_particle_dataframe.py index d3ae34a18d..f7e47fa29d 100755 --- a/examples/11_particle_dataframe.py +++ b/examples/11_particle_dataframe.py @@ -9,6 +9,7 @@ import openpmd_api as io import numpy as np import sys + try: import pandas as pd except ImportError: @@ -19,6 +20,7 @@ from dask.delayed import delayed import dask.array as da import dask + found_dask = True except ImportError: print("dask NOT found. Install dask to run the 2nd example.") @@ -42,7 +44,7 @@ # the default schedulers are local/threaded, not requiring much. 
# But multi-node, "distributed" and local "processes" need object # pickle capabilities, so we test this here: - dask.config.set(scheduler='processes') + dask.config.set(scheduler="processes") df = electrons.to_dask() print(df) @@ -55,9 +57,12 @@ print("={}".format(df["momentum_z"].mean().compute())) # example2: momentum histogram - h, bins = da.histogram(df["momentum_z"].to_dask_array(), bins=50, - range=[-8.0e-23, 8.0e-23], - weights=df["weighting"].to_dask_array()) + h, bins = da.histogram( + df["momentum_z"].to_dask_array(), + bins=50, + range=[-8.0e-23, 8.0e-23], + weights=df["weighting"].to_dask_array(), + ) print(h.compute()) # example3: longitudinal phase space (dask 2021.04.0+) @@ -65,10 +70,10 @@ z_max = df["position_z"].max().compute() z_pz, z_pz_bins = da.histogramdd( - df[['position_z', 'momentum_z']].to_dask_array(), + df[["position_z", "momentum_z"]].to_dask_array(), bins=[80, 80], range=[[z_min, z_max], [-8.0e-23, 8.0e-23]], - weights=df["weighting"].to_dask_array() + weights=df["weighting"].to_dask_array(), ) print(z_pz.compute()) @@ -90,7 +95,11 @@ Intensity = darr_x * darr_x + darr_y * darr_y + darr_z * darr_z Intensity_max = Intensity.max().compute() idx_max = da.argwhere(Intensity == Intensity_max).compute()[0] - pos_max = E.grid_unit_SI * 1.0e6 * ( - idx_max * E.grid_spacing + E.grid_global_offset) - print("maximum intensity I={} at index={} z={}mu".format( - Intensity_max, idx_max, pos_max[2])) + pos_max = ( + E.grid_unit_SI * 1.0e6 * (idx_max * E.grid_spacing + E.grid_global_offset) + ) + print( + "maximum intensity I={} at index={} z={}mu".format( + Intensity_max, idx_max, pos_max[2] + ) + ) diff --git a/examples/13_write_dynamic_configuration.py b/examples/13_write_dynamic_configuration.py index d3af270075..23f3605817 100644 --- a/examples/13_write_dynamic_configuration.py +++ b/examples/13_write_dynamic_configuration.py @@ -52,14 +52,13 @@ def main(): - if not io.variants['adios2']: + if not io.variants["adios2"]: # Example configuration below selects the ADIOS2 backend return # create a series and specify some global metadata # change the file extension to .json, .h5 or .bp for regular file writing - series = io.Series("../samples/dynamicConfig.bp", io.Access_Type.create, - defaults) + series = io.Series("../samples/dynamicConfig.bp", io.Access_Type.create, defaults) # now, write a number of iterations (or: snapshots, time steps) for i in range(10): @@ -85,8 +84,7 @@ def main(): electronPositions.set_attribute("comment", "I'm a comment") length = 10 - local_data = np.arange(i * length, (i + 1) * length, - dtype=np.dtype("double")) + local_data = np.arange(i * length, (i + 1) * length, dtype=np.dtype("double")) for dim in ["x", "y", "z"]: pos = electronPositions[dim] pos.reset_dataset(io.Dataset(local_data.dtype, [length])) @@ -103,32 +101,21 @@ def main(): # so we override the defaults # let's use JSON this time config = { - 'resizable': True, - 'adios2': { - 'dataset': { - 'operators': [] - } - }, - 'adios1': { - 'dataset': {} - } + "resizable": True, + "adios2": {"dataset": {"operators": []}}, + "adios1": {"dataset": {}}, } - config['adios2']['dataset'] = { - 'operators': [{ - 'type': 'zlib', - 'parameters': { - 'clevel': 9 - } - }] + config["adios2"]["dataset"] = { + "operators": [{"type": "zlib", "parameters": {"clevel": 9}}] } - config['adios1']['dataset'] = { - 'transform': 'blosc:compressor=zlib,shuffle=bit,lvl=1;nometa' + config["adios1"]["dataset"] = { + "transform": "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" } temperature = 
iteration.meshes["temperature"] temperature.unit_dimension = {io.Unit_Dimension.theta: 1.0} temperature.axis_labels = ["x", "y"] - temperature.grid_spacing = [1., 1.] + temperature.grid_spacing = [1.0, 1.0] # temperature has no x,y,z components, so skip the last layer: temperature_dataset = temperature[io.Mesh_Record_Component.SCALAR] # let's say we are in a 3x3 mesh diff --git a/examples/2_read_serial.py b/examples/2_read_serial.py index 4c1bf84402..a4ca21054a 100755 --- a/examples/2_read_serial.py +++ b/examples/2_read_serial.py @@ -10,10 +10,8 @@ if __name__ == "__main__": - series = io.Series("../samples/git-sample/data%T.h5", - io.Access.read_only) - print("Read a Series with openPMD standard version %s" % - series.openPMD) + series = io.Series("../samples/git-sample/data%T.h5", io.Access.read_only) + print("Read a Series with openPMD standard version %s" % series.openPMD) print("The Series contains {0} iterations:".format(len(series.iterations))) for i in series.iterations: @@ -25,8 +23,7 @@ for m in i.meshes: print("\t {0}".format(m)) print("") - print("Iteration 100 contains {0} particle species:".format( - len(i.particles))) + print("Iteration 100 contains {0} particle species:".format(len(i.particles))) for ps in i.particles: print("\t {0}".format(ps)) print("With records:") @@ -37,22 +34,19 @@ electrons = i.particles["electrons"] charge = electrons["charge"][io.Mesh_Record_Component.SCALAR] series.flush() - print("And the first electron particle has a charge {}" - .format(charge[0])) + print("And the first electron particle has a charge {}".format(charge[0])) print("") E_x = i.meshes["E"]["x"] shape = E_x.shape - print("Field E.x has shape {0} and datatype {1}".format( - shape, E_x.dtype)) + print("Field E.x has shape {0} and datatype {1}".format(shape, E_x.dtype)) chunk_data = E_x[1:3, 1:3, 1:2] # print("Queued the loading of a single chunk from disk, " # "ready to execute") series.flush() - print("Chunk has been read from disk\n" - "Read chunk contains:") + print("Chunk has been read from disk\n" "Read chunk contains:") print(chunk_data) # for row in range(2): # for col in range(2): diff --git a/examples/2a_read_thetaMode_serial.py b/examples/2a_read_thetaMode_serial.py index 1a88ac54c0..a42bedbc93 100755 --- a/examples/2a_read_thetaMode_serial.py +++ b/examples/2a_read_thetaMode_serial.py @@ -10,8 +10,7 @@ if __name__ == "__main__": - series = io.Series("../samples/git-sample/thetaMode/data%T.h5", - io.Access.read_only) + series = io.Series("../samples/git-sample/thetaMode/data%T.h5", io.Access.read_only) i = series.iterations[500] E_z_modes = i.meshes["E"]["z"] @@ -20,9 +19,9 @@ # read E_z in all modes E_z_raw = E_z_modes[:, :, :] # read E_z in mode_0 (one scalar field) - E_z_m0 = E_z_modes[0:1, 0:shape[1], 0:shape[2]] + E_z_m0 = E_z_modes[0:1, 0 : shape[1], 0 : shape[2]] # read E_z in mode_1 (two fields; skip mode_0 with one scalar field) - E_z_m1 = E_z_modes[1:3, 0:shape[1], 0:shape[2]] + E_z_m1 = E_z_modes[1:3, 0 : shape[1], 0 : shape[2]] series.flush() print(E_z_raw) # still mode-decomposed data, not too useful for users diff --git a/examples/3_write_serial.py b/examples/3_write_serial.py index 46dc536fb2..fccc338682 100755 --- a/examples/3_write_serial.py +++ b/examples/3_write_serial.py @@ -15,27 +15,25 @@ size = 3 # matrix dataset to write with values 0...size*size-1 - data = np.arange(size*size, dtype=np.double).reshape(3, 3) + data = np.arange(size * size, dtype=np.double).reshape(3, 3) - print("Set up a 2D square array ({0}x{1}) that will be written".format( - 
size, size)) + print("Set up a 2D square array ({0}x{1}) that will be written".format(size, size)) # open file for writing - series = io.Series( - "../samples/3_write_serial_py.h5", - io.Access.create - ) + series = io.Series("../samples/3_write_serial_py.h5", io.Access.create) print("Created an empty {0} Series".format(series.iteration_encoding)) print(len(series.iterations)) - rho = series.iterations[1]. \ - meshes["rho"][io.Mesh_Record_Component.SCALAR] + rho = series.iterations[1].meshes["rho"][io.Mesh_Record_Component.SCALAR] dataset = io.Dataset(data.dtype, data.shape) - print("Created a Dataset of size {0}x{1} and Datatype {2}".format( - dataset.extent[0], dataset.extent[1], dataset.dtype)) + print( + "Created a Dataset of size {0}x{1} and Datatype {2}".format( + dataset.extent[0], dataset.extent[1], dataset.dtype + ) + ) rho.reset_dataset(dataset) print("Set the dataset properties for the scalar field rho in iteration 1") @@ -45,8 +43,10 @@ rho[()] = data - print("Stored the whole Dataset contents as a single chunk, " + - "ready to write content") + print( + "Stored the whole Dataset contents as a single chunk, " + + "ready to write content" + ) series.flush() print("Dataset content has been fully written") diff --git a/examples/3a_write_thetaMode_serial.py b/examples/3a_write_thetaMode_serial.py index 7d3f581adf..9f1936c78b 100755 --- a/examples/3a_write_thetaMode_serial.py +++ b/examples/3a_write_thetaMode_serial.py @@ -12,22 +12,21 @@ if __name__ == "__main__": # open file for writing - series = io.Series( - "../samples/3_write_thetaMode_serial_py.h5", - io.Access.create - ) + series = io.Series("../samples/3_write_thetaMode_serial_py.h5", io.Access.create) # configure and setup geometry num_modes = 5 - num_fields = 1 + (num_modes-1) * 2 # the first mode is purely real + num_fields = 1 + (num_modes - 1) * 2 # the first mode is purely real N_r = 60 N_z = 200 # write values 0...size-1 - E_r_data = np.arange(num_fields*N_r*N_z, dtype=np.double) \ - .reshape(num_fields, N_r, N_z) - E_t_data = np.arange(num_fields*N_r*N_z, dtype=np.single) \ - .reshape(num_fields, N_r, N_z) + E_r_data = np.arange(num_fields * N_r * N_z, dtype=np.double).reshape( + num_fields, N_r, N_z + ) + E_t_data = np.arange(num_fields * N_r * N_z, dtype=np.single).reshape( + num_fields, N_r, N_z + ) geometry_parameters = "m={0};imag=+".format(num_modes) @@ -39,12 +38,11 @@ E.grid_unit_SI = 1.0 E.axis_labels = ["r", "z"] E.data_order = "C" - E.unit_dimension = {io.Unit_Dimension.I: 1.0, - io.Unit_Dimension.J: 2.0} + E.unit_dimension = {io.Unit_Dimension.I: 1.0, io.Unit_Dimension.J: 2.0} # write components: E_z, E_r, E_t E_z = E["z"] - E_z.unit_SI = 10. + E_z.unit_SI = 10.0 E_z.position = [0.0, 0.5] # (modes, r, z) see geometry_parameters E_z.reset_dataset(io.Dataset(io.Datatype.FLOAT, [num_fields, N_r, N_z])) @@ -52,13 +50,13 @@ # write all modes at once (otherwise iterate over modes and first index E_r = E["r"] - E_r.unit_SI = 10. + E_r.unit_SI = 10.0 E_r.position = [0.5, 0.0] E_r.reset_dataset(io.Dataset(E_r_data.dtype, E_r_data.shape)) E_r.store_chunk(E_r_data) E_t = E["t"] - E_t.unit_SI = 10. 
+ E_t.unit_SI = 10.0 E_t.position = [0.0, 0.0] E_t.reset_dataset(io.Dataset(E_t_data.dtype, E_t_data.shape)) E_t.store_chunk(E_t_data) diff --git a/examples/3b_write_resizable_particles.py b/examples/3b_write_resizable_particles.py index 03dd780b29..f64e64679e 100755 --- a/examples/3b_write_resizable_particles.py +++ b/examples/3b_write_resizable_particles.py @@ -13,15 +13,14 @@ if __name__ == "__main__": # open file for writing series = io.Series( - "../samples/3b_write_resizable_particles_py.h5", - io.Access.create + "../samples/3b_write_resizable_particles_py.h5", io.Access.create ) electrons = series.iterations[0].particles["electrons"] # our initial data to write - x = np.array([0., 1., 2., 3., 4.], dtype=np.double) - y = np.array([-2., -3., -4., -5., -6.], dtype=np.double) + x = np.array([0.0, 1.0, 2.0, 3.0, 4.0], dtype=np.double) + y = np.array([-2.0, -3.0, -4.0, -5.0, -6.0], dtype=np.double) # both x and y the same type, otherwise we use two distinct datasets dataset = io.Dataset(x.dtype, x.shape, '{ "resizable": true }') @@ -47,8 +46,8 @@ series.flush() # extend and append more particles - x = np.array([5., 6., 7.], dtype=np.double) - y = np.array([-7., -8., -9.], dtype=np.double) + x = np.array([5.0, 6.0, 7.0], dtype=np.double) + y = np.array([-7.0, -8.0, -9.0], dtype=np.double) offset += dataset.extent[0] dataset = io.Dataset([dataset.extent[0] + x.shape[0]]) diff --git a/examples/4_read_parallel.py b/examples/4_read_parallel.py index 4956ca78c9..054e8cbb9f 100755 --- a/examples/4_read_parallel.py +++ b/examples/4_read_parallel.py @@ -19,14 +19,9 @@ # also works with any other MPI communicator comm = MPI.COMM_WORLD - series = io.Series( - "../samples/git-sample/data%T.h5", - io.Access.read_only, - comm - ) + series = io.Series("../samples/git-sample/data%T.h5", io.Access.read_only, comm) if 0 == comm.rank: - print("Read a series in parallel with {} MPI ranks".format( - comm.size)) + print("Read a series in parallel with {} MPI ranks".format(comm.size)) E_x = series.iterations[100].meshes["E"]["x"] @@ -36,8 +31,10 @@ chunk_data = E_x.load_chunk(chunk_offset, chunk_extent) if 0 == comm.rank: - print("Queued the loading of a single chunk per MPI rank from disk, " - "ready to execute") + print( + "Queued the loading of a single chunk per MPI rank from disk, " + "ready to execute" + ) series.flush() if 0 == comm.rank: @@ -48,11 +45,14 @@ print("Rank {} - Read chunk contains:".format(i)) for row in range(chunk_extent[0]): for col in range(chunk_extent[1]): - print("\t({}|{}|1)\t{:e}".format( - row + chunk_offset[0], - col + chunk_offset[1], - chunk_data[row, col, 0] - ), end='') + print( + "\t({}|{}|1)\t{:e}".format( + row + chunk_offset[0], + col + chunk_offset[1], + chunk_data[row, col, 0], + ), + end="", + ) print("") # this barrier is not necessary but structures the example output diff --git a/examples/5_write_parallel.py b/examples/5_write_parallel.py index 662cb95353..d7371a59f5 100755 --- a/examples/5_write_parallel.py +++ b/examples/5_write_parallel.py @@ -23,43 +23,45 @@ # global data set to write: [MPI_Size * 10, 300] # each rank writes a 10x300 slice with its MPI rank as values local_value = comm.size - local_data = np.ones(10 * 300, - dtype=np.double).reshape(10, 300) * local_value + local_data = np.ones(10 * 300, dtype=np.double).reshape(10, 300) * local_value if 0 == comm.rank: - print("Set up a 2D array with 10x300 elements per MPI rank ({}x) " - "that will be written to disk".format(comm.size)) + print( + "Set up a 2D array with 10x300 elements per MPI rank ({}x) " + 
"that will be written to disk".format(comm.size) + ) # open file for writing - series = io.Series( - "../samples/5_parallel_write_py.h5", - io.Access.create, - comm - ) + series = io.Series("../samples/5_parallel_write_py.h5", io.Access.create, comm) if 0 == comm.rank: - print("Created an empty series in parallel with {} MPI ranks".format( - comm.size)) + print("Created an empty series in parallel with {} MPI ranks".format(comm.size)) - mymesh = series.iterations[1]. \ - meshes["mymesh"][io.Mesh_Record_Component.SCALAR] + mymesh = series.iterations[1].meshes["mymesh"][io.Mesh_Record_Component.SCALAR] # example 1D domain decomposition in first index global_extent = [comm.size * 10, 300] dataset = io.Dataset(local_data.dtype, global_extent) if 0 == comm.rank: - print("Prepared a Dataset of size {} and Datatype {}".format( - dataset.extent, dataset.dtype)) + print( + "Prepared a Dataset of size {} and Datatype {}".format( + dataset.extent, dataset.dtype + ) + ) mymesh.reset_dataset(dataset) if 0 == comm.rank: - print("Set the global Dataset properties for the scalar field " - "mymesh in iteration 1") + print( + "Set the global Dataset properties for the scalar field " + "mymesh in iteration 1" + ) # example shows a 1D domain decomposition in first index - mymesh[comm.rank*10:(comm.rank+1)*10, :] = local_data + mymesh[comm.rank * 10 : (comm.rank + 1) * 10, :] = local_data if 0 == comm.rank: - print("Registered a single chunk per MPI rank containing its " - "contribution, ready to write content to disk") + print( + "Registered a single chunk per MPI rank containing its " + "contribution, ready to write content to disk" + ) series.flush() if 0 == comm.rank: diff --git a/examples/7_extended_write_serial.py b/examples/7_extended_write_serial.py index a9cdcd291e..06acd20bfc 100755 --- a/examples/7_extended_write_serial.py +++ b/examples/7_extended_write_serial.py @@ -6,8 +6,7 @@ Authors: Axel Huebl, Fabian Koller License: LGPLv3+ """ -from openpmd_api import Series, Access, Dataset, Mesh_Record_Component, \ - Unit_Dimension +from openpmd_api import Series, Access, Dataset, Mesh_Record_Component, Unit_Dimension import json import numpy as np @@ -17,10 +16,7 @@ if __name__ == "__main__": # open file for writing - f = Series( - "working/directory/2D_simData_py.h5", - Access.create - ) + f = Series("working/directory/2D_simData_py.h5", Access.create) # all required openPMD attributes will be set to reasonable default values # (all ones, all zeros, empty strings,...) 
@@ -33,7 +29,7 @@ f.set_attribute( "custom_attribute_name", "This attribute is manually added and can contain about any datatype " - "you would want" + "you would want", ) # note that removing attributes required by the standard typically makes # the file unusable for post-processing @@ -51,15 +47,17 @@ # this is a reference to an iteration reference = f.iterations[1] - reference.comment = "Modifications to a reference will always be visible" \ - " in the output" + reference.comment = ( + "Modifications to a reference will always be visible" " in the output" + ) del reference # alternatively, a copy may be created and later re-assigned to # f.iterations[1] copy = f.iterations[1] # TODO .copy() - copy.comment = "Modifications to copies will only take effect after you " \ - "reassign the copy" + copy.comment = ( + "Modifications to copies will only take effect after you " "reassign the copy" + ) f.iterations[1] = copy del copy @@ -71,7 +69,9 @@ # https://github.com/openPMD/openPMD-standard/blob/upcoming-1.0.1/STANDARD.md#scalar-vector-and-tensor-records # Meshes are specialized records cur_it.meshes["generic_2D_field"].unit_dimension = { - Unit_Dimension.L: -3, Unit_Dimension.M: 1} + Unit_Dimension.L: -3, + Unit_Dimension.M: 1, + } # as this is a reference, it modifies the original resource lowRez = cur_it.meshes["generic_2D_field"] @@ -85,12 +85,12 @@ # particles are handled very similar electrons = cur_it.particles["electrons"] electrons.set_attribute( - "NoteWorthyParticleSpeciesProperty", - "Observing this species was a blast.") + "NoteWorthyParticleSpeciesProperty", "Observing this species was a blast." + ) electrons["displacement"].unit_dimension = {Unit_Dimension.M: 1} - electrons["displacement"]["x"].unit_SI = 1.e-6 + electrons["displacement"]["x"].unit_SI = 1.0e-6 del electrons["displacement"] - electrons["weighting"][SCALAR].make_constant(1.e-5) + electrons["weighting"][SCALAR].make_constant(1.0e-5) mesh = cur_it.meshes["lowRez_2D_field"] mesh.axis_labels = ["x", "y"] @@ -105,20 +105,11 @@ d = Dataset(partial_mesh.dtype, extent=[2, 5]) dataset_config = { "adios1": { - "dataset": { - "transform": "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa" - } + "dataset": {"transform": "blosc:compressor=zlib,shuffle=bit,lvl=1;nometa"} }, "adios2": { - "dataset": { - "operators": [{ - "type": "zlib", - "parameters": { - "clevel": 9 - } - }] - } - } + "dataset": {"operators": [{"type": "zlib", "parameters": {"clevel": 9}}]} + }, } d.options = json.dumps(dataset_config) mesh["x"].reset_dataset(d) @@ -136,15 +127,12 @@ dset = Dataset(np.dtype("uint64"), extent=[2]) electrons.particle_patches["numParticles"][SCALAR].reset_dataset(dset) - electrons.particle_patches["numParticlesOffset"][SCALAR]. 
\ - reset_dataset(dset) + electrons.particle_patches["numParticlesOffset"][SCALAR].reset_dataset(dset) dset = Dataset(partial_particlePos.dtype, extent=[2]) - electrons.particle_patches["offset"].unit_dimension = \ - {Unit_Dimension.L: 1} + electrons.particle_patches["offset"].unit_dimension = {Unit_Dimension.L: 1} electrons.particle_patches["offset"]["x"].reset_dataset(dset) - electrons.particle_patches["extent"].unit_dimension = \ - {Unit_Dimension.L: 1} + electrons.particle_patches["extent"].unit_dimension = {Unit_Dimension.L: 1} electrons.particle_patches["extent"]["x"].reset_dataset(dset) # at any point in time you may decide to dump already created output to @@ -154,10 +142,7 @@ # chunked writing of the final dataset at a time is supported # this loop writes one row at a time - mesh_x = np.array([ - [1, 3, 5, 7, 9], - [11, 13, 15, 17, 19] - ]) + mesh_x = np.array([[1, 3, 5, 7, 9], [11, 13, 15, 17, 19]]) particle_position = np.array([0.1, 0.2, 0.3, 0.4], dtype=np.float32) particle_position_offset = [0, 1, 2, 3] for i in [0, 1]: @@ -172,10 +157,10 @@ # resource is returned to the caller for idx in [0, 1]: - partial_particlePos[idx] = particle_position[idx + 2*i] - partial_particleOff[idx] = particle_position_offset[idx + 2*i] + partial_particlePos[idx] = particle_position[idx + 2 * i] + partial_particleOff[idx] = particle_position_offset[idx + 2 * i] - numParticlesOffset = 2*i + numParticlesOffset = 2 * i numParticles = 2 o = numParticlesOffset @@ -184,20 +169,25 @@ electrons["positionOffset"]["x"][o:u] = partial_particleOff electrons.particle_patches["numParticles"][SCALAR].store( - i, np.array([numParticles], dtype=np.uint64)) + i, np.array([numParticles], dtype=np.uint64) + ) electrons.particle_patches["numParticlesOffset"][SCALAR].store( - i, np.array([numParticlesOffset], dtype=np.uint64)) + i, np.array([numParticlesOffset], dtype=np.uint64) + ) electrons.particle_patches["offset"]["x"].store( - i, - np.array([particle_position[numParticlesOffset]], - dtype=np.float32)) + i, np.array([particle_position[numParticlesOffset]], dtype=np.float32) + ) electrons.particle_patches["extent"]["x"].store( i, - np.array([ - particle_position[numParticlesOffset + numParticles - 1] - - particle_position[numParticlesOffset] - ], dtype=np.float32)) + np.array( + [ + particle_position[numParticlesOffset + numParticles - 1] + - particle_position[numParticlesOffset] + ], + dtype=np.float32, + ), + ) mesh["y"].reset_dataset(d) mesh["y"].unit_SI = 4 diff --git a/examples/9_particle_write_serial.py b/examples/9_particle_write_serial.py index 4d96c83592..e33c08a63a 100755 --- a/examples/9_particle_write_serial.py +++ b/examples/9_particle_write_serial.py @@ -6,8 +6,7 @@ Authors: Axel Huebl License: LGPLv3+ """ -from openpmd_api import Series, Access, Dataset, Mesh_Record_Component, \ - Unit_Dimension +from openpmd_api import Series, Access, Dataset, Mesh_Record_Component, Unit_Dimension import numpy as np @@ -16,10 +15,7 @@ if __name__ == "__main__": # open file for writing - f = Series( - "../samples/7_particle_write_serial_py.h5", - Access.create - ) + f = Series("../samples/7_particle_write_serial_py.h5", Access.create) # all required openPMD attributes will be set to reasonable default values # (all ones, all zeros, empty strings,...) @@ -33,19 +29,19 @@ # particles electrons = cur_it.particles["electrons"] electrons.set_attribute( - "Electrons... the necessary evil for ion acceleration! ", - "Just kidding.") + "Electrons... the necessary evil for ion acceleration! ", "Just kidding." 
+ ) # let's set a weird user-defined record this time electrons["displacement"].unit_dimension = {Unit_Dimension.M: 1} - electrons["displacement"][SCALAR].unit_SI = 1.e-6 + electrons["displacement"][SCALAR].unit_SI = 1.0e-6 dset = Dataset(np.dtype("float64"), extent=[2]) electrons["displacement"][SCALAR].reset_dataset(dset) electrons["displacement"][SCALAR].make_constant(42.43) # don't like it anymore? remove it with: # del electrons["displacement"] - electrons["weighting"][SCALAR].make_constant(1.e-5) + electrons["weighting"][SCALAR].make_constant(1.0e-5) particlePos_x = np.random.rand(234).astype(np.float32) particlePos_y = np.random.rand(234).astype(np.float32) diff --git a/setup.py b/setup.py index 34973eea4f..84c00db7d3 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,7 @@ class CMakeExtension(Extension): - def __init__(self, name, sourcedir=''): + def __init__(self, name, sourcedir=""): Extension.__init__(self, name, sources=[]) self.sourcedir = os.path.abspath(sourcedir) @@ -18,174 +18,167 @@ def __init__(self, name, sourcedir=''): class CMakeBuild(build_ext): def run(self): try: - out = subprocess.check_output(['cmake', '--version']) + out = subprocess.check_output(["cmake", "--version"]) except OSError: raise RuntimeError( - "CMake 3.15.0+ must be installed to build the following " + - "extensions: " + - ", ".join(e.name for e in self.extensions)) - - cmake_version = LooseVersion(re.search( - r'version\s*([\d.]+)', - out.decode() - ).group(1)) - if cmake_version < '3.15.0': + "CMake 3.15.0+ must be installed to build the following " + + "extensions: " + + ", ".join(e.name for e in self.extensions) + ) + + cmake_version = LooseVersion( + re.search(r"version\s*([\d.]+)", out.decode()).group(1) + ) + if cmake_version < "3.15.0": raise RuntimeError("CMake >= 3.15.0 is required") for ext in self.extensions: self.build_extension(ext) def build_extension(self, ext): - extdir = os.path.abspath(os.path.dirname( - self.get_ext_fullpath(ext.name) - )) + extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) # required for auto-detection of auxiliary "native" libs if not extdir.endswith(os.path.sep): extdir += os.path.sep cmake_args = [ - '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + - os.path.join(extdir, "openpmd_api"), + "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + os.path.join(extdir, "openpmd_api"), # '-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=' + extdir, - '-DCMAKE_PYTHON_OUTPUT_DIRECTORY=' + extdir, - '-DPython_EXECUTABLE=' + sys.executable, - '-DopenPMD_USE_PYTHON:BOOL=ON', + "-DCMAKE_PYTHON_OUTPUT_DIRECTORY=" + extdir, + "-DPython_EXECUTABLE=" + sys.executable, + "-DopenPMD_USE_PYTHON:BOOL=ON", # variants - '-DopenPMD_USE_MPI:BOOL=' + openPMD_USE_MPI, + "-DopenPMD_USE_MPI:BOOL=" + openPMD_USE_MPI, # skip building cli tools, examples & tests # note: CLI tools provided as console scripts - '-DopenPMD_BUILD_CLI_TOOLS:BOOL=OFF', - '-DopenPMD_BUILD_EXAMPLES:BOOL=' + BUILD_EXAMPLES, - '-DopenPMD_BUILD_TESTING:BOOL=' + BUILD_TESTING, + "-DopenPMD_BUILD_CLI_TOOLS:BOOL=OFF", + "-DopenPMD_BUILD_EXAMPLES:BOOL=" + BUILD_EXAMPLES, + "-DopenPMD_BUILD_TESTING:BOOL=" + BUILD_TESTING, # static/shared libs - '-DopenPMD_BUILD_SHARED_LIBS:BOOL=' + BUILD_SHARED_LIBS, - '-DHDF5_USE_STATIC_LIBRARIES:BOOL=' + HDF5_USE_STATIC_LIBRARIES, - '-DADIOS_USE_STATIC_LIBS:BOOL=' + ADIOS_USE_STATIC_LIBS, + "-DopenPMD_BUILD_SHARED_LIBS:BOOL=" + BUILD_SHARED_LIBS, + "-DHDF5_USE_STATIC_LIBRARIES:BOOL=" + HDF5_USE_STATIC_LIBRARIES, + "-DADIOS_USE_STATIC_LIBS:BOOL=" + ADIOS_USE_STATIC_LIBS, # Unix: rpath to current dir when 
packaged # needed for shared (here non-default) builds and ADIOS1 # wrapper libraries - '-DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON', - '-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=OFF', + "-DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=ON", + "-DCMAKE_INSTALL_RPATH_USE_LINK_PATH:BOOL=OFF", # Windows: has no RPath concept, all `.dll`s must be in %PATH% # or same dir as calling executable ] if CMAKE_INTERPROCEDURAL_OPTIMIZATION is not None: - cmake_args.append('-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=' + - CMAKE_INTERPROCEDURAL_OPTIMIZATION) + cmake_args.append( + "-DCMAKE_INTERPROCEDURAL_OPTIMIZATION=" + + CMAKE_INTERPROCEDURAL_OPTIMIZATION + ) if sys.platform == "darwin": - cmake_args.append('-DCMAKE_INSTALL_RPATH=@loader_path') + cmake_args.append("-DCMAKE_INSTALL_RPATH=@loader_path") else: # values: linux*, aix, freebsd, ... # just as well win32 & cygwin (although Windows has no RPaths) - cmake_args.append('-DCMAKE_INSTALL_RPATH=$ORIGIN') + cmake_args.append("-DCMAKE_INSTALL_RPATH=$ORIGIN") - cfg = 'Debug' if self.debug else 'Release' - build_args = ['--config', cfg] + cfg = "Debug" if self.debug else "Release" + build_args = ["--config", cfg] if platform.system() == "Windows": cmake_args += [ - '-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format( - cfg.upper(), - os.path.join(extdir, "openpmd_api") + "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}".format( + cfg.upper(), os.path.join(extdir, "openpmd_api") ) ] - if sys.maxsize > 2**32: - cmake_args += ['-A', 'x64'] - build_args += ['--', '/m'] + if sys.maxsize > 2 ** 32: + cmake_args += ["-A", "x64"] + build_args += ["--", "/m"] else: - cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] - build_args += ['--', '-j2'] + cmake_args += ["-DCMAKE_BUILD_TYPE=" + cfg] + build_args += ["--", "-j2"] env = os.environ.copy() - env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format( - env.get('CXXFLAGS', ''), - self.distribution.get_version() + env["CXXFLAGS"] = '{} -DVERSION_INFO=\\"{}\\"'.format( + env.get("CXXFLAGS", ""), self.distribution.get_version() ) if not os.path.exists(self.build_temp): os.makedirs(self.build_temp) subprocess.check_call( - ['cmake', ext.sourcedir] + cmake_args, - cwd=self.build_temp, - env=env + ["cmake", ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env ) subprocess.check_call( - ['cmake', '--build', '.'] + build_args, - cwd=self.build_temp + ["cmake", "--build", "."] + build_args, cwd=self.build_temp ) # note that this does not call install; # we pick up artifacts directly from the build output dirs -with open('./README.md', encoding='utf-8') as f: +with open("./README.md", encoding="utf-8") as f: long_description = f.read() # Allow to control options via environment vars. 
# Work-around for https://github.com/pypa/setuptools/issues/1712 # note: changed default for SHARED, MPI, TESTING and EXAMPLES -openPMD_USE_MPI = os.environ.get('openPMD_USE_MPI', 'OFF') -HDF5_USE_STATIC_LIBRARIES = os.environ.get('HDF5_USE_STATIC_LIBRARIES', 'OFF') -ADIOS_USE_STATIC_LIBS = os.environ.get('ADIOS_USE_STATIC_LIBS', 'OFF') +openPMD_USE_MPI = os.environ.get("openPMD_USE_MPI", "OFF") +HDF5_USE_STATIC_LIBRARIES = os.environ.get("HDF5_USE_STATIC_LIBRARIES", "OFF") +ADIOS_USE_STATIC_LIBS = os.environ.get("ADIOS_USE_STATIC_LIBS", "OFF") # deprecated: backwards compatibility to <= 0.13.* -BUILD_SHARED_LIBS = os.environ.get('BUILD_SHARED_LIBS', 'OFF') -BUILD_TESTING = os.environ.get('BUILD_TESTING', 'OFF') -BUILD_EXAMPLES = os.environ.get('BUILD_EXAMPLES', 'OFF') +BUILD_SHARED_LIBS = os.environ.get("BUILD_SHARED_LIBS", "OFF") +BUILD_TESTING = os.environ.get("BUILD_TESTING", "OFF") +BUILD_EXAMPLES = os.environ.get("BUILD_EXAMPLES", "OFF") # end deprecated -BUILD_SHARED_LIBS = os.environ.get('openPMD_BUILD_SHARED_LIBS', - BUILD_SHARED_LIBS) -BUILD_TESTING = os.environ.get('openPMD_BUILD_TESTING', - BUILD_TESTING) -BUILD_EXAMPLES = os.environ.get('openPMD_BUILD_EXAMPLES', - BUILD_EXAMPLES) +BUILD_SHARED_LIBS = os.environ.get("openPMD_BUILD_SHARED_LIBS", BUILD_SHARED_LIBS) +BUILD_TESTING = os.environ.get("openPMD_BUILD_TESTING", BUILD_TESTING) +BUILD_EXAMPLES = os.environ.get("openPMD_BUILD_EXAMPLES", BUILD_EXAMPLES) CMAKE_INTERPROCEDURAL_OPTIMIZATION = os.environ.get( - 'CMAKE_INTERPROCEDURAL_OPTIMIZATION', None) + "CMAKE_INTERPROCEDURAL_OPTIMIZATION", None +) # https://cmake.org/cmake/help/v3.0/command/if.html -if openPMD_USE_MPI.upper() in ['1', 'ON', 'TRUE', 'YES']: +if openPMD_USE_MPI.upper() in ["1", "ON", "TRUE", "YES"]: openPMD_USE_MPI = "ON" else: openPMD_USE_MPI = "OFF" # Get the package requirements from the requirements.txt file -with open('./requirements.txt') as f: - install_requires = [line.strip('\n') for line in f.readlines()] +with open("./requirements.txt") as f: + install_requires = [line.strip("\n") for line in f.readlines()] if openPMD_USE_MPI == "ON": - install_requires.append('mpi4py>=2.1.0') + install_requires.append("mpi4py>=2.1.0") # keyword reference: # https://packaging.python.org/guides/distributing-packages-using-setuptools setup( - name='openPMD-api', + name="openPMD-api", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version='0.15.0.dev', - author='Axel Huebl, Franz Poeschel, Fabian Koller, Junmin Gu', - author_email='axelhuebl@lbl.gov, f.poeschel@hzdr.de', - maintainer='Axel Huebl', - maintainer_email='axelhuebl@lbl.gov', - description='C++ & Python API for Scientific I/O with openPMD', + version="0.15.0.dev", + author="Axel Huebl, Franz Poeschel, Fabian Koller, Junmin Gu", + author_email="axelhuebl@lbl.gov, f.poeschel@hzdr.de", + maintainer="Axel Huebl", + maintainer_email="axelhuebl@lbl.gov", + description="C++ & Python API for Scientific I/O with openPMD", long_description=long_description, - long_description_content_type='text/markdown', - keywords=('openPMD openscience hdf5 adios mpi hpc research ' - 'file-format file-handling'), - url='https://www.openPMD.org', + long_description_content_type="text/markdown", + keywords=( + "openPMD openscience hdf5 adios mpi hpc research " "file-format file-handling" + ), + url="https://www.openPMD.org", project_urls={ - 'Documentation': 'https://openpmd-api.readthedocs.io', - 'Doxygen': 'https://www.openpmd.org/openPMD-api', - 'Reference': 'https://doi.org/10.14278/rodare.27', - 'Source': 
'https://github.com/openPMD/openPMD-api', - 'Tracker': 'https://github.com/openPMD/openPMD-api/issues', + "Documentation": "https://openpmd-api.readthedocs.io", + "Doxygen": "https://www.openpmd.org/openPMD-api", + "Reference": "https://doi.org/10.14278/rodare.27", + "Source": "https://github.com/openPMD/openPMD-api", + "Tracker": "https://github.com/openPMD/openPMD-api/issues", }, - ext_modules=[CMakeExtension('openpmd_api_cxx')], + ext_modules=[CMakeExtension("openpmd_api_cxx")], cmdclass=dict(build_ext=CMakeBuild), # scripts=['openpmd-ls'], zip_safe=False, - python_requires='>=3.6, <3.11', + python_requires=">=3.6, <3.11", # tests_require=['pytest'], install_requires=install_requires, # see: src/bindings/python/cli entry_points={ - 'console_scripts': [ - 'openpmd-ls = openpmd_api.ls.__main__:main', - 'openpmd-pipe = openpmd_api.pipe.__main__:main' + "console_scripts": [ + "openpmd-ls = openpmd_api.ls.__main__:main", + "openpmd-pipe = openpmd_api.pipe.__main__:main", ] }, # we would like to use this mechanism, but pip / setuptools do not @@ -198,21 +191,23 @@ def build_extension(self, ext): # cmdclass={'test': PyTest}, # platforms='any', classifiers=[ - 'Development Status :: 4 - Beta', - 'Natural Language :: English', - 'Environment :: Console', - 'Intended Audience :: Science/Research', - 'Operating System :: OS Independent', - 'Topic :: Scientific/Engineering', - 'Topic :: Database :: Front-Ends', - 'Programming Language :: C++', - 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.6', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', - 'Programming Language :: Python :: 3.10', - ('License :: OSI Approved :: ' - 'GNU Lesser General Public License v3 or later (LGPLv3+)'), + "Development Status :: 4 - Beta", + "Natural Language :: English", + "Environment :: Console", + "Intended Audience :: Science/Research", + "Operating System :: OS Independent", + "Topic :: Scientific/Engineering", + "Topic :: Database :: Front-Ends", + "Programming Language :: C++", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + ( + "License :: OSI Approved :: " + "GNU Lesser General Public License v3 or later (LGPLv3+)" + ), ], ) diff --git a/src/binding/python/openpmd_api/DaskArray.py b/src/binding/python/openpmd_api/DaskArray.py index 1cb4260700..4cde297c8b 100644 --- a/src/binding/python/openpmd_api/DaskArray.py +++ b/src/binding/python/openpmd_api/DaskArray.py @@ -7,8 +7,10 @@ """ import math import numpy as np + try: from dask.array import from_array + found_dask = True except ImportError: found_dask = False @@ -79,8 +81,7 @@ def record_component_to_daskarray(record_component): dask.array : the (potentially distributed) array object created here """ if not found_dask: - raise ImportError("dask NOT found. Install dask for Dask DataFrame " - "support.") + raise ImportError("dask NOT found. 
Install dask for Dask DataFrame " "support.") # get optimal chunks chunks = record_component.available_chunks() diff --git a/src/binding/python/openpmd_api/DaskDataFrame.py b/src/binding/python/openpmd_api/DaskDataFrame.py index 7d0fb9204c..dba0b1f8e0 100644 --- a/src/binding/python/openpmd_api/DaskDataFrame.py +++ b/src/binding/python/openpmd_api/DaskDataFrame.py @@ -6,21 +6,24 @@ License: LGPLv3+ """ import numpy as np + try: import dask.dataframe as dd from dask.delayed import delayed + found_dask = True except ImportError: found_dask = False try: import pandas # noqa + found_pandas = True except ImportError: found_pandas = False def read_chunk_to_df(species, chunk): - stride = np.s_[chunk.offset[0]:chunk.offset[0]+chunk.extent[0]] + stride = np.s_[chunk.offset[0] : chunk.offset[0] + chunk.extent[0]] return species.to_df(stride) @@ -51,11 +54,9 @@ def particles_to_daskdataframe(particle_species): dask.dataframe : the central dataframe object created here """ if not found_dask: - raise ImportError("dask NOT found. Install dask for Dask DataFrame " - "support.") + raise ImportError("dask NOT found. Install dask for Dask DataFrame " "support.") if not found_pandas: # catch this early: before delayed functions - raise ImportError("pandas NOT found. Install pandas for DataFrame " - "support.") + raise ImportError("pandas NOT found. Install pandas for DataFrame " "support.") # get optimal chunks: query first non-constant record component and # assume the same chunking applies for all of them @@ -80,9 +81,7 @@ def particles_to_daskdataframe(particle_species): break # merge DataFrames - dfs = [ - delayed(read_chunk_to_df)(particle_species, chunk) for chunk in chunks - ] + dfs = [delayed(read_chunk_to_df)(particle_species, chunk) for chunk in chunks] df = dd.from_delayed(dfs) return df diff --git a/src/binding/python/openpmd_api/DataFrame.py b/src/binding/python/openpmd_api/DataFrame.py index 55e14fcafb..c6a952ea6f 100644 --- a/src/binding/python/openpmd_api/DataFrame.py +++ b/src/binding/python/openpmd_api/DataFrame.py @@ -7,8 +7,10 @@ """ import math import numpy as np + try: import pandas as pd + found_pandas = True except ImportError: found_pandas = False @@ -45,8 +47,7 @@ def particles_to_dataframe(particle_species, slice=None): pandas.DataFrame : the central dataframe object created here """ if not found_pandas: - raise ImportError("pandas NOT found. Install pandas for DataFrame " - "support.") + raise ImportError("pandas NOT found. 
Install pandas for DataFrame " "support.") if slice is None: slice = np.s_[()] @@ -61,7 +62,6 @@ def particles_to_dataframe(particle_species, slice=None): columns[column_name] = rc[slice] particle_species.series_flush() if not math.isclose(1.0, rc.unit_SI): - columns[column_name] = np.multiply( - columns[column_name], rc.unit_SI) + columns[column_name] = np.multiply(columns[column_name], rc.unit_SI) return pd.DataFrame(columns) diff --git a/src/binding/python/openpmd_api/ls/__main__.py b/src/binding/python/openpmd_api/ls/__main__.py index 2a4007755e..fb0b5547a3 100644 --- a/src/binding/python/openpmd_api/ls/__main__.py +++ b/src/binding/python/openpmd_api/ls/__main__.py @@ -13,7 +13,7 @@ def main(): - """ for usage documentation, call this with a --help argument """ + """for usage documentation, call this with a --help argument""" return _ls_run(sys.argv) diff --git a/src/binding/python/openpmd_api/pipe/__main__.py b/src/binding/python/openpmd_api/pipe/__main__.py index 50392344d9..0069cc13d4 100755 --- a/src/binding/python/openpmd_api/pipe/__main__.py +++ b/src/binding/python/openpmd_api/pipe/__main__.py @@ -16,6 +16,7 @@ # MPI is an optional dependency try: from mpi4py import MPI + HAVE_MPI = True except ImportError: HAVE_MPI = False @@ -51,18 +52,19 @@ def parse_args(program_name): --outfile simData_%T.bp {0} --infile uncompressed.bp \\ --outfile compressed.bp --outconfig @compressionConfig.json -""".format(os.path.basename(program_name))) - - parser.add_argument('--infile', type=str, help='In file') - parser.add_argument('--outfile', type=str, help='Out file') - parser.add_argument('--inconfig', - type=str, - default='{}', - help='JSON config for the in file') - parser.add_argument('--outconfig', - type=str, - default='{}', - help='JSON config for the out file') +""".format( + os.path.basename(program_name) + ), + ) + + parser.add_argument("--infile", type=str, help="In file") + parser.add_argument("--outfile", type=str, help="Out file") + parser.add_argument( + "--inconfig", type=str, default="{}", help="JSON config for the in file" + ) + parser.add_argument( + "--outconfig", type=str, default="{}", help="JSON config for the out file" + ) return parser.parse_args() @@ -72,8 +74,9 @@ class Chunk: A Chunk is an n-dimensional hypercube, defined by an offset and an extent. Offset and extent must be of the same dimensionality (Chunk.__len__). """ + def __init__(self, offset, extent): - assert (len(offset) == len(extent)) + assert len(offset) == len(extent) self.offset = offset self.extent = extent @@ -95,9 +98,9 @@ def slice1D(self, mpi_rank, mpi_size, dimension=None): for k, v in enumerate(self.extent): if v > maximum: dimension = k - assert (dimension < len(self)) + assert dimension < len(self) # no offset - assert (self.offset == [0 for _ in range(len(self))]) + assert self.offset == [0 for _ in range(len(self))] offset = [0 for _ in range(len(self))] stride = self.extent[dimension] // mpi_size rest = self.extent[dimension] % mpi_size @@ -147,6 +150,7 @@ class particle_patch_load: read from the sink. This class stores the needed parameters to .store(). """ + def __init__(self, data, dest): self.data = data self.dest = dest @@ -160,6 +164,7 @@ class pipe: """ Represents the configuration of one "pipe" pass. 
""" + def __init__(self, infile, outfile, inconfig, outconfig, comm): self.infile = infile self.outfile = outfile @@ -172,23 +177,23 @@ def run(self): if self.comm.size == 1: print("Opening data source") sys.stdout.flush() - inseries = io.Series(self.infile, io.Access.read_only, - self.inconfig) + inseries = io.Series(self.infile, io.Access.read_only, self.inconfig) print("Opening data sink") sys.stdout.flush() - outseries = io.Series(self.outfile, io.Access.create, - self.outconfig) + outseries = io.Series(self.outfile, io.Access.create, self.outconfig) print("Opened input and output") sys.stdout.flush() else: print("Opening data source on rank {}.".format(self.comm.rank)) sys.stdout.flush() - inseries = io.Series(self.infile, io.Access.read_only, self.comm, - self.inconfig) + inseries = io.Series( + self.infile, io.Access.read_only, self.comm, self.inconfig + ) print("Opening data sink on rank {}.".format(self.comm.rank)) sys.stdout.flush() - outseries = io.Series(self.outfile, io.Access.create, self.comm, - self.outconfig) + outseries = io.Series( + self.outfile, io.Access.create, self.comm, self.outconfig + ) print("Opened input and output on rank {}.".format(self.comm.rank)) sys.stdout.flush() self.__copy(inseries, outseries) @@ -199,18 +204,18 @@ def __copy(self, src, dest, current_path="/data/"): Copies data from src to dest. May represent any point in the openPMD hierarchy, but src and dest must both represent the same layer. """ - if (type(src) != type(dest) - and not isinstance(src, io.IndexedIteration) - and not isinstance(dest, io.Iteration)): - raise RuntimeError( - "Internal error: Trying to copy mismatching types") + if ( + type(src) != type(dest) + and not isinstance(src, io.IndexedIteration) + and not isinstance(dest, io.Iteration) + ): + raise RuntimeError("Internal error: Trying to copy mismatching types") attribute_dtypes = src.attribute_dtypes # The following attributes are written automatically by openPMD-api # and should not be manually overwritten here ignored_attributes = { - io.Series: - ["basePath", "iterationEncoding", "iterationFormat", "openPMD"], - io.Iteration: ["snapshot"] + io.Series: ["basePath", "iterationEncoding", "iterationFormat", "openPMD"], + io.Iteration: ["snapshot"], } for key in src.attributes: ignore_this_attribute = False @@ -224,24 +229,32 @@ def __copy(self, src, dest, current_path="/data/"): attr_type = attribute_dtypes[key] dest.set_attribute(key, attr, attr_type) container_types = [ - io.Mesh_Container, io.Particle_Container, io.ParticleSpecies, - io.Record, io.Mesh, io.Particle_Patches, io.Patch_Record + io.Mesh_Container, + io.Particle_Container, + io.ParticleSpecies, + io.Record, + io.Mesh, + io.Particle_Patches, + io.Patch_Record, ] if isinstance(src, io.Series): # main loop: read iterations of src, write to dest write_iterations = dest.write_iterations() for in_iteration in src.read_iterations(): if self.comm.rank == 0: - print("Iteration {0} contains {1} meshes:".format( - in_iteration.iteration_index, - len(in_iteration.meshes))) + print( + "Iteration {0} contains {1} meshes:".format( + in_iteration.iteration_index, len(in_iteration.meshes) + ) + ) for m in in_iteration.meshes: print("\t {0}".format(m)) print("") print( "Iteration {0} contains {1} particle species:".format( - in_iteration.iteration_index, - len(in_iteration.particles))) + in_iteration.iteration_index, len(in_iteration.particles) + ) + ) for ps in in_iteration.particles: print("\t {0}".format(ps)) print("With records:") @@ -251,12 +264,16 @@ def __copy(self, src, 
dest, current_path="/data/"): sys.stdout.flush() self.__particle_patches = [] self.__copy( - in_iteration, out_iteration, - current_path + str(in_iteration.iteration_index) + "/") + in_iteration, + out_iteration, + current_path + str(in_iteration.iteration_index) + "/", + ) for deferred in self.loads: deferred.source.load_chunk( - deferred.dynamicView.current_buffer(), deferred.offset, - deferred.extent) + deferred.dynamicView.current_buffer(), + deferred.offset, + deferred.extent, + ) in_iteration.close() for patch_load in self.__particle_patches: patch_load.run() @@ -282,26 +299,29 @@ def __copy(self, src, dest, current_path="/data/"): end = local_chunk.offset.copy() for i in range(len(end)): end[i] += local_chunk.extent[i] - print("{}\t{}/{}:\t{} -- {}".format( - current_path, self.comm.rank, self.comm.size, - local_chunk.offset, end)) + print( + "{}\t{}/{}:\t{} -- {}".format( + current_path, + self.comm.rank, + self.comm.size, + local_chunk.offset, + end, + ) + ) span = dest.store_chunk(local_chunk.offset, local_chunk.extent) self.loads.append( - deferred_load(src, span, local_chunk.offset, - local_chunk.extent)) + deferred_load(src, span, local_chunk.offset, local_chunk.extent) + ) elif isinstance(src, io.Patch_Record_Component): dest.reset_dataset(io.Dataset(src.dtype, src.shape)) if self.comm.rank == 0: - self.__particle_patches.append( - particle_patch_load(src.load(), dest)) + self.__particle_patches.append(particle_patch_load(src.load(), dest)) elif isinstance(src, io.Iteration): self.__copy(src.meshes, dest.meshes, current_path + "meshes/") - self.__copy(src.particles, dest.particles, - current_path + "particles/") - elif any([ - isinstance(src, container_type) - for container_type in container_types - ]): + self.__copy(src.particles, dest.particles, current_path + "particles/") + elif any( + [isinstance(src, container_type) for container_type in container_types] + ): for key in src: self.__copy(src[key], dest[key], current_path + key + "/") if isinstance(src, io.ParticleSpecies): @@ -315,12 +335,18 @@ def main(): if not args.infile or not args.outfile: print("Please specify parameters --infile and --outfile.") sys.exit(1) - if (HAVE_MPI): - run_pipe = pipe(args.infile, args.outfile, args.inconfig, - args.outconfig, MPI.COMM_WORLD) + if HAVE_MPI: + run_pipe = pipe( + args.infile, args.outfile, args.inconfig, args.outconfig, MPI.COMM_WORLD + ) else: - run_pipe = pipe(args.infile, args.outfile, args.inconfig, - args.outconfig, FallbackMPICommunicator()) + run_pipe = pipe( + args.infile, + args.outfile, + args.inconfig, + args.outconfig, + FallbackMPICommunicator(), + ) run_pipe.run() diff --git a/test/python/unittest/API/APITest.py b/test/python/unittest/API/APITest.py index 8c314b7a00..ecf041ab94 100644 --- a/test/python/unittest/API/APITest.py +++ b/test/python/unittest/API/APITest.py @@ -13,8 +13,10 @@ import shutil import unittest import ctypes + try: import numpy as np + found_numpy = True print("numpy version: ", np.__version__) except ImportError: @@ -24,34 +26,35 @@ from TestUtilities.TestUtilities import generateTestFilePath tested_file_extensions = [ - ext for ext in io.file_extensions if ext != 'sst' and ext != 'ssc' + ext for ext in io.file_extensions if ext != "sst" and ext != "ssc" ] class APITest(unittest.TestCase): - """ Test class testing the openPMD python API (plus some IO). """ + """Test class testing the openPMD python API (plus some IO).""" @classmethod def setUpClass(cls): - """ Setting up the test class. 
""" + """Setting up the test class.""" pass @classmethod def tearDownClass(cls): - """ Tearing down the test class. """ + """Tearing down the test class.""" pass def setUp(self): - """ Setting up a test. """ + """Setting up a test.""" self.__files_to_remove = [] self.__dirs_to_remove = [] path_to_field_data = generateTestFilePath( - os.path.join("issue-sample", "no_particles", "data%T.h5")) + os.path.join("issue-sample", "no_particles", "data%T.h5") + ) path_to_particle_data = generateTestFilePath( - os.path.join("issue-sample", "no_fields", "data%T.h5")) - path_to_data = generateTestFilePath( - os.path.join("git-sample", "data%T.h5")) + os.path.join("issue-sample", "no_fields", "data%T.h5") + ) + path_to_data = generateTestFilePath(os.path.join("git-sample", "data%T.h5")) mode = io.Access.read_only self.__field_series = io.Series(path_to_field_data, mode) self.__particle_series = io.Series(path_to_particle_data, mode) @@ -67,7 +70,7 @@ def setUp(self): # assert io.__author__ != "" def tearDown(self): - """ Tearing down a test. """ + """Tearing down a test.""" for f in self.__files_to_remove: if os.path.isfile(f): os.remove(f) @@ -80,7 +83,7 @@ def tearDown(self): del self.__series def testFieldData(self): - """ Testing serial IO on a pure field dataset. """ + """Testing serial IO on a pure field dataset.""" # Get reference to series stored on test case. series = self.__field_series @@ -103,7 +106,7 @@ def testFieldData(self): self.assertEqual(len(i.particles), 0) def testParticleData(self): - """ Testing serial IO on a pure particle dataset. """ + """Testing serial IO on a pure particle dataset.""" # Get reference to series stored on test case. series = self.__field_series @@ -123,10 +126,7 @@ def testParticleData(self): def attributeRoundTrip(self, file_ending): # write - series = io.Series( - "unittest_py_API." + file_ending, - io.Access.create - ) + series = io.Series("unittest_py_API." + file_ending, io.Access.create) # meta data series.set_software("nonsense") # with unspecified version @@ -135,7 +135,7 @@ def attributeRoundTrip(self, file_ending): series.machine = "testMachine" # write one of each supported types - series.set_attribute("char", 'c') # string + series.set_attribute("char", "c") # string series.set_attribute("pyint", 13) series.set_attribute("pyfloat", 3.1416) series.set_attribute("pystring", "howdy!") @@ -144,10 +144,42 @@ def attributeRoundTrip(self, file_ending): series.set_attribute("pybool", False) # array of ... - series.set_attribute("arr_pyint", (13, 26, 39, 52, )) - series.set_attribute("arr_pyfloat", (1.2, 3.4, 4.5, 5.6, )) - series.set_attribute("arr_pystring", ("x", "y", "z", "www", )) - series.set_attribute("arr_pybool", (False, True, True, False, )) + series.set_attribute( + "arr_pyint", + ( + 13, + 26, + 39, + 52, + ), + ) + series.set_attribute( + "arr_pyfloat", + ( + 1.2, + 3.4, + 4.5, + 5.6, + ), + ) + series.set_attribute( + "arr_pystring", + ( + "x", + "y", + "z", + "www", + ), + ) + series.set_attribute( + "arr_pybool", + ( + False, + True, + True, + False, + ), + ) # list of ... 
series.set_attribute("l_pyint", [13, 26, 39, 52]) series.set_attribute("l_pyfloat", [1.2, 3.4, 4.5, 5.6]) @@ -164,24 +196,67 @@ def attributeRoundTrip(self, file_ending): series.set_attribute("single", np.single(1.234)) series.set_attribute("double", np.double(1.234567)) series.set_attribute("longdouble", np.longdouble(1.23456789)) - series.set_attribute("csingle", np.complex64(1.+2.j)) - series.set_attribute("cdouble", np.complex128(3.+4.j)) + series.set_attribute("csingle", np.complex64(1.0 + 2.0j)) + series.set_attribute("cdouble", np.complex128(3.0 + 4.0j)) if file_ending != "bp": - series.set_attribute("clongdouble", np.clongdouble(5.+6.j)) + series.set_attribute("clongdouble", np.clongdouble(5.0 + 6.0j)) # array of ... - series.set_attribute("arr_int16", (np.int16(23), np.int16(26), )) - series.set_attribute("arr_int32", (np.int32(34), np.int32(37), )) - series.set_attribute("arr_int64", (np.int64(45), np.int64(48), )) - series.set_attribute("arr_uint16", - (np.uint16(23), np.uint16(26), )) - series.set_attribute("arr_uint32", - (np.uint32(34), np.uint32(37), )) - series.set_attribute("arr_uint64", - (np.uint64(45), np.uint64(48), )) - series.set_attribute("arr_single", - (np.single(5.6), np.single(5.9), )) - series.set_attribute("arr_double", - (np.double(6.7), np.double(7.1), )) + series.set_attribute( + "arr_int16", + ( + np.int16(23), + np.int16(26), + ), + ) + series.set_attribute( + "arr_int32", + ( + np.int32(34), + np.int32(37), + ), + ) + series.set_attribute( + "arr_int64", + ( + np.int64(45), + np.int64(48), + ), + ) + series.set_attribute( + "arr_uint16", + ( + np.uint16(23), + np.uint16(26), + ), + ) + series.set_attribute( + "arr_uint32", + ( + np.uint32(34), + np.uint32(37), + ), + ) + series.set_attribute( + "arr_uint64", + ( + np.uint64(45), + np.uint64(48), + ), + ) + series.set_attribute( + "arr_single", + ( + np.single(5.6), + np.single(5.9), + ), + ) + series.set_attribute( + "arr_double", + ( + np.double(6.7), + np.double(7.1), + ), + ) # list of ... series.set_attribute("l_int16", [np.int16(23), np.int16(26)]) series.set_attribute("l_int32", [np.int32(34), np.int32(37)]) @@ -191,8 +266,9 @@ def attributeRoundTrip(self, file_ending): series.set_attribute("l_uint64", [np.uint64(45), np.uint64(48)]) series.set_attribute("l_single", [np.single(5.6), np.single(5.9)]) series.set_attribute("l_double", [np.double(6.7), np.double(7.1)]) - series.set_attribute("l_longdouble", - [np.longdouble(7.8e9), np.longdouble(8.2e3)]) + series.set_attribute( + "l_longdouble", [np.longdouble(7.8e9), np.longdouble(8.2e3)] + ) # TODO: ComplexWarning: Casting complex values to real discards the # imaginary part # series.set_attribute("l_csingle", @@ -207,33 +283,32 @@ def attributeRoundTrip(self, file_ending): # np.clongfloat(8.2e3-9.1e3j)]) # numpy.array of ... 
- series.set_attribute("nparr_int16", - np.array([234, 567], dtype=np.int16)) - series.set_attribute("nparr_int32", - np.array([456, 789], dtype=np.int32)) - series.set_attribute("nparr_int64", - np.array([678, 901], dtype=np.int64)) - series.set_attribute("nparr_single", - np.array([1.2, 2.3], dtype=np.single)) - series.set_attribute("nparr_double", - np.array([4.5, 6.7], dtype=np.double)) - series.set_attribute("nparr_longdouble", - np.array([8.9, 7.6], dtype=np.longdouble)) + series.set_attribute("nparr_int16", np.array([234, 567], dtype=np.int16)) + series.set_attribute("nparr_int32", np.array([456, 789], dtype=np.int32)) + series.set_attribute("nparr_int64", np.array([678, 901], dtype=np.int64)) + series.set_attribute("nparr_single", np.array([1.2, 2.3], dtype=np.single)) + series.set_attribute("nparr_double", np.array([4.5, 6.7], dtype=np.double)) + series.set_attribute( + "nparr_longdouble", np.array([8.9, 7.6], dtype=np.longdouble) + ) # note: looks like ADIOS 1.13.1 cannot write arrays of complex # as attributes (writes 1st value for single and crashes # in write for complex double) # https://github.com/ornladios/ADIOS/issues/212 if series.backend != "ADIOS1": - series.set_attribute("nparr_csingle", - np.array([1.2 - 0.3j, 2.3 + 4.2j], - dtype=np.complex64)) - series.set_attribute("nparr_cdouble", - np.array([4.5 + 1.1j, 6.7 - 2.2j], - dtype=np.complex128)) + series.set_attribute( + "nparr_csingle", + np.array([1.2 - 0.3j, 2.3 + 4.2j], dtype=np.complex64), + ) + series.set_attribute( + "nparr_cdouble", + np.array([4.5 + 1.1j, 6.7 - 2.2j], dtype=np.complex128), + ) if file_ending != "bp": - series.set_attribute("nparr_clongdouble", - np.array([8.9 + 7.8j, 7.6 + 9.2j], - dtype=np.clongdouble)) + series.set_attribute( + "nparr_clongdouble", + np.array([8.9 + 7.8j, 7.6 + 9.2j], dtype=np.clongdouble), + ) # c_types # TODO remove the .value and handle types directly? @@ -246,18 +321,15 @@ def attributeRoundTrip(self, file_ending): series.set_attribute("uint16_c", ctypes.c_uint16(5).value) series.set_attribute("uint32_c", ctypes.c_uint32(6).value) series.set_attribute("uint64_c", ctypes.c_uint64(7).value) - series.set_attribute("float_c", ctypes.c_float(8.e9).value) - series.set_attribute("double_c", ctypes.c_double(7.e289).value) + series.set_attribute("float_c", ctypes.c_float(8.0e9).value) + series.set_attribute("double_c", ctypes.c_double(7.0e289).value) # TODO init of > e304 ? - series.set_attribute("longdouble_c", ctypes.c_longdouble(6.e200).value) + series.set_attribute("longdouble_c", ctypes.c_longdouble(6.0e200).value) del series # read back - series = io.Series( - "unittest_py_API." + file_ending, - io.Access.read_only - ) + series = io.Series("unittest_py_API." 
+ file_ending, io.Access.read_only) self.assertEqual(series.software, "openPMD-api-python-tests") self.assertEqual(series.software_version, "0.42.0") @@ -266,8 +338,7 @@ def attributeRoundTrip(self, file_ending): self.assertEqual(series.get_attribute("char"), "c") self.assertEqual(series.get_attribute("pystring"), "howdy!") self.assertEqual(series.get_attribute("pystring2"), "howdy, too!") - self.assertEqual(bytes(series.get_attribute("pystring3")), - b"howdy, again!") + self.assertEqual(bytes(series.get_attribute("pystring3")), b"howdy, again!") self.assertEqual(series.get_attribute("pyint"), 13) self.assertAlmostEqual(series.get_attribute("pyfloat"), 3.1416) self.assertEqual(series.get_attribute("pybool"), False) @@ -277,39 +348,50 @@ def attributeRoundTrip(self, file_ending): self.assertEqual(series.get_attribute("int32"), 43) self.assertEqual(series.get_attribute("int64"), 987654321) self.assertAlmostEqual(series.get_attribute("single"), 1.234) - self.assertAlmostEqual(series.get_attribute("double"), - 1.234567) - self.assertAlmostEqual(series.get_attribute("longdouble"), - 1.23456789) - np.testing.assert_almost_equal(series.get_attribute("csingle"), - np.complex64(1.+2.j)) - self.assertAlmostEqual(series.get_attribute("cdouble"), - 3.+4.j) + self.assertAlmostEqual(series.get_attribute("double"), 1.234567) + self.assertAlmostEqual(series.get_attribute("longdouble"), 1.23456789) + np.testing.assert_almost_equal( + series.get_attribute("csingle"), np.complex64(1.0 + 2.0j) + ) + self.assertAlmostEqual(series.get_attribute("cdouble"), 3.0 + 4.0j) if file_ending != "bp": - self.assertAlmostEqual(series.get_attribute("clongdouble"), - 5.+6.j) + self.assertAlmostEqual(series.get_attribute("clongdouble"), 5.0 + 6.0j) # array of ... (will be returned as list) - self.assertListEqual(series.get_attribute("arr_int16"), - [np.int16(23), np.int16(26), ]) + self.assertListEqual( + series.get_attribute("arr_int16"), + [ + np.int16(23), + np.int16(26), + ], + ) # list of ... 
- self.assertListEqual(series.get_attribute("l_int16"), - [np.int16(23), np.int16(26)]) - self.assertListEqual(series.get_attribute("l_int32"), - [np.int32(34), np.int32(37)]) - self.assertListEqual(series.get_attribute("l_int64"), - [np.int64(45), np.int64(48)]) - self.assertListEqual(series.get_attribute("l_uint16"), - [np.uint16(23), np.uint16(26)]) - self.assertListEqual(series.get_attribute("l_uint32"), - [np.uint32(34), np.uint32(37)]) - self.assertListEqual(series.get_attribute("l_uint64"), - [np.uint64(45), np.uint64(48)]) + self.assertListEqual( + series.get_attribute("l_int16"), [np.int16(23), np.int16(26)] + ) + self.assertListEqual( + series.get_attribute("l_int32"), [np.int32(34), np.int32(37)] + ) + self.assertListEqual( + series.get_attribute("l_int64"), [np.int64(45), np.int64(48)] + ) + self.assertListEqual( + series.get_attribute("l_uint16"), [np.uint16(23), np.uint16(26)] + ) + self.assertListEqual( + series.get_attribute("l_uint32"), [np.uint32(34), np.uint32(37)] + ) + self.assertListEqual( + series.get_attribute("l_uint64"), [np.uint64(45), np.uint64(48)] + ) # self.assertListEqual(series.get_attribute("l_single"), # [np.single(5.6), np.single(5.9)]) - self.assertListEqual(series.get_attribute("l_double"), - [np.double(6.7), np.double(7.1)]) - self.assertListEqual(series.get_attribute("l_longdouble"), - [np.longdouble(7.8e9), np.longdouble(8.2e3)]) + self.assertListEqual( + series.get_attribute("l_double"), [np.double(6.7), np.double(7.1)] + ) + self.assertListEqual( + series.get_attribute("l_longdouble"), + [np.longdouble(7.8e9), np.longdouble(8.2e3)], + ) # TODO: l_csingle # self.assertListEqual(series.get_attribute("l_cdouble"), # [np.complex128(6.7 + 6.8j), @@ -320,31 +402,31 @@ def attributeRoundTrip(self, file_ending): # np.clongdouble(8.2e3 - 9.1e3j)]) # numpy.array of ... - self.assertListEqual(series.get_attribute("nparr_int16"), - [234, 567]) - self.assertListEqual(series.get_attribute("nparr_int32"), - [456, 789]) - self.assertListEqual(series.get_attribute("nparr_int64"), - [678, 901]) + self.assertListEqual(series.get_attribute("nparr_int16"), [234, 567]) + self.assertListEqual(series.get_attribute("nparr_int32"), [456, 789]) + self.assertListEqual(series.get_attribute("nparr_int64"), [678, 901]) np.testing.assert_almost_equal( - series.get_attribute("nparr_single"), [1.2, 2.3]) + series.get_attribute("nparr_single"), [1.2, 2.3] + ) np.testing.assert_almost_equal( - series.get_attribute("nparr_double"), [4.5, 6.7]) + series.get_attribute("nparr_double"), [4.5, 6.7] + ) np.testing.assert_almost_equal( - series.get_attribute("nparr_longdouble"), [8.9, 7.6]) + series.get_attribute("nparr_longdouble"), [8.9, 7.6] + ) # see https://github.com/ornladios/ADIOS/issues/212 if series.backend != "ADIOS1": np.testing.assert_almost_equal( series.get_attribute("nparr_csingle"), - np.array([1.2 - 0.3j, 2.3 + 4.2j], - dtype=np.complex64)) + np.array([1.2 - 0.3j, 2.3 + 4.2j], dtype=np.complex64), + ) np.testing.assert_almost_equal( - series.get_attribute("nparr_cdouble"), - [4.5 + 1.1j, 6.7 - 2.2j]) + series.get_attribute("nparr_cdouble"), [4.5 + 1.1j, 6.7 - 2.2j] + ) if file_ending != "bp": # not in ADIOS 1.13.1 nor ADIOS 2.7.0 np.testing.assert_almost_equal( - series.get_attribute("nparr_clongdouble"), - [8.9 + 7.8j, 7.6 + 9.2j]) + series.get_attribute("nparr_clongdouble"), [8.9 + 7.8j, 7.6 + 9.2j] + ) # TODO instead of returning lists, return all arrays as np.array? 
# self.assertEqual( # series.get_attribute("nparr_int16").dtype, np.int16) @@ -363,17 +445,18 @@ def attributeRoundTrip(self, file_ending): self.assertEqual(series.get_attribute("byte_c"), 30) self.assertEqual(series.get_attribute("ubyte_c"), 50) if file_ending != "json": # TODO: returns [100] instead of 100 in json - self.assertEqual(chr(series.get_attribute("char_c")), 'd') + self.assertEqual(chr(series.get_attribute("char_c")), "d") self.assertEqual(series.get_attribute("int16_c"), 2) self.assertEqual(series.get_attribute("int32_c"), 3) self.assertEqual(series.get_attribute("int64_c"), 4) self.assertEqual(series.get_attribute("uint16_c"), 5) self.assertEqual(series.get_attribute("uint32_c"), 6) self.assertEqual(series.get_attribute("uint64_c"), 7) - self.assertAlmostEqual(series.get_attribute("float_c"), 8.e9) - self.assertAlmostEqual(series.get_attribute("double_c"), 7.e289) - self.assertAlmostEqual(series.get_attribute("longdouble_c"), - ctypes.c_longdouble(6.e200).value) + self.assertAlmostEqual(series.get_attribute("float_c"), 8.0e9) + self.assertAlmostEqual(series.get_attribute("double_c"), 7.0e289) + self.assertAlmostEqual( + series.get_attribute("longdouble_c"), ctypes.c_longdouble(6.0e200).value + ) # check listing API io.list_series(series) @@ -384,10 +467,7 @@ def testAttributes(self): def makeConstantRoundTrip(self, file_ending): # write - series = io.Series( - "unittest_py_constant_API." + file_ending, - io.Access.create - ) + series = io.Series("unittest_py_constant_API." + file_ending, io.Access.create) ms = series.iterations[0].meshes SCALAR = io.Mesh_Record_Component.SCALAR @@ -407,10 +487,10 @@ def makeConstantRoundTrip(self, file_ending): ms["pybool"][SCALAR].make_constant(False) # just testing the data_order attribute - ms["char"].data_order = 'C' - ms["pyint"].data_order = 'F' - self.assertEqual(ms["char"].data_order, 'C') - self.assertEqual(ms["pyint"].data_order, 'F') + ms["char"].data_order = "C" + ms["pyint"].data_order = "F" + self.assertEqual(ms["char"].data_order, "C") + self.assertEqual(ms["pyint"].data_order, "F") # staggering meta data ms["pyint"][SCALAR].position = [0.25, 0.5] @@ -440,39 +520,35 @@ def makeConstantRoundTrip(self, file_ending): ms["single"][SCALAR].make_constant(np.single(1.234)) ms["double"][SCALAR].reset_dataset(DS(np.dtype("double"), extent)) ms["double"][SCALAR].make_constant(np.double(1.234567)) - ms["longdouble"][SCALAR].reset_dataset(DS(np.dtype("longdouble"), - extent)) + ms["longdouble"][SCALAR].reset_dataset(DS(np.dtype("longdouble"), extent)) ms["longdouble"][SCALAR].make_constant(np.longdouble(1.23456789)) - ms["complex64"][SCALAR].reset_dataset( - DS(np.dtype("complex64"), extent)) - ms["complex64"][SCALAR].make_constant( - np.complex64(1.234 + 2.345j)) - ms["complex128"][SCALAR].reset_dataset( - DS(np.dtype("complex128"), extent)) - ms["complex128"][SCALAR].make_constant( - np.complex128(1.234567 + 2.345678j)) + ms["complex64"][SCALAR].reset_dataset(DS(np.dtype("complex64"), extent)) + ms["complex64"][SCALAR].make_constant(np.complex64(1.234 + 2.345j)) + ms["complex128"][SCALAR].reset_dataset(DS(np.dtype("complex128"), extent)) + ms["complex128"][SCALAR].make_constant(np.complex128(1.234567 + 2.345678j)) if file_ending != "bp": ms["clongdouble"][SCALAR].reset_dataset( - DS(np.dtype("clongdouble"), extent)) + DS(np.dtype("clongdouble"), extent) + ) ms["clongdouble"][SCALAR].make_constant( - np.clongdouble(1.23456789 + 2.34567890j)) + np.clongdouble(1.23456789 + 2.34567890j) + ) # flush and close file del series # read back 
series = io.Series( - "unittest_py_constant_API." + file_ending, - io.Access.read_only + "unittest_py_constant_API." + file_ending, io.Access.read_only ) ms = series.iterations[0].meshes o = [1, 2, 3] e = [1, 1, 1] - self.assertEqual(ms["char"].data_order, 'C') - self.assertEqual(ms["pyint"].data_order, 'F') + self.assertEqual(ms["char"].data_order, "C") + self.assertEqual(ms["pyint"].data_order, "F") self.assertTrue(ms["char"].scalar) self.assertTrue(ms["pyint"].scalar) @@ -485,17 +561,15 @@ def makeConstantRoundTrip(self, file_ending): self.assertTrue(ms["pybool"][SCALAR].constant) if found_numpy: - self.assertEqual(ms["char"][SCALAR].load_chunk(o, e), ord('c')) + self.assertEqual(ms["char"][SCALAR].load_chunk(o, e), ord("c")) self.assertEqual(ms["pyint"][SCALAR].load_chunk(o, e), 13) self.assertEqual(ms["pyfloat"][SCALAR].load_chunk(o, e), 3.1416) self.assertEqual(ms["pybool"][SCALAR].load_chunk(o, e), False) if found_numpy: # staggering meta data - np.testing.assert_allclose(ms["pyint"][SCALAR].position, - [0.25, 0.5]) - np.testing.assert_allclose(ms["pyfloat"][SCALAR].position, - [0.5, 0.75]) + np.testing.assert_allclose(ms["pyint"][SCALAR].position, [0.25, 0.5]) + np.testing.assert_allclose(ms["pyfloat"][SCALAR].position, [0.5, 0.75]) self.assertTrue(ms["int16"].scalar) self.assertTrue(ms["int32"].scalar) @@ -511,58 +585,73 @@ def makeConstantRoundTrip(self, file_ending): self.assertTrue(ms["uint64"][SCALAR].constant) self.assertTrue(ms["double"][SCALAR].constant) - self.assertTrue(ms["int16"][SCALAR].load_chunk(o, e).dtype == - np.dtype('int16')) - self.assertTrue(ms["int32"][SCALAR].load_chunk(o, e).dtype == - np.dtype('int32')) - self.assertTrue(ms["int64"][SCALAR].load_chunk(o, e).dtype == - np.dtype('int64')) - self.assertTrue(ms["uint16"][SCALAR].load_chunk(o, e).dtype == - np.dtype('uint16')) - self.assertTrue(ms["uint32"][SCALAR].load_chunk(o, e).dtype == - np.dtype('uint32')) - self.assertTrue(ms["uint64"][SCALAR].load_chunk(o, e).dtype == - np.dtype('uint64')) - self.assertTrue(ms["single"][SCALAR].load_chunk(o, e).dtype == - np.dtype('single')) - self.assertTrue(ms["double"][SCALAR].load_chunk(o, e).dtype == - np.dtype('double')) - self.assertTrue(ms["longdouble"][SCALAR].load_chunk(o, e).dtype - == np.dtype('longdouble')) - self.assertTrue(ms["complex64"][SCALAR].load_chunk(o, e).dtype - == np.dtype('complex64')) - self.assertTrue(ms["complex128"][SCALAR].load_chunk(o, e).dtype - == np.dtype('complex128')) + self.assertTrue( + ms["int16"][SCALAR].load_chunk(o, e).dtype == np.dtype("int16") + ) + self.assertTrue( + ms["int32"][SCALAR].load_chunk(o, e).dtype == np.dtype("int32") + ) + self.assertTrue( + ms["int64"][SCALAR].load_chunk(o, e).dtype == np.dtype("int64") + ) + self.assertTrue( + ms["uint16"][SCALAR].load_chunk(o, e).dtype == np.dtype("uint16") + ) + self.assertTrue( + ms["uint32"][SCALAR].load_chunk(o, e).dtype == np.dtype("uint32") + ) + self.assertTrue( + ms["uint64"][SCALAR].load_chunk(o, e).dtype == np.dtype("uint64") + ) + self.assertTrue( + ms["single"][SCALAR].load_chunk(o, e).dtype == np.dtype("single") + ) + self.assertTrue( + ms["double"][SCALAR].load_chunk(o, e).dtype == np.dtype("double") + ) + self.assertTrue( + ms["longdouble"][SCALAR].load_chunk(o, e).dtype + == np.dtype("longdouble") + ) + self.assertTrue( + ms["complex64"][SCALAR].load_chunk(o, e).dtype == np.dtype("complex64") + ) + self.assertTrue( + ms["complex128"][SCALAR].load_chunk(o, e).dtype + == np.dtype("complex128") + ) if file_ending != "bp": - 
self.assertTrue(ms["clongdouble"][SCALAR].load_chunk(o, e) - .dtype == np.dtype('clongdouble')) + self.assertTrue( + ms["clongdouble"][SCALAR].load_chunk(o, e).dtype + == np.dtype("clongdouble") + ) # FIXME: why does this even work w/o a flush() ? - self.assertEqual(ms["int16"][SCALAR].load_chunk(o, e), - np.int16(234)) - self.assertEqual(ms["int32"][SCALAR].load_chunk(o, e), - np.int32(43)) - self.assertEqual(ms["int64"][SCALAR].load_chunk(o, e), - np.int64(987654321)) - self.assertEqual(ms["uint16"][SCALAR].load_chunk(o, e), - np.uint16(134)) - self.assertEqual(ms["uint32"][SCALAR].load_chunk(o, e), - np.uint32(32)) - self.assertEqual(ms["uint64"][SCALAR].load_chunk(o, e), - np.uint64(9876543210)) - self.assertEqual(ms["single"][SCALAR].load_chunk(o, e), - np.single(1.234)) - self.assertEqual(ms["longdouble"][SCALAR].load_chunk(o, e), - np.longdouble(1.23456789)) - self.assertEqual(ms["double"][SCALAR].load_chunk(o, e), - np.double(1.234567)) - self.assertEqual(ms["complex64"][SCALAR].load_chunk(o, e), - np.complex64(1.234 + 2.345j)) - self.assertEqual(ms["complex128"][SCALAR].load_chunk(o, e), - np.complex128(1.234567 + 2.345678j)) + self.assertEqual(ms["int16"][SCALAR].load_chunk(o, e), np.int16(234)) + self.assertEqual(ms["int32"][SCALAR].load_chunk(o, e), np.int32(43)) + self.assertEqual(ms["int64"][SCALAR].load_chunk(o, e), np.int64(987654321)) + self.assertEqual(ms["uint16"][SCALAR].load_chunk(o, e), np.uint16(134)) + self.assertEqual(ms["uint32"][SCALAR].load_chunk(o, e), np.uint32(32)) + self.assertEqual( + ms["uint64"][SCALAR].load_chunk(o, e), np.uint64(9876543210) + ) + self.assertEqual(ms["single"][SCALAR].load_chunk(o, e), np.single(1.234)) + self.assertEqual( + ms["longdouble"][SCALAR].load_chunk(o, e), np.longdouble(1.23456789) + ) + self.assertEqual(ms["double"][SCALAR].load_chunk(o, e), np.double(1.234567)) + self.assertEqual( + ms["complex64"][SCALAR].load_chunk(o, e), np.complex64(1.234 + 2.345j) + ) + self.assertEqual( + ms["complex128"][SCALAR].load_chunk(o, e), + np.complex128(1.234567 + 2.345678j), + ) if file_ending != "bp": - self.assertEqual(ms["clongdouble"][SCALAR].load_chunk(o, e), - np.clongdouble(1.23456789 + 2.34567890j)) + self.assertEqual( + ms["clongdouble"][SCALAR].load_chunk(o, e), + np.clongdouble(1.23456789 + 2.34567890j), + ) def testConstantRecords(self): for ext in tested_file_extensions: @@ -573,10 +662,7 @@ def makeDataRoundTrip(self, file_ending): return # write - series = io.Series( - "unittest_py_data_API." + file_ending, - io.Access.create - ) + series = io.Series("unittest_py_data_API." 
+ file_ending, io.Access.create) it = series.iterations[0] @@ -589,31 +675,26 @@ def makeDataRoundTrip(self, file_ending): extent = [42, 24, 11] - ms["complex64"][SCALAR].reset_dataset( - DS(np.dtype("complex64"), extent)) + ms["complex64"][SCALAR].reset_dataset(DS(np.dtype("complex64"), extent)) ms["complex64"][SCALAR].store_chunk( - np.ones(extent, dtype=np.complex64) * - np.complex64(1.234 + 2.345j)) - ms["complex128"][SCALAR].reset_dataset( - DS(np.dtype("complex128"), extent)) + np.ones(extent, dtype=np.complex64) * np.complex64(1.234 + 2.345j) + ) + ms["complex128"][SCALAR].reset_dataset(DS(np.dtype("complex128"), extent)) ms["complex128"][SCALAR].store_chunk( - np.ones(extent, dtype=np.complex128) * - np.complex128(1.234567 + 2.345678j)) + np.ones(extent, dtype=np.complex128) * np.complex128(1.234567 + 2.345678j) + ) if file_ending != "bp": - ms["clongdouble"][SCALAR].reset_dataset( - DS(np.dtype("clongdouble"), extent)) + ms["clongdouble"][SCALAR].reset_dataset(DS(np.dtype("clongdouble"), extent)) ms["clongdouble"][SCALAR].store_chunk( - np.ones(extent, dtype=np.clongdouble) * - np.clongdouble(1.23456789 + 2.34567890j)) + np.ones(extent, dtype=np.clongdouble) + * np.clongdouble(1.23456789 + 2.34567890j) + ) # flush and close file del series # read back - series = io.Series( - "unittest_py_data_API." + file_ending, - io.Access.read_only - ) + series = io.Series("unittest_py_data_API." + file_ending, io.Access.read_only) it = series.iterations[0] @@ -632,21 +713,17 @@ def makeDataRoundTrip(self, file_ending): if file_ending != "bp": dc256 = ms["clongdouble"][SCALAR].load_chunk(o, e) - self.assertTrue(dc64.dtype == np.dtype('complex64')) - self.assertTrue(dc128.dtype == np.dtype('complex128')) + self.assertTrue(dc64.dtype == np.dtype("complex64")) + self.assertTrue(dc128.dtype == np.dtype("complex128")) if file_ending != "bp": - self.assertTrue( - dc256.dtype == np.dtype('clongdouble')) + self.assertTrue(dc256.dtype == np.dtype("clongdouble")) series.flush() - self.assertEqual(dc64, - np.complex64(1.234 + 2.345j)) - self.assertEqual(dc128, - np.complex128(1.234567 + 2.345678j)) + self.assertEqual(dc64, np.complex64(1.234 + 2.345j)) + self.assertEqual(dc128, np.complex128(1.234567 + 2.345678j)) if file_ending != "bp": - self.assertEqual(dc256, - np.clongdouble(1.23456789 + 2.34567890j)) + self.assertEqual(dc256, np.clongdouble(1.23456789 + 2.34567890j)) def testDataRoundTrip(self): for ext in tested_file_extensions: @@ -655,8 +732,7 @@ def testDataRoundTrip(self): def makeEmptyRoundTrip(self, file_ending): # write series = io.Series( - "unittest_py_empty_API." + file_ending, - io.Access_Type.create + "unittest_py_empty_API." + file_ending, io.Access_Type.create ) ms = series.iterations[0].meshes @@ -692,98 +768,34 @@ def makeEmptyRoundTrip(self, file_ending): # read back series = io.Series( - "unittest_py_empty_API." + file_ending, - io.Access_Type.read_only + "unittest_py_empty_API." 
+ file_ending, io.Access_Type.read_only ) ms = series.iterations[0].meshes - self.assertEqual( - ms["CHAR"][SCALAR].shape, - [0 for _ in range(1)] - ) - self.assertEqual( - ms["UCHAR"][SCALAR].shape, - [0 for _ in range(2)] - ) - self.assertEqual( - ms["SHORT"][SCALAR].shape, - [0 for _ in range(3)] - ) - self.assertEqual( - ms["INT"][SCALAR].shape, - [0 for _ in range(4)] - ) - self.assertEqual( - ms["LONG"][SCALAR].shape, - [0 for _ in range(5)] - ) - self.assertEqual( - ms["LONGLONG"][SCALAR].shape, - [0 for _ in range(6)] - ) - self.assertEqual( - ms["USHORT"][SCALAR].shape, - [0 for _ in range(7)] - ) - self.assertEqual( - ms["UINT"][SCALAR].shape, - [0 for _ in range(8)] - ) - self.assertEqual( - ms["ULONG"][SCALAR].shape, - [0 for _ in range(9)] - ) - self.assertEqual( - ms["ULONGLONG"][SCALAR].shape, - [0 for _ in range(10)] - ) - self.assertEqual( - ms["FLOAT"][SCALAR].shape, - [0 for _ in range(11)] - ) - self.assertEqual( - ms["DOUBLE"][SCALAR].shape, - [0 for _ in range(12)] - ) - self.assertEqual( - ms["LONG_DOUBLE"][SCALAR].shape, - [0 for _ in range(13)] - ) + self.assertEqual(ms["CHAR"][SCALAR].shape, [0 for _ in range(1)]) + self.assertEqual(ms["UCHAR"][SCALAR].shape, [0 for _ in range(2)]) + self.assertEqual(ms["SHORT"][SCALAR].shape, [0 for _ in range(3)]) + self.assertEqual(ms["INT"][SCALAR].shape, [0 for _ in range(4)]) + self.assertEqual(ms["LONG"][SCALAR].shape, [0 for _ in range(5)]) + self.assertEqual(ms["LONGLONG"][SCALAR].shape, [0 for _ in range(6)]) + self.assertEqual(ms["USHORT"][SCALAR].shape, [0 for _ in range(7)]) + self.assertEqual(ms["UINT"][SCALAR].shape, [0 for _ in range(8)]) + self.assertEqual(ms["ULONG"][SCALAR].shape, [0 for _ in range(9)]) + self.assertEqual(ms["ULONGLONG"][SCALAR].shape, [0 for _ in range(10)]) + self.assertEqual(ms["FLOAT"][SCALAR].shape, [0 for _ in range(11)]) + self.assertEqual(ms["DOUBLE"][SCALAR].shape, [0 for _ in range(12)]) + self.assertEqual(ms["LONG_DOUBLE"][SCALAR].shape, [0 for _ in range(13)]) if found_numpy: - self.assertEqual( - ms["int16"][SCALAR].shape, - [0 for _ in range(14)] - ) - self.assertEqual( - ms["int32"][SCALAR].shape, - [0 for _ in range(15)] - ) - self.assertEqual( - ms["int64"][SCALAR].shape, - [0 for _ in range(16)] - ) - self.assertEqual( - ms["uint16"][SCALAR].shape, - [0 for _ in range(17)] - ) - self.assertEqual( - ms["uint32"][SCALAR].shape, - [0 for _ in range(18)] - ) - self.assertEqual( - ms["uint64"][SCALAR].shape, - [0 for _ in range(19)] - ) - self.assertEqual( - ms["single"][SCALAR].shape, - [0 for _ in range(20)] - ) - self.assertEqual( - ms["np_double"][SCALAR].shape, - [0 for _ in range(21)] - ) + self.assertEqual(ms["int16"][SCALAR].shape, [0 for _ in range(14)]) + self.assertEqual(ms["int32"][SCALAR].shape, [0 for _ in range(15)]) + self.assertEqual(ms["int64"][SCALAR].shape, [0 for _ in range(16)]) + self.assertEqual(ms["uint16"][SCALAR].shape, [0 for _ in range(17)]) + self.assertEqual(ms["uint32"][SCALAR].shape, [0 for _ in range(18)]) + self.assertEqual(ms["uint64"][SCALAR].shape, [0 for _ in range(19)]) + self.assertEqual(ms["single"][SCALAR].shape, [0 for _ in range(20)]) + self.assertEqual(ms["np_double"][SCALAR].shape, [0 for _ in range(21)]) # test datatypes for fixed-sized types only if found_numpy: @@ -794,22 +806,21 @@ def makeEmptyRoundTrip(self, file_ending): self.assertTrue(ms["uint32"][SCALAR].dtype == np.dtype("uint32")) self.assertTrue(ms["uint64"][SCALAR].dtype == np.dtype("uint64")) self.assertTrue(ms["single"][SCALAR].dtype == np.dtype("single")) - 
self.assertTrue( - ms["np_double"][SCALAR].dtype == np.dtype("double")) + self.assertTrue(ms["np_double"][SCALAR].dtype == np.dtype("double")) def testEmptyRecords(self): backend_filesupport = { - 'json': 'json', - 'hdf5': 'h5', - 'adios1': 'bp', - 'adios2': 'bp' + "json": "json", + "hdf5": "h5", + "adios1": "bp", + "adios2": "bp", } for b in io.variants: if io.variants[b] is True and b in backend_filesupport: self.makeEmptyRoundTrip(backend_filesupport[b]) def testData(self): - """ Test IO on data containing particles and meshes.""" + """Test IO on data containing particles and meshes.""" # Get series. series = self.__series @@ -856,41 +867,80 @@ def testData(self): assert pos_y.dtype == np.double assert w.dtype == np.double - self.assertSequenceEqual(pos_y.shape, [270625, ]) - self.assertSequenceEqual(w.shape, [270625, ]) + self.assertSequenceEqual( + pos_y.shape, + [ + 270625, + ], + ) + self.assertSequenceEqual( + w.shape, + [ + 270625, + ], + ) if found_numpy: self.assertEqual(pos_y.dtype, np.float64) self.assertEqual(w.dtype, np.float64) - y_data = pos_y.load_chunk([200000, ], [10, ]) - w_data = w.load_chunk([200000, ], [10, ]) + y_data = pos_y.load_chunk( + [ + 200000, + ], + [ + 10, + ], + ) + w_data = w.load_chunk( + [ + 200000, + ], + [ + 10, + ], + ) electrons.series_flush() - self.assertSequenceEqual(y_data.shape, [10, ]) - self.assertSequenceEqual(w_data.shape, [10, ]) + self.assertSequenceEqual( + y_data.shape, + [ + 10, + ], + ) + self.assertSequenceEqual( + w_data.shape, + [ + 10, + ], + ) self.assertEqual(y_data.dtype, np.float64) self.assertEqual(w_data.dtype, np.float64) np.testing.assert_allclose( y_data, - [-9.60001131e-06, -8.80004967e-06, -8.00007455e-06, - -7.20008487e-06, -6.40007232e-06, -5.60002710e-06, - -4.79993871e-06, -3.99980648e-06, -3.19964406e-06, - -2.39947455e-06] - ) - np.testing.assert_allclose( - w_data, - np.ones((10,)) * 1600000. + [ + -9.60001131e-06, + -8.80004967e-06, + -8.00007455e-06, + -7.20008487e-06, + -6.40007232e-06, + -5.60002710e-06, + -4.79993871e-06, + -3.99980648e-06, + -3.19964406e-06, + -2.39947455e-06, + ], ) + np.testing.assert_allclose(w_data, np.ones((10,)) * 1600000.0) E_x = E["x"] shape = E_x.shape if found_numpy: - np.testing.assert_allclose(E.unit_dimension, - [1., 1., -3., -1., 0., 0., 0.]) - np.testing.assert_allclose(E_x.position, - [0.5, 0., 0.]) + np.testing.assert_allclose( + E.unit_dimension, [1.0, 1.0, -3.0, -1.0, 0.0, 0.0, 0.0] + ) + np.testing.assert_allclose(E_x.position, [0.5, 0.0, 0.0]) self.assertAlmostEqual(E_x.unit_SI, 1.0) self.assertSequenceEqual(shape, [26, 26, 201]) @@ -908,20 +958,11 @@ def testData(self): self.assertEqual(chunk_data.dtype, np.float64) np.testing.assert_allclose( chunk_data, - [ - [ - [6.26273197e7], - [2.70402498e8] - ], - [ - [-1.89238617e8], - [-1.66413019e8] - ] - ] + [[[6.26273197e7], [2.70402498e8]], [[-1.89238617e8], [-1.66413019e8]]], ) def testPickle(self): - """ test pickling of any attributable, especially record components.""" + """test pickling of any attributable, especially record components.""" import pickle # Get series. 
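The read checks in the hunks above (shape [26, 26, 201], dtype float64, the chunk comparison against known field values) all exercise one user-facing pattern. The sketch below shows that pattern using the same sample file, iteration, and record names as the tests; the offset/extent passed to load_chunk are illustrative only, while io.Series, io.Access.read_only, load_chunk and flush appear verbatim in the test code.

import openpmd_api as io

# Open the sample series the unit tests read from (read-only access).
series = io.Series("../samples/git-sample/data%T.h5", io.Access.read_only)

# Pick a mesh record component and enqueue a chunk load.
E_x = series.iterations[100].meshes["E"]["x"]
print(E_x.shape)                              # [26, 26, 201] for the sample data
chunk = E_x.load_chunk([1, 1, 1], [2, 2, 1])  # offset, extent (illustrative values)

# The returned array is only filled after flushing the enqueued operations.
series.flush()
print(chunk.shape, chunk.dtype)               # (2, 2, 1) float64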
@@ -973,8 +1014,7 @@ def testPickle(self): pos = pickle.loads(pickled_pos) pos_y = pickle.loads(pickled_pos_y) w = pickle.loads(pickled_w) - print( - f"This is E_x.position of the unpickled object:\n{E_x.position}\n") + print(f"This is E_x.position of the unpickled object:\n{E_x.position}\n") self.assertIsInstance(E, io.Mesh) self.assertIsInstance(E_x, io.Mesh_Record_Component) @@ -1005,21 +1045,17 @@ def testPickle(self): # get particle data if found_numpy: - np.testing.assert_allclose(E.unit_dimension, - [1., 1., -3., -1., 0., 0., 0.]) - np.testing.assert_allclose(E_x.position, - [0.5, 0., 0.]) + np.testing.assert_allclose( + E.unit_dimension, [1.0, 1.0, -3.0, -1.0, 0.0, 0.0, 0.0] + ) + np.testing.assert_allclose(E_x.position, [0.5, 0.0, 0.0]) # indirectly accessed record component after pickle - np.testing.assert_allclose(data_indir, - data) + np.testing.assert_allclose(data_indir, data) # indirectly accessed record component after pickle - np.testing.assert_allclose(data_pos_y_indir1, - data_pos_y) - np.testing.assert_allclose(data_pos_y_indir2, - data_pos_y) + np.testing.assert_allclose(data_pos_y_indir1, data_pos_y) + np.testing.assert_allclose(data_pos_y_indir2, data_pos_y) # original data access vs. pickled access - np.testing.assert_allclose(data_pos_y_org, - data_pos_y) + np.testing.assert_allclose(data_pos_y_org, data_pos_y) self.assertAlmostEqual(E_x.unit_SI, 1.0) self.assertSequenceEqual(E_x.shape, [26, 26, 201]) @@ -1037,20 +1073,11 @@ def testPickle(self): self.assertEqual(chunk_data.dtype, np.float64) np.testing.assert_allclose( chunk_data, - [ - [ - [6.26273197e7], - [2.70402498e8] - ], - [ - [-1.89238617e8], - [-1.66413019e8] - ] - ] + [[[6.26273197e7], [2.70402498e8]], [[-1.89238617e8], [-1.66413019e8]]], ) def testLoadSeries(self): - """ Test loading an openPMD series from hdf5.""" + """Test loading an openPMD series from hdf5.""" # Get series. series = self.__series @@ -1059,7 +1086,7 @@ def testLoadSeries(self): self.assertEqual(series.openPMD, "1.1.0") def testListSeries(self): - """ Test print()-ing and openPMD series from hdf5.""" + """Test print()-ing and openPMD series from hdf5.""" # Get series. series = self.__series @@ -1071,7 +1098,7 @@ def testListSeries(self): print(io.list_series.__doc__) def testSliceRead(self): - """ Testing sliced read on record components. """ + """Testing sliced read on record components.""" # Get series. 
series = self.__series @@ -1103,16 +1130,31 @@ def testSliceRead(self): if not found_numpy: return - np.testing.assert_allclose(electrons["position"].unit_dimension, - [1., 0., 0., 0., 0., 0., 0.]) + np.testing.assert_allclose( + electrons["position"].unit_dimension, [1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + ) offset = [4, 5, 9] extent = [4, 2, 3] E_x_data = E_x.load_chunk(offset, extent) E_x_data_slice = E_x[4:8, 5:7, 9:12] - y_data = pos_y.load_chunk([200000, ], [10, ]) - w_data = w.load_chunk([200000, ], [10, ]) + y_data = pos_y.load_chunk( + [ + 200000, + ], + [ + 10, + ], + ) + w_data = w.load_chunk( + [ + 200000, + ], + [ + 10, + ], + ) y_data_slice = pos_y[200000:200010] w_data_slice = w[200000:200010] series.flush() @@ -1124,52 +1166,43 @@ def testSliceRead(self): self.assertEqual(y_data.dtype, y_data_slice.dtype) self.assertEqual(w_data.dtype, w_data_slice.dtype) - np.testing.assert_allclose( - E_x_data, - E_x_data_slice - ) - np.testing.assert_allclose( - y_data, - y_data_slice - ) - np.testing.assert_allclose( - w_data, - w_data_slice - ) + np.testing.assert_allclose(E_x_data, E_x_data_slice) + np.testing.assert_allclose(y_data, y_data_slice) + np.testing.assert_allclose(w_data, w_data_slice) # more exotic syntax # https://docs.scipy.org/doc/numpy-1.15.0/reference/arrays.indexing.html # - [x]: [M, L:LARGE, K:LARGE] over-select upper range # (is numpy-allowed: crop to max range) - d1 = pos_y[0:pos_y.shape[0]+10] - d2 = pos_y[0:pos_y.shape[0]] + d1 = pos_y[0 : pos_y.shape[0] + 10] + d2 = pos_y[0 : pos_y.shape[0]] series.flush() np.testing.assert_array_equal(d1.shape, d2.shape) np.testing.assert_allclose(d1, d2) # - [x]: [M, L, -K] negative indexes - d1 = E_x[4:8, 2:3, E_x.shape[2]-5] + d1 = E_x[4:8, 2:3, E_x.shape[2] - 5] d2 = E_x[4:8, 2:3, -4] series.flush() np.testing.assert_allclose(d1, d2) - d1 = E_x[4:8, 2:3, E_x.shape[2]-4:] + d1 = E_x[4:8, 2:3, E_x.shape[2] - 4 :] d2 = E_x[4:8, 2:3, -4:] series.flush() np.testing.assert_allclose(d1, d2) - d1 = E_x[4:8, E_x.shape[1]-3:E_x.shape[1], E_x.shape[2]-4:] + d1 = E_x[4:8, E_x.shape[1] - 3 : E_x.shape[1], E_x.shape[2] - 4 :] d2 = E_x[4:8, -3:, -4:] series.flush() np.testing.assert_allclose(d1, d2) - d1 = pos_y[0:pos_y.shape[0]-1] + d1 = pos_y[0 : pos_y.shape[0] - 1] d2 = pos_y[0:-1] series.flush() np.testing.assert_allclose(d1, d2) - d1 = w[0:w.shape[0]-2] + d1 = w[0 : w.shape[0] - 2] d2 = w[0:-2] series.flush() np.testing.assert_allclose(d1, d2) @@ -1209,17 +1242,17 @@ def testSliceRead(self): np.testing.assert_array_equal(d3.shape, [E_x.shape[0], 1, 1]) d1 = E_x[5, 6, :] - d2 = E_x[5, 6, 0:E_x.shape[2]] + d2 = E_x[5, 6, 0 : E_x.shape[2]] series.flush() np.testing.assert_allclose(d1, d2) d1 = pos_y[:] - d2 = pos_y[0:pos_y.shape[0]] + d2 = pos_y[0 : pos_y.shape[0]] series.flush() np.testing.assert_allclose(d1, d2) d1 = w[:] - d2 = w[0:w.shape[0]] + d2 = w[0 : w.shape[0]] series.flush() np.testing.assert_allclose(d1, d2) @@ -1266,7 +1299,7 @@ def testSliceRead(self): # - [x]: [()] all from all dimensions d1 = pos_y[()] - d2 = pos_y[0:pos_y.shape[0]] + d2 = pos_y[0 : pos_y.shape[0]] series.flush() np.testing.assert_allclose(d1, d2) @@ -1325,23 +1358,23 @@ def testSliceRead(self): d1 = w[w.shape[0]] # cropped to upper range - d1 = E_x[10:E_x.shape[0]+2, 0, 0] - d2 = pos_y[10:pos_y.shape[0]+3] + d1 = E_x[10 : E_x.shape[0] + 2, 0, 0] + d2 = pos_y[10 : pos_y.shape[0] + 3] self.assertEqual(d1.ndim, 1) self.assertEqual(d2.ndim, 1) - self.assertEqual(d1.shape[0], E_x.shape[0]-10) - self.assertEqual(d2.shape[0], pos_y.shape[0]-10) + 
self.assertEqual(d1.shape[0], E_x.shape[0] - 10) + self.assertEqual(d2.shape[0], pos_y.shape[0] - 10) # meta-data should have been accessible already series.flush() # negative index out-of-range checks with self.assertRaises(IndexError): - d1 = E_x[-E_x.shape[0]-1, 0, 0] + d1 = E_x[-E_x.shape[0] - 1, 0, 0] with self.assertRaises(IndexError): - d1 = E_x[0, -E_x.shape[1]-1, 0] + d1 = E_x[0, -E_x.shape[1] - 1, 0] with self.assertRaises(IndexError): - d1 = E_x[0, 0, -E_x.shape[2]-1] + d1 = E_x[0, 0, -E_x.shape[2] - 1] # - [x] too many indices passed for axes with self.assertRaises(IndexError): @@ -1379,16 +1412,13 @@ def testSliceWrite(self): self.backend_write_slices(ext) def backend_write_slices(self, file_ending): - """ Testing sliced write on record components. """ + """Testing sliced write on record components.""" if not found_numpy: return # get series - series = io.Series( - "unittest_py_slice_API." + file_ending, - io.Access.create - ) + series = io.Series("unittest_py_slice_API." + file_ending, io.Access.create) i = series.iterations[0] # create data to write @@ -1410,7 +1440,7 @@ def backend_write_slices(self, file_ending): # get a mesh record component rho = i.meshes["rho"][io.Record_Component.SCALAR] - rho.position = [0., 0.] # Yee staggered + rho.position = [0.0, 0.0] # Yee staggered rho.reset_dataset(io.Dataset(data.dtype, data.shape)) @@ -1450,7 +1480,7 @@ def backend_write_slices(self, file_ending): series.flush() def testIterations(self): - """ Test querying a series' iterations and loop over them. """ + """Test querying a series' iterations and loop over them.""" # Get series. series = self.__series @@ -1468,7 +1498,7 @@ def testIterations(self): self.assertIsInstance(i, io.Iteration) def testMeshes(self): - """ Test querying a mesh. """ + """Test querying a mesh.""" # Get series. series = self.__series @@ -1483,7 +1513,7 @@ def testMeshes(self): self.assertIsInstance(i.meshes[m], io.Mesh) def testParticles(self): - """ Test querying a particle species. """ + """Test querying a particle species.""" # Get series. series = self.__series @@ -1497,17 +1527,24 @@ def testParticles(self): self.assertIsInstance(i.particles[ps], io.ParticleSpecies) def testDatatype(self): - """ Test Datatype. """ + """Test Datatype.""" data_type = io.Datatype(1) del data_type def testDataset(self): - """ Test Dataset. """ + """Test Dataset.""" data_type = io.Datatype.LONG extent = [1, 1, 1] obj = io.Dataset(data_type, extent) if found_numpy: - d = np.array((1, 1, 1, ), dtype=np.int_) + d = np.array( + ( + 1, + 1, + 1, + ), + dtype=np.int_, + ) obj2 = io.Dataset(d.dtype, d.shape) assert data_type == io.determine_datatype(d.dtype) assert obj2.dtype == obj.dtype @@ -1515,12 +1552,12 @@ def testDataset(self): del obj def testGeometry(self): - """ Test Geometry. """ + """Test Geometry.""" obj = io.Geometry(0) del obj def testIteration(self): - """ Test Iteration. """ + """Test Iteration.""" self.assertRaises(TypeError, io.Iteration) iteration = self.__particle_series.iterations[400] @@ -1535,21 +1572,21 @@ def testIteration(self): # TODO verify change is reflected in original iteration object def testIteration_Encoding(self): - """ Test Iteration_Encoding. """ + """Test Iteration_Encoding.""" obj = io.Iteration_Encoding(1) del obj def testMesh(self): - """ Test Mesh. 
""" + """Test Mesh.""" self.assertRaises(TypeError, io.Mesh) - mesh = self.__series.iterations[100].meshes['E'] + mesh = self.__series.iterations[100].meshes["E"] copy_mesh = io.Mesh(mesh) - self.assertEqual(mesh.data_order, 'C') + self.assertEqual(mesh.data_order, "C") self.assertIsInstance(copy_mesh, io.Mesh) def testMesh_Container(self): - """ Test Mesh_Container. """ + """Test Mesh_Container.""" self.assertRaises(TypeError, io.Mesh_Container) def backend_particle_patches(self, file_ending): @@ -1558,12 +1595,13 @@ def backend_particle_patches(self, file_ending): DS = io.Dataset SCALAR = io.Record_Component.SCALAR - extent = [123, ] + extent = [ + 123, + ] num_patches = 2 series = io.Series( - "unittest_py_particle_patches." + file_ending, - io.Access.create + "unittest_py_particle_patches." + file_ending, io.Access.create ) e = series.iterations[42].particles["electrons"] @@ -1574,13 +1612,29 @@ def backend_particle_patches(self, file_ending): x.store_chunk(np.arange(extent[0], dtype=np.single)) o = e["positionOffset"][r] o.reset_dataset(DS(np.dtype("uint64"), extent)) - o.store_chunk(np.arange(extent[0], dtype=np.uint64), [0, ], extent) + o.store_chunk( + np.arange(extent[0], dtype=np.uint64), + [ + 0, + ], + extent, + ) - dset = DS(np.dtype("uint64"), [num_patches, ]) + dset = DS( + np.dtype("uint64"), + [ + num_patches, + ], + ) e.particle_patches["numParticles"][SCALAR].reset_dataset(dset) e.particle_patches["numParticlesOffset"][SCALAR].reset_dataset(dset) - dset = DS(np.dtype("single"), [num_patches, ]) + dset = DS( + np.dtype("single"), + [ + num_patches, + ], + ) e.particle_patches["offset"]["x"].reset_dataset(dset) e.particle_patches["offset"]["y"].reset_dataset(dset) e.particle_patches["extent"]["x"].reset_dataset(dset) @@ -1589,32 +1643,28 @@ def backend_particle_patches(self, file_ending): # patch 0 (decomposed in x) e.particle_patches["numParticles"][SCALAR].store(0, np.uint64(10)) e.particle_patches["numParticlesOffset"][SCALAR].store(0, np.uint64(0)) - e.particle_patches["offset"]["x"].store(0, np.single(0.)) - e.particle_patches["offset"]["y"].store(0, np.single(0.)) - e.particle_patches["extent"]["x"].store(0, np.single(10.)) - e.particle_patches["extent"]["y"].store(0, np.single(123.)) + e.particle_patches["offset"]["x"].store(0, np.single(0.0)) + e.particle_patches["offset"]["y"].store(0, np.single(0.0)) + e.particle_patches["extent"]["x"].store(0, np.single(10.0)) + e.particle_patches["extent"]["y"].store(0, np.single(123.0)) # patch 1 (decomposed in x) - e.particle_patches["numParticles"][SCALAR].store( - 1, np.uint64(113)) - e.particle_patches["numParticlesOffset"][SCALAR].store( - 1, np.uint64(10)) - e.particle_patches["offset"]["x"].store(1, np.single(10.)) - e.particle_patches["offset"]["y"].store(1, np.single(0.)) - e.particle_patches["extent"]["x"].store(1, np.single(113.)) - e.particle_patches["extent"]["y"].store(1, np.single(123.)) + e.particle_patches["numParticles"][SCALAR].store(1, np.uint64(113)) + e.particle_patches["numParticlesOffset"][SCALAR].store(1, np.uint64(10)) + e.particle_patches["offset"]["x"].store(1, np.single(10.0)) + e.particle_patches["offset"]["y"].store(1, np.single(0.0)) + e.particle_patches["extent"]["x"].store(1, np.single(113.0)) + e.particle_patches["extent"]["y"].store(1, np.single(123.0)) # read back del series series = io.Series( - "unittest_py_particle_patches." + file_ending, - io.Access.read_only + "unittest_py_particle_patches." 
+ file_ending, io.Access.read_only ) e = series.iterations[42].particles["electrons"] numParticles = e.particle_patches["numParticles"][SCALAR].load() - numParticlesOffset = e.particle_patches["numParticlesOffset"][SCALAR].\ - load() + numParticlesOffset = e.particle_patches["numParticlesOffset"][SCALAR].load() extent_x = e.particle_patches["extent"]["x"].load() extent_y = e.particle_patches["extent"]["y"].load() offset_x = e.particle_patches["offset"]["x"].load() @@ -1622,18 +1672,12 @@ def backend_particle_patches(self, file_ending): series.flush() - np.testing.assert_almost_equal( - numParticles, np.array([10, 113], np.uint64)) - np.testing.assert_almost_equal( - numParticlesOffset, np.array([0, 10], np.uint64)) - np.testing.assert_almost_equal( - extent_x, [10., 113.]) - np.testing.assert_almost_equal( - extent_y, [123., 123.]) - np.testing.assert_almost_equal( - offset_x, [0., 10.]) - np.testing.assert_almost_equal( - offset_y, [0., 0.]) + np.testing.assert_almost_equal(numParticles, np.array([10, 113], np.uint64)) + np.testing.assert_almost_equal(numParticlesOffset, np.array([0, 10], np.uint64)) + np.testing.assert_almost_equal(extent_x, [10.0, 113.0]) + np.testing.assert_almost_equal(extent_y, [123.0, 123.0]) + np.testing.assert_almost_equal(offset_x, [0.0, 10.0]) + np.testing.assert_almost_equal(offset_y, [0.0, 0.0]) def testParticlePatches(self): self.assertRaises(TypeError, io.Particle_Patches) @@ -1642,23 +1686,23 @@ def testParticlePatches(self): self.backend_particle_patches(ext) def testParticleSpecies(self): - """ Test ParticleSpecies. """ + """Test ParticleSpecies.""" self.assertRaises(TypeError, io.ParticleSpecies) def testParticle_Container(self): - """ Test Particle_Container. """ + """Test Particle_Container.""" self.assertRaises(TypeError, io.Particle_Container) def testRecord(self): - """ Test Record. """ + """Test Record.""" # Has only copy constructor. self.assertRaises(TypeError, io.Record) # Get a record. - electrons = self.__series.iterations[400].particles['electrons'] - position = electrons['position'] + electrons = self.__series.iterations[400].particles["electrons"] + position = electrons["position"] self.assertIsInstance(position, io.Record) - x = position['x'] + x = position["x"] self.assertIsInstance(x, io.Record_Component) # Copy. @@ -1671,11 +1715,11 @@ def testRecord(self): # io.Record_Component) def testRecord_Component(self): - """ Test Record_Component. """ + """Test Record_Component.""" self.assertRaises(TypeError, io.Record_Component) def testFieldRecord(self): - """ Test querying for a non-scalar field record. """ + """Test querying for a non-scalar field record.""" E = self.__series.iterations[100].meshes["E"] Ex = E["x"] @@ -1686,7 +1730,7 @@ def makeCloseIterationRoundTrip(self, file_ending): # write series = io.Series( "../samples/unittest_closeIteration_%T." + file_ending, - io.Access_Type.create + io.Access_Type.create, ) DS = io.Dataset data = np.array([2, 4, 6, 8], dtype=np.dtype("int")) @@ -1696,14 +1740,14 @@ def makeCloseIterationRoundTrip(self, file_ending): E_x = it0.meshes["E"]["x"] E_x.reset_dataset(DS(np.dtype("int"), extent)) E_x.store_chunk(data, [0], extent) - is_adios1 = series.backend == 'ADIOS1' + is_adios1 = series.backend == "ADIOS1" it0.close(flush=True) # not supported in ADIOS1: can only open one ADIOS1 series at a time if not is_adios1: read = io.Series( "../samples/unittest_closeIteration_%T." 
+ file_ending, - io.Access_Type.read_only + io.Access_Type.read_only, ) it0 = read.iterations[0] E_x = it0.meshes["E"]["x"] @@ -1724,7 +1768,7 @@ def makeCloseIterationRoundTrip(self, file_ending): if not is_adios1: read = io.Series( "../samples/unittest_closeIteration_%T." + file_ending, - io.Access_Type.read_only + io.Access_Type.read_only, ) it1 = read.iterations[1] E_x = it1.meshes["E"]["x"] @@ -1756,7 +1800,7 @@ def makeIteratorRoundTrip(self, backend, file_ending): series = io.Series( "../samples/unittest_serialIterator." + file_ending, io.Access_Type.create, - jsonConfig + jsonConfig, ) DS = io.Dataset data = np.array([2, 4, 6, 8], dtype=np.dtype("int")) @@ -1785,7 +1829,7 @@ def makeIteratorRoundTrip(self, backend, file_ending): read = io.Series( "../samples/unittest_serialIterator." + file_ending, io.Access_Type.read_only, - jsonConfig + jsonConfig, ) for it in read.read_iterations(): lastIterationIndex = it.iteration_index @@ -1806,10 +1850,10 @@ def makeIteratorRoundTrip(self, backend, file_ending): def testIterator(self): backend_filesupport = { - 'json': 'json', - 'hdf5': 'h5', - 'adios1': 'bp', - 'adios2': 'bp' + "json": "json", + "hdf5": "h5", + "adios1": "bp", + "adios2": "bp", } for b in io.variants: if io.variants[b] is True and b in backend_filesupport: @@ -1819,16 +1863,12 @@ def makeAvailableChunksRoundTrip(self, ext): if ext == "h5": return name = "../samples/available_chunks_python." + ext - write = io.Series( - name, - io.Access_Type.create - ) + write = io.Series(name, io.Access_Type.create) DS = io.Dataset E_x = write.iterations[0].meshes["E"]["x"] E_x.reset_dataset(DS(np.dtype("int"), [10, 4])) - data = np.array( - [[2, 4, 6, 8], [10, 12, 14, 16]], dtype=np.dtype("int")) + data = np.array([[2, 4, 6, 8], [10, 12, 14, 16]], dtype=np.dtype("int")) E_x.store_chunk(data, [1, 0], [2, 4]) data2 = np.array([[2, 4], [6, 8], [10, 12]], dtype=np.dtype("int")) E_x.store_chunk(data2, [4, 2], [3, 2]) @@ -1838,9 +1878,7 @@ def makeAvailableChunksRoundTrip(self, ext): del write read = io.Series( - name, - io.Access_Type.read_only, - options='{"defer_iteration_parsing": true}' + name, io.Access_Type.read_only, options='{"defer_iteration_parsing": true}' ) read.iterations[0].open() @@ -1865,15 +1903,13 @@ def testAvailableChunks(self): def writeFromTemporaryStore(self, E_x): if found_numpy: - E_x.store_chunk(np.array([[4, 5, 6]], dtype=np.dtype("int")), - [1, 0]) + E_x.store_chunk(np.array([[4, 5, 6]], dtype=np.dtype("int")), [1, 0]) data = np.array([[1, 2, 3]], dtype=np.dtype("int")) E_x.store_chunk(data) data2 = np.array([[7, 8, 9]], dtype=np.dtype("int")) - E_x.store_chunk(np.repeat(data2, 198, axis=0), - [2, 0]) + E_x.store_chunk(np.repeat(data2, 198, axis=0), [2, 0]) def loadToTemporaryStore(self, r_E_x): # not catching the return value shall not result in a use-after-free: @@ -1885,10 +1921,7 @@ def loadToTemporaryStore(self, r_E_x): def writeFromTemporary(self, ext): name = "../samples/write_from_temporary_python." 
+ ext - write = io.Series( - name, - io.Access_Type.create - ) + write = io.Series(name, io.Access_Type.create) DS = io.Dataset E_x = write.iterations[0].meshes["E"]["x"] @@ -1898,13 +1931,10 @@ def writeFromTemporary(self, ext): del write - read = io.Series( - name, - io.Access_Type.read_only - ) + read = io.Series(name, io.Access_Type.read_only) r_E_x = read.iterations[0].meshes["E"]["x"] - if read.backend == 'ADIOS2': + if read.backend == "ADIOS2": self.assertEqual(len(r_E_x.available_chunks()), 3) else: self.assertEqual(len(r_E_x.available_chunks()), 1) @@ -1917,8 +1947,7 @@ def writeFromTemporary(self, ext): if found_numpy: np.testing.assert_allclose( r_d[:3, :], - np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], - dtype=np.dtype("int")) + np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.dtype("int")), ) def testWriteFromTemporary(self): @@ -1971,13 +2000,14 @@ def testJsonConfigADIOS2(self): } } """ - if not io.variants['adios2']: + if not io.variants["adios2"]: return series = io.Series( "../samples/unittest_jsonConfiguredBP3.bp", io.Access_Type.create, - global_config) - if series.backend != 'ADIOS2': + global_config, + ) + if series.backend != "ADIOS2": # might happen, if env. var. OPENPMD_BP_BACKEND is used return @@ -1997,7 +2027,8 @@ def testJsonConfigADIOS2(self): read = io.Series( "../samples/unittest_jsonConfiguredBP3.bp", io.Access_Type.read_only, - global_config) + global_config, + ) E_x = read.iterations[0].meshes["E"]["x"] chunk_x = E_x.load_chunk([0], [1000]) @@ -2011,7 +2042,7 @@ def testJsonConfigADIOS2(self): self.assertEqual(chunk_y[i], i) def testError(self): - if 'test_throw' in io.__dict__: + if "test_throw" in io.__dict__: with self.assertRaises(io.ErrorOperationUnsupportedInBackend): io.test_throw("test description") with self.assertRaises(io.Error): @@ -2022,8 +2053,7 @@ def testCustomGeometries(self): DT = io.Datatype sample_data = np.ones([10], dtype=np.long) - write = io.Series("../samples/custom_geometries_python.json", - io.Access.create) + write = io.Series("../samples/custom_geometries_python.json", io.Access.create) E = write.iterations[0].meshes["E"] E.set_attribute("geometry", "other:customGeometry") E_x = E["x"] @@ -2050,8 +2080,9 @@ def testCustomGeometries(self): del write - read = io.Series("../samples/custom_geometries_python.json", - io.Access.read_only) + read = io.Series( + "../samples/custom_geometries_python.json", io.Access.read_only + ) E = read.iterations[0].meshes["E"] self.assertEqual(E.get_attribute("geometry"), "other:customGeometry") @@ -2064,11 +2095,11 @@ def testCustomGeometries(self): self.assertEqual(B.geometry_string, "other:customGeometry") e_energyDensity = read.iterations[0].meshes["e_energyDensity"] - self.assertEqual(e_energyDensity.get_attribute("geometry"), - "other:customGeometry") + self.assertEqual( + e_energyDensity.get_attribute("geometry"), "other:customGeometry" + ) self.assertEqual(e_energyDensity.geometry, io.Geometry.other) - self.assertEqual(e_energyDensity.geometry_string, - "other:customGeometry") + self.assertEqual(e_energyDensity.geometry_string, "other:customGeometry") e_chargeDensity = read.iterations[0].meshes["e_chargeDensity"] self.assertEqual(e_chargeDensity.get_attribute("geometry"), "other") @@ -2076,5 +2107,5 @@ def testCustomGeometries(self): self.assertEqual(e_chargeDensity.geometry_string, "other") -if __name__ == '__main__': +if __name__ == "__main__": unittest.main() diff --git a/test/python/unittest/Test.py b/test/python/unittest/Test.py index b0652c3bbc..45e4d1034a 100644 --- 
a/test/python/unittest/Test.py +++ b/test/python/unittest/Test.py @@ -16,8 +16,8 @@ # Define the test suite. def suite(): suites = [ - unittest.makeSuite(APITest), - ] + unittest.makeSuite(APITest), + ] return unittest.TestSuite(suites) @@ -29,7 +29,7 @@ def suite(): result = unittest.TextTestRunner(verbosity=2).run(suite()) if result.wasSuccessful(): - print('---> OK <---') + print("---> OK <---") sys.exit(0) sys.exit(1) diff --git a/test/python/unittest/TestUtilities/TestUtilities.py b/test/python/unittest/TestUtilities/TestUtilities.py index e03e5f6492..ffa4c63092 100644 --- a/test/python/unittest/TestUtilities/TestUtilities.py +++ b/test/python/unittest/TestUtilities/TestUtilities.py @@ -17,7 +17,7 @@ def generateTestFilePath(file_name): @return : The absolute path to ../TestFiles/ . """ - test_files_dir = path.join('..', 'samples', file_name) + test_files_dir = path.join("..", "samples", file_name) return test_files_dir # this_path = path.abspath(path.dirname(__file__))
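The Chunk.slice1D logic in openpmd_api/pipe/__main__.py (reformatted earlier in this diff) splits the largest dimension of a zero-offset dataset into one slab per MPI rank, using stride = extent[dimension] // mpi_size and rest = extent[dimension] % mpi_size. The remainder handling is not visible in the hunks shown here, so the standalone sketch below only illustrates that decomposition idea under the assumption that the first ranks absorb the remainder; it is not the exact implementation.

def slice_1d(extent, mpi_rank, mpi_size):
    # Choose the dimension with the largest extent, as slice1D does.
    dimension = max(range(len(extent)), key=lambda k: extent[k])
    stride = extent[dimension] // mpi_size
    rest = extent[dimension] % mpi_size
    # Assumption: ranks < rest each take one extra element so the slabs
    # cover the whole extent without gaps or overlap.
    local_len = stride + (1 if mpi_rank < rest else 0)
    start = mpi_rank * stride + min(mpi_rank, rest)
    offset = [0] * len(extent)
    local_extent = list(extent)
    offset[dimension] = start
    local_extent[dimension] = local_len
    return offset, local_extent

# Example: slice_1d([10, 4], mpi_rank=1, mpi_size=3) -> ([4, 0], [3, 4])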