From bf0658f9c849cbda7d77f9ae7ef4f88a14867cd0 Mon Sep 17 00:00:00 2001 From: superstar54 Date: Thu, 28 Nov 2024 10:30:39 +0100 Subject: [PATCH] Move PythonJob from aiida-workgraph to here --- .github/workflows/ci.yml | 85 + .github/workflows/publish-on-pypi.yml | 33 + .pre-commit-config.yaml | 13 + .readthedocs.yml | 13 + docs/Makefile | 20 + docs/environment.yml | 7 + docs/gallery/autogen/GALLERY_HEADER.rst | 3 + docs/gallery/autogen/how_to.py | 271 ++ docs/gallery/autogen/input.txt | 1 + .../autogen/inputs_folder/another_input.txt | 1 + docs/make.bat | 35 + docs/requirements.txt | 8 + docs/source/autogen/autogen_jupyter.zip | Bin 0 -> 15490 bytes docs/source/autogen/autogen_python.zip | Bin 0 -> 12004 bytes docs/source/autogen/how_to.codeobj.json | 977 +++++++ docs/source/autogen/how_to.ipynb | 190 ++ docs/source/autogen/how_to.py | 271 ++ docs/source/autogen/how_to.py.md5 | 1 + docs/source/autogen/how_to.rst | 477 ++++ docs/source/autogen/how_to.zip | Bin 0 -> 22141 bytes .../images/thumb/sphx_glr_how_to_thumb.png | Bin 0 -> 35467 bytes .../thumb/sphx_glr_quick_start_thumb.png | Bin 0 -> 26794 bytes docs/source/autogen/index.rst | 62 + docs/source/autogen/quick_start.codeobj.json | 263 ++ docs/source/autogen/quick_start.ipynb | 75 + docs/source/autogen/quick_start.py | 66 + docs/source/autogen/quick_start.py.md5 | 1 + docs/source/autogen/quick_start.rst | 152 + docs/source/autogen/quick_start.zip | Bin 0 -> 5353 bytes docs/source/autogen/sg_execution_times.rst | 37 + docs/source/conf.py | 174 ++ docs/source/index.rst | 26 + docs/source/installation.rst | 44 + docs/source/pythonjob.ipynb | 2503 +++++++++++++++++ docs/source/sg_execution_times.rst | 37 + docs/source/tutorial/dft.ipynb | 1128 ++++++++ .../tutorial/html/atomization_energy.html | 290 ++ .../tutorial/html/pythonjob_eos_emt.html | 290 ++ docs/source/tutorial/index.rst | 11 + examples/test_add.py | 15 + pyproject.toml | 145 + src/aiida_pythonjob/__init__.py | 15 + 
src/aiida_pythonjob/calculations/__init__.py | 3 + src/aiida_pythonjob/calculations/pythonjob.py | 322 +++ src/aiida_pythonjob/config.py | 13 + src/aiida_pythonjob/data/__init__.py | 4 + src/aiida_pythonjob/data/pickled_data.py | 86 + src/aiida_pythonjob/data/pickled_function.py | 161 ++ src/aiida_pythonjob/data/serializer.py | 121 + src/aiida_pythonjob/launch.py | 76 + src/aiida_pythonjob/parsers/__init__.py | 3 + src/aiida_pythonjob/parsers/pythonjob.py | 124 + src/aiida_pythonjob/utils.py | 29 + tests/conftest.py | 18 + tests/input.txt | 1 + tests/inputs_folder/another_input.txt | 1 + tests/test_data.py | 24 + tests/test_parsers.py | 0 tests/test_pythonjob.py | 232 ++ 59 files changed, 8958 insertions(+) create mode 100644 .github/workflows/ci.yml create mode 100644 .github/workflows/publish-on-pypi.yml create mode 100644 .pre-commit-config.yaml create mode 100644 .readthedocs.yml create mode 100644 docs/Makefile create mode 100644 docs/environment.yml create mode 100644 docs/gallery/autogen/GALLERY_HEADER.rst create mode 100644 docs/gallery/autogen/how_to.py create mode 100644 docs/gallery/autogen/input.txt create mode 100644 docs/gallery/autogen/inputs_folder/another_input.txt create mode 100644 docs/make.bat create mode 100644 docs/requirements.txt create mode 100644 docs/source/autogen/autogen_jupyter.zip create mode 100644 docs/source/autogen/autogen_python.zip create mode 100644 docs/source/autogen/how_to.codeobj.json create mode 100644 docs/source/autogen/how_to.ipynb create mode 100644 docs/source/autogen/how_to.py create mode 100644 docs/source/autogen/how_to.py.md5 create mode 100644 docs/source/autogen/how_to.rst create mode 100644 docs/source/autogen/how_to.zip create mode 100644 docs/source/autogen/images/thumb/sphx_glr_how_to_thumb.png create mode 100644 docs/source/autogen/images/thumb/sphx_glr_quick_start_thumb.png create mode 100644 docs/source/autogen/index.rst create mode 100644 docs/source/autogen/quick_start.codeobj.json create mode 100644 
docs/source/autogen/quick_start.ipynb create mode 100644 docs/source/autogen/quick_start.py create mode 100644 docs/source/autogen/quick_start.py.md5 create mode 100644 docs/source/autogen/quick_start.rst create mode 100644 docs/source/autogen/quick_start.zip create mode 100644 docs/source/autogen/sg_execution_times.rst create mode 100644 docs/source/conf.py create mode 100644 docs/source/index.rst create mode 100644 docs/source/installation.rst create mode 100644 docs/source/pythonjob.ipynb create mode 100644 docs/source/sg_execution_times.rst create mode 100644 docs/source/tutorial/dft.ipynb create mode 100644 docs/source/tutorial/html/atomization_energy.html create mode 100644 docs/source/tutorial/html/pythonjob_eos_emt.html create mode 100644 docs/source/tutorial/index.rst create mode 100644 examples/test_add.py create mode 100644 pyproject.toml create mode 100644 src/aiida_pythonjob/__init__.py create mode 100644 src/aiida_pythonjob/calculations/__init__.py create mode 100644 src/aiida_pythonjob/calculations/pythonjob.py create mode 100644 src/aiida_pythonjob/config.py create mode 100644 src/aiida_pythonjob/data/__init__.py create mode 100644 src/aiida_pythonjob/data/pickled_data.py create mode 100644 src/aiida_pythonjob/data/pickled_function.py create mode 100644 src/aiida_pythonjob/data/serializer.py create mode 100644 src/aiida_pythonjob/launch.py create mode 100644 src/aiida_pythonjob/parsers/__init__.py create mode 100644 src/aiida_pythonjob/parsers/pythonjob.py create mode 100644 src/aiida_pythonjob/utils.py create mode 100644 tests/conftest.py create mode 100644 tests/input.txt create mode 100644 tests/inputs_folder/another_input.txt create mode 100644 tests/test_data.py create mode 100644 tests/test_parsers.py create mode 100644 tests/test_pythonjob.py diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000..50ee5cb --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,85 @@ +name: ci + +on: + push: + # 
only pushes to main trigger + branches: [main] + pull_request: + # always triggered + +jobs: + + tests: + runs-on: ubuntu-latest + timeout-minutes: 30 + strategy: + matrix: + python-version: ['3.12'] + aiida-version: ['stable'] + + services: + postgres: + image: postgres:10 + env: + POSTGRES_DB: test_aiida + POSTGRES_PASSWORD: '' + POSTGRES_HOST_AUTH_METHOD: trust + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + ports: + - 5432:5432 + rabbitmq: + image: rabbitmq:latest + ports: + - 5672:5672 + + steps: + - uses: actions/checkout@v4 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install project manager + run: | + pip install hatch + - name: Run test suite + env: + PYTEST_ADDOPTS: "--durations=0" + run: | + hatch test --cover + + docs: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install project manager + run: | + pip install hatch + - name: Build docs + run: | + hatch run docs:build + + static-analysis: + runs-on: ubuntu-latest + timeout-minutes: 15 + steps: + - uses: actions/checkout@v4 + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v5 + with: + python-version: ${{ matrix.python-version }} + - name: Install project manager + run: | + pip install hatch + - name: Run formatter and linter + run: | + hatch fmt --check diff --git a/.github/workflows/publish-on-pypi.yml b/.github/workflows/publish-on-pypi.yml new file mode 100644 index 0000000..354e5d2 --- /dev/null +++ b/.github/workflows/publish-on-pypi.yml @@ -0,0 +1,33 @@ +name: Publish on PyPI + +on: + push: + tags: + # After vMajor.Minor.Patch _anything_ is allowed (without "/") ! 
+ - v[0-9]+.[0-9]+.[0-9]+* + +jobs: + publish: + runs-on: ubuntu-latest + if: github.repository == 'aiidateam/aiida-pythonjob' && startsWith(github.ref, 'refs/tags/v') + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Set up Python 3.12 + uses: actions/setup-python@v5 + with: + python-version: '3.12' + + - name: Install hatch + run: | + python -m pip install --upgrade pip + python -m pip install hatch~=1.12.0 + + - name: Build and publish + run: | + hatch publish + env: + HATCH_INDEX_USER: __token__ + HATCH_INDEX_AUTH: ${{ secrets.pypi_token }} diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..4d839fd --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,13 @@ +repos: +- repo: local + hooks: + - id: format + name: format + entry: hatch fmt -f + language: system + types: [python] + - id: lint + name: lint + entry: hatch fmt -l + language: system + types: [python] diff --git a/.readthedocs.yml b/.readthedocs.yml new file mode 100644 index 0000000..eaf0672 --- /dev/null +++ b/.readthedocs.yml @@ -0,0 +1,13 @@ +version: 2 + +build: + os: ubuntu-22.04 + tools: + python: "3.12" + +python: + install: + - method: pip + path: . + extra_requirements: + - docs diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000..d0c3cbf --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,20 @@ +# Minimal makefile for Sphinx documentation +# + +# You can set these variables from the command line, and also +# from the environment for the first two. +SPHINXOPTS ?= +SPHINXBUILD ?= sphinx-build +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +.PHONY: help Makefile + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 
+%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/environment.yml b/docs/environment.yml new file mode 100644 index 0000000..a01659b --- /dev/null +++ b/docs/environment.yml @@ -0,0 +1,7 @@ +name: base +channels: + - conda-forge + - defaults +dependencies: + - aiida-core + - aiida-core.services diff --git a/docs/gallery/autogen/GALLERY_HEADER.rst b/docs/gallery/autogen/GALLERY_HEADER.rst new file mode 100644 index 0000000..12385f4 --- /dev/null +++ b/docs/gallery/autogen/GALLERY_HEADER.rst @@ -0,0 +1,3 @@ +=========== +Quick Start +=========== diff --git a/docs/gallery/autogen/how_to.py b/docs/gallery/autogen/how_to.py new file mode 100644 index 0000000..35c0438 --- /dev/null +++ b/docs/gallery/autogen/how_to.py @@ -0,0 +1,271 @@ +""" +How to guides +=============== + +""" + + +###################################################################### +# Introduction +# ------------ +# +# To run this tutorial, you need to load the AiiDA profile. +# + +from aiida import load_profile + +load_profile() + + +###################################################################### +# Default outputs +# -------------- +# +# The default output of the function is `result`. The `PythonJob` task will store the result as one node in the database with the key `result`. +# +from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob +from aiida.engine import run_get_node + + +def add(x, y): + return x + y + + +inputs = prepare_pythonjob_inputs(add, + function_inputs={"x": 1, "y": 2}, + computer="localhost", + ) +result, node = run_get_node(PythonJob, inputs=inputs) +print("result: ", result["result"]) + +###################################################################### +# Custom outputs +# -------------- +# If the function return a dictionary with fixed number of keys, and you want to store the values as separate outputs, you can specify the `function_outputs` parameter. 
+# For a dynamic number of outputs, you can use the namespace output, which is explained later. +# + +def add(x, y): + return {"sum": x + y, "diff": x - y} + +inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[ + {"name": "sum"}, + {"name": "diff"}, + ] +) +result, node = run_get_node(PythonJob, **inputs) + +print("result: ") +print("sum: ", result["sum"]) +print("diff: ", result["diff"]) + + +###################################################################### +# Using parent folder +# -------------- +# The parent_folder parameter allows a task to access the output files of a parent task. This feature is particularly useful when you want to reuse data generated by a previous computation in subsequent computations. In the following example, the multiply task uses the `result.txt` file created by the add task. +# +# + + +def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + +def multiply(x, y): + with open("parent_folder/result.txt", "r") as f: + z = int(f.read()) + return x * y + z + +inputs1 = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "sum"}], +) + +result1, node1 = run_get_node(PythonJob, inputs=inputs1) + +inputs2 = prepare_pythonjob_inputs( + multiply, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "product"}], + parent_folder=result1["remote_folder"], +) + +result2, node2 = run_get_node(PythonJob, inputs=inputs2) + +print("result: ", result2) + +###################################################################### +# Upload files or folders to the remote computer +# -------------- +# The `upload_files` parameter allows users to upload files or folders to the remote computer. The files will be uploaded to the working directory of the remote computer. 
+# + +import os + +# create a temporary file "input.txt" in the current directory +with open("input.txt", "w") as f: + f.write("2") + +# create a temporary folder "inputs_folder" in the current directory +# and add a file "another_input.txt" in the folder +os.makedirs("inputs_folder", exist_ok=True) +with open("inputs_folder/another_input.txt", "w") as f: + f.write("3") + +def add(): + with open("input.txt", "r") as f: + a = int(f.read()) + with open("inputs_folder/another_input.txt", "r") as f: + b = int(f.read()) + return a + b +# ------------------------- Submit the calculation ------------------- +# we need use full path to the file +input_file = os.path.abspath("input.txt") +input_folder = os.path.abspath("inputs_folder") +inputs = prepare_pythonjob_inputs( + add, + upload_files = { + "input.txt": input_file, + "inputs_folder": input_folder, + }, +) +result, node = run_get_node(PythonJob, inputs=inputs) +print("result: ", result["result"]) + +###################################################################### +# Retrieve additional files from the remote computer +# -------------- +# Sometimes, one may want to retrieve additional files from the remote computer after the job has finished. For example, one may want to retrieve the output files generated by the `pw.x` calculation in Quantum ESPRESSO. 
+# +# One can use the `additional_retrieve_list` parameter to specify which files should be retrieved from the working directory and stored in the local repository after the job has finished +# + +def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + +inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + metadata = { + "options": { + "additional_retrieve_list": ["result.txt"], + } + }, +) + +result, node = run_get_node(PythonJob, inputs=inputs) +print("retrieved files: ", result["retrieved"].list_object_names()) + +###################################################################### +# Namespace Output +# -------------- +# +# The `PythonJob` allows users to define namespace outputs. A namespace output is a dictionary with keys and values returned by a function. Each value in this dictionary will be serialized to AiiDA data, and the key-value pair will be stored in the database. +# Why Use Namespace Outputs? +# +# - **Dynamic and Flexible**: The keys and values in the namespace output are not fixed and can change based on the task's execution. +# - **Querying**: The data in the namespace output is stored as an AiiDA data node, allowing for easy querying and retrieval. +# - **Data Provenance**: When the data is used as input for subsequent tasks, the origin of data is tracked. +# +# For example: Consider a molecule adsorption calculation where the namespace output stores the surface slabs of the molecule adsorbed on different surface sites. The number of surface slabs can vary depending on the surface. These output surface slabs can be utilized as input to the next task to calculate the energy. 
+ +from ase import Atoms +from ase.build import bulk + + +def generate_structures(structure: Atoms, factor_lst: list) -> dict: + """Scale the structure by the given factor_lst.""" + scaled_structures = {} + for i in range(len(factor_lst)): + atoms = structure.copy() + atoms.set_cell(atoms.cell * factor_lst[i], scale_atoms=True) + scaled_structures[f"s_{i}"] = atoms + return {"scaled_structures": scaled_structures} + +inputs = prepare_pythonjob_inputs( + generate_structures, + function_inputs={"structure": bulk("Al"), "factor_lst": [0.95, 1.0, 1.05]}, + function_outputs=[{"name": "scaled_structures", "identifier": "namespace"}], +) + +result, node = run_get_node(PythonJob, inputs=inputs) +print("scaled_structures: ") +for key, value in result["scaled_structures"].items(): + print(key, value) + +###################################################################### +# Exit Code +# -------------- +# +# +# When the function returns a dictionary with an `exit_code` key, the system automatically parses and uses this code to indicate the task's status. In the case of an error, the non-zero `exit_code` value helps identify the specific problem. +# +# + +def add(x, y): + sum = x + y + if (sum < 0).any(): + exit_code = {"status": 410, "message": "Some elements are negative"} + return {"sum": sum, "exit_code": exit_code} + return {"sum": sum} + +inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": -21}, +) + +result, node = run_get_node(PythonJob, inputs=inputs) +print("exit_status:", node.exit_status) +print("exit_message:", node.exit_message) + + +###################################################################### +# Define your data serializer +# -------------- +# +# PythonJob search data serializer from the `aiida.data` entry point by the module name and class name (e.g., `ase.atoms.Atoms`). +# +# In order to let the PythonJob find the serializer, you must register the AiiDA data with the following format: +# +# .. 
code-block:: ini +# +# [project.entry-points."aiida.data"] +# abc.ase.atoms.Atoms = "abc.xyz:MyAtomsData" +# +# This will register a data serializer for `ase.atoms.Atoms` data. `abc` is the plugin name, the module name is `xyz`, and the AiiDA data class name is `AtomsData`. Learn how to create an AiiDA data class `here `_. +# +# *Avoid duplicate data serializer*: If you have multiple plugins that register the same data serializer, the PythonJob will raise an error. You can avoid this by selecting the plugin that you want to use in the configuration file. +# +# +# .. code-block:: json +# +# { +# "serializers": { +# "ase.atoms.Atoms": "abc.ase.atoms.Atoms" +# } +# } +# +# Save the configuration file as `pythonjob.json` in the aiida configuration directory (by default, `~/.aiida` directory). + + + +###################################################################### +# What’s Next +# ----------- +# +-----------------------------------------+------------------------------------------------------+ +# | `Tutorials <../tutorial/index.rst>`__ | Real-world examples in computational materials | +# | | science and more. 
| +# | | | +# +-----------------------------------------+------------------------------------------------------+ +# +# diff --git a/docs/gallery/autogen/input.txt b/docs/gallery/autogen/input.txt new file mode 100644 index 0000000..d8263ee --- /dev/null +++ b/docs/gallery/autogen/input.txt @@ -0,0 +1 @@ +2 \ No newline at end of file diff --git a/docs/gallery/autogen/inputs_folder/another_input.txt b/docs/gallery/autogen/inputs_folder/another_input.txt new file mode 100644 index 0000000..e440e5c --- /dev/null +++ b/docs/gallery/autogen/inputs_folder/another_input.txt @@ -0,0 +1 @@ +3 \ No newline at end of file diff --git a/docs/make.bat b/docs/make.bat new file mode 100644 index 0000000..747ffb7 --- /dev/null +++ b/docs/make.bat @@ -0,0 +1,35 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. 
+ echo.If you don't have Sphinx installed, grab it from + echo.https://www.sphinx-doc.org/ + exit /b 1 +) + +if "%1" == "" goto help + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% + +:end +popd diff --git a/docs/requirements.txt b/docs/requirements.txt new file mode 100644 index 0000000..9eb3b97 --- /dev/null +++ b/docs/requirements.txt @@ -0,0 +1,8 @@ +sphinx_rtd_theme==1.2.2 +sphinx-gallery +nbsphinx==0.9.2 +ipython +aiida-core +ase +furo +matplotlib # for sphinx-gallery diff --git a/docs/source/autogen/autogen_jupyter.zip b/docs/source/autogen/autogen_jupyter.zip new file mode 100644 index 0000000000000000000000000000000000000000..051cf02fd0b985fe574d34e851b941a1d072253e GIT binary patch literal 15490 zcmeHO%W@mX6;!eij=)D6DGy7q>5w5qC_bw7r_DsmcLa>N0Tina1 zsl=n!B+O1CeU`K~%EuF#hf$b^c>4ThRcdsR4cWqVW;^O8H^oDaEDktwH);7kj_kW{ z{N~5Kc~g9v@x-y{_d}(k zu#-;nu};3ygML2{4^o+ku$$yEo2Vp=MVLhFaEE;dff(vY?(E<#F*=qkgaA^NR=v*k z$r_*Q5Ce~;*i-86o=7u2Qn3v9XeCK;?0R?Pxf~W;b#JJPB*%s%MI2Y#)Z9G38QgIl z$6;z@gzBR(Hqt+;w^tW=TIBYy995ky+w)PTC&I2)s7b1`T*O#s&##V4YkqF8UEh(o zuQHPhc^*zuw?zj`nv#2~Xk zJjI95Zq<)svwGEWFSjktWEy6&e$Z11kHCi5x0>-MJ?JL&ZU-_sR*5WILpOTIGVdig zobE(NawNhiYM(>x)}S{;h)m{1mIy0pSIdndcA*Z-hi{{g4cD4IT>KgpiPe5T?cL|y z)_E7_duu~i z$3}c1&tVI0b*}oldd-<0Z*`Lw;_#39?6E#-2f^l8pXGW}C6PQ2fCOv3`s}Jhv$?#3LEToim-qQHj?E z$J(QBNb3d@JOND8j2pky6(wbcN@B;Y=qEap4VhmoyA{W{xT8y+T+?niyqz<@u>01$ zw-$Ip>qKUWj7=(sfW3;dMaracIPDP+QQmSmB<|FNBo33~B0RQFosp@Zm91R)9XA=G=O{H+=bk4|M~aCA`r&H}$7_3A<0=2*ZACjb11wD{f&0cT6|8l=_6ux`(;>*7b_W$v> z%^NrP|5tDn0Fu32U$c(@j3)UyE>tAVN_6UVyp!+cC6K(nBkgmj;T(br6X;gH5&sc8 zbXSf-Fcsms7eVVWTqcmoMzPQ%yC)^(6L9kSj^ehvr0_DY$wDexS51 zU1bp(uzl$W7VHh3GkUs%kY0ky$F}4|PV3&6qUdGvcw4=-n&+1k{vrncf9C+aWvuU3tL&cUmbVlt2!H5K{;T202w@Js1= z*SdHb#)Sj}H^R^bq%-cd-vw;ob_>!FHJVy|?3Zfa^VRx-&0w(Mr+1V4I)ncnO<|wa zurZB=J_{&0HJ*mtq#>xGtFj@$(uZS$3M_aW!pn^WV+8D-KP#Z&+ok=Nxa4qgO);T| 
zYn^lY%#j+6TtP=nUvlBEcGjJvUc2noR(U7&5~sYX>Njflu=e_A9B@0&rYk@5)Xlnl zeAG>@?ntk%dmox)o$Fonq#N!vrqT4R54`}78dW%#xjJro*Qv)(tozE)4nkfd9Y->A zHq{mNTv4q;0zpxI27Ppf6Fe;(4y8dD6}sZ~Mvy(jbRxjRcI=o&mZldoA1y~k4CO*-wTT3oq$bkties5b0zf1NQ}&(7Q>6>jbOh-anqpw&*9FIQ zSZ!TI6Ces~XNe_Y3G~{1^l!r!Pe3eGikVnOfI;1wJaY(gc;a?tVwhnGZW3HHj~Tt0itKHzQL^<~wqL!LQsh41SRw590ppnkA-d08OCt_Cvw6F2 z^R|GAE{^U&1_WkS2HQ>sT*{SXux;hH2+L)FbNd?k(Ua8rh8_wt53PU{ZNX+Pf#4vw zU)V-H+o(ZXO3Vc&Wwr-buf*9`b?$r~HxujV45V;90L1)!5M4VVB1S5cL-;|{(utaz zXy8EZ2g`AM5=tYkz;vX8avC$q>=a2VEJMQ* z3A?RrVAaLLTN7MPz*MSn$s}%C_DnMrma`8NjTvVy*)h9{(#C5OOu`e1#Ta0@)A?#N z#JSIv$$R=__iS)59g8OtaTbeSur zeBoG}krK!ZroEdBOC` z)%9W^RMSdAU&=s$bJ3j$%~%)69Kq<7EhABhb$)RYC&0l7Sbw% zFGx-b8Jej;sK)kh_sXBzF!qDrW#DH&)2A}wjSSA=DMLh@ubYUWl>qnUod6IFcv@LA z1jI$bMzTJx`QcI$J)5#+o`pkjV2tG=!`{s8h);kBNIx=GJi!${1gU78jm|7s%-AqQ ztbE#NLG)G2=(^dT2Z3mY&jm zi6nwE5nVaIslJ|VjvF4JZRc_6T980&I>?g-n{1};(3 zr|!ij-w{OOHPQLK#TXwc>$doF0LeO`Ry{D#$&-7maQ++J1g?cMD~9c&#tdlbyvd<5 zD8gDsb#KQA+|`p=D?Kk7r;v+A%WCXksMBd1EW0#awnkuZjgKkaw)@CG#d>3Ehw5lU z*eUgFjnaWEds&$6A<_t@_guY%ox=FGksH(jHmq`Mp%D%5Lg7kTh$TDcEq7@d8dn7? 
zp>SND0JvOxF&rm)y0t(p9k?BQ{LzNMeGL2Sqa#{-F23s4si2v=IJmGH7gPlZCAk_Y zgyk``yJ=|CQ9inc{dsLaXklm$gpPT5VAM8hO_beYq(OQpmN9*AgswW z0c|R55P=@%j}7U&<7y$QAzjJl-n8OQd0y0+h#_whFi(;Q$qP)*OeH$$Jd>H8nTR76 zV;QHgbM8zyXmBnMa+!#_f*njO6m#7_SU6thg&;-_Y-l7R)JU{h_&afXEeI0;RU_+A zZ6J{lht*1*`t#uyw6crqY{uXYn$svc(n3OVP&Di%*^)fQ7N6pIgSa=q$Ynw?I{T`| zVsF(4-*8!@l~4+f$2@#EXKBd==FaxkoANB|I-J;ch(k<81dT#~Lm6hbrCy+Ob$#hI zD*j#PG$4PNIgVGcc4RfXN#$;U&4(FSzvY2-ojNYyzV#9Cd0*gQfLWy)(Z=2WmgIaYZ=5wO;6U;B4d`O>2LRl)syN3IdvuSpp(r=|Y6{yx{Y=3uX1B z+%<;@?ppx1j=>&$pr*mBZOoG}Q>Qk8-M}g6L^tW23J~um;b0id%^hllYA8RSKHK^8)UM|)L<1JW-s#K%_-t;Wnj-u$QulftmY6R{ z#nuWSt--KQH;$`FhO|+qk7?lIG+~$38-i$T1M5{2#l5f3vEzxrg+>a# zjQh{{C$8}!>HJZ9oad?8+1zx(VFD3DMS5rgr8m7GjeFV|!ok~=$tDyA2)vnV-WlKI z^6kAh6O8joe3LPRhg|1^iEx+Fta6UMSZsK6T?BCtww@~w6xXJ&MpMLP(#b5g6 zmx1~E_8b0Q_zLj;XLa*sUj<+zzP|lt4c`oCelcMGv!DNhPkxG@Dc=9}TYmZvGNE5M literal 0 HcmV?d00001 diff --git a/docs/source/autogen/autogen_python.zip b/docs/source/autogen/autogen_python.zip new file mode 100644 index 0000000000000000000000000000000000000000..f5e7ca4d2fd7c6404743b4322b69172350b05689 GIT binary patch literal 12004 zcmeHNOK&5`5#HEwknoU`e?VgdB&3!cc{lMPg5^N)?jnvIue@5rFa&|qoN0<}4X4M` zGm^NEApQfn=YRO8Py+_Z} zC$GLb{ny|B`S#E6-J^fMMB6u&j?c%oltmfliy#PkpHquCrk378`}BJBve(M zKF-R*Bvo8$li}y6^YnJm@Z5-^%0xNSR+Lp~3N6#2SeQy=N+qH+LT8{mO+^$*trI!Q z7v;=kUzQ?-g_#Q;BlJ8sMJZBr9{Vo6UiVjjaF?=tq6=FJbtUIH zak3=L(TM3juJYViB`)#5%u149EYrA3WvL*+D{)^eqDUABO%!Tw&>EY0UO{}J_!2)P zcB#YSxRG|vY+k5b7OH(Q))~gzp*Z4=`JfgPz)hm%&xAFCGjt@;56!Q&;7(cH-Clj^SVb z_2xa7=DCkWUWw=r<`ORDgD~8inak4b=`2xKA?)VEXgqG!99O zhaRi&OiWXANj?M4D;1U1*%^<0#f!AlT>lN^M0y%ULwrt6;l5nX;7DMC%;suJoZ(Qhpq^1%+SNO;Q<6ANm7rdVX zZy+RgItnvoR_f>_fRZBTzTq*bf7CesNR-l^i%SixWC5fUk93`dw21l2MGjaLA)7>& zav}*U=yJv_=W5Z;{v!<7W~pyV06+0DJvc=uzq2s@Q3d6hU!$nv;}xCskWGq2GsKd` z!s*mhUm?C`)qJ7~(q!lt!Yc(n!uK^kZFTiRrjbBW;Kx(o%Pbvz@hQ&)K^ zfn*awP@1N{4!*t&Y&8cgWEcyKnds@1e~!fB^-WMMBiaVqHhg+$w;8-Py*z&hCk-xc 
zaVpW(0lV!BCegn+?cEq`?(TY=N!HR^1Dg%}#JU~qdBXC6-gXbklYA&Im&eI|&$~aaQi7Rx+30+W{2w#i5hQ`rS zNnnmM*Ae00k1N12tLl z(#{-so+zeKRl?g|&}I0cW0)wv5yKB5s%^(V8)(8q+I9Z6B1ph7|?N1-Pe3U?FWK~0qxqx zCE1<(MQyD-+ZWzp6hhuhdU9oEKx5&`LrY})P+@F1m*)z^+rDG2SqCM9D{afMIX`$_ zRBEusSKV`OeUCH7n3#Z?&LY* zxSGs0A|fe+uSz1PUrB{{0H#3+lS{x90udckgiOj)oK2CAx#A$(3&_I$P=`=XfZ)oR z-y`11LEY17$+o_=Fd%p5`uy$jZ<`_C~9}VnsPIn1-xu&~KDW0ja(CUKWg7#G$=a9ITPU^7YXx6j{4aWw+ zUIU;HIhCDDWEsB14F$aA0zyuK;#m`Mk|1V;Jane*OeG=b4Glb^w0YxNMD`NsTY_lx zHp(xax12&zqkrn&) z?}F2iv}tT6UnAy^IrSyLdv~ztvnHE<%I75;j2cbP$xu2)ET3aFXa^CH4vwlRF+H zy>W#NvRte;a?|8e7p@>j1F0t0SXfEwMg;Jr|-F@V%7-XwY zQs9kAs&;qxS$wMudc3tNGJ$*)DFc#UuFKF&zN(|+H4j$UdTJw@8q+PurB%UZF+6f_hzpuU@4~M?_@U=dg za)eSlEpLSBp;w0UDpw~!Lk;6x%%U2_*&?jZ$gzUY6%J8SwEkGws&G zuCj)pwAOa>vgC$c+EycI1c4Q4pZmdK8Vr#0wDO|>`CjnwY zwqzF;3<-(jT^!14FpLT!$~sg&r04=)O#TjWfCf_PhSPX-LtUSS`yhkcJ!REaC}TR3 zqWWnjR8g41x$DeiqZg_$ON6)}J5y;6fA6;58M(v6sQIJjgg7~8pWY_Zo$Ds3s=;R3 z@}!=MKDB)y-W!B6gYCBU$VLR@83IQ(^z63}ptb?>5-ZR6DhMSiLSb1bBLqP~SDisV z7b<9>UTd6uP=GZtNwWf`ZN9rP>-}$Tu)wA(2IbSi<#UA5!v}W&KACPOoPF3Nl_48Q;8!3{@KS>lMuHp#5nDZH8qt9_W(2GkzWPa^Y0Gt@?cj?C5Xo3{*9QWps z6f-Gs$0x8R$`x>@3rvofNgD=y6^_{tPC+vsIf=s+mS8dgHD4`W?Eh)OTt0%p0-l@C zMz}WF5DgShV6Wwjq1>9FxK{`PPU48bKk=2P6(Ktkus_`BYykR1paEBM$U#x}j%YeN zL;V^man$}Plpqth7eQO$4nfF6tqqPC|9vnk%iQko?K#=dSpt?yOl(7K_PiLadpnAe z7VN2P4>ki|wO1Otj+W+1bd_{l20cVarA_pVW@1=ZJgaijzaH zyN4G>Cn7;v%DLMWVFEbg5SLDxW)fu?U+J$!M`DyK3S>!pS7ve2ZS~RZpw#f+wet?e zS6GDbsAP&b7yyY`xJR571DJMpj92X*e<6Ml>Y^^VCpFb)6;8mYU`T1L^XWUnU-4`d z_G?QQO`RgXbRVRXL90qGi)T%xH}nK6>PqlogBC+wU4J27yRZ1bmt&IRW-=)kiSqi& zYDm;UQyo*R9V@Ig#c|PxoRIv|Z4kJKKktRyJ!*^e0|4zibdhhy{k literal 0 HcmV?d00001 diff --git a/docs/source/autogen/how_to.codeobj.json b/docs/source/autogen/how_to.codeobj.json new file mode 100644 index 0000000..d6afe33 --- /dev/null +++ b/docs/source/autogen/how_to.codeobj.json @@ -0,0 +1,977 @@ +{ + "Atoms": [ + { + "is_class": true, + "is_explicit": false, + "module": "ase.atoms", + "module_short": "ase", + "name": 
"Atoms" + }, + { + "is_class": true, + "is_explicit": false, + "module": "ase", + "module_short": "ase", + "name": "Atoms" + }, + { + "is_class": false, + "is_explicit": false, + "module": "ase", + "module_short": "ase", + "name": "Atoms" + } + ], + "PythonJob": [ + { + "is_class": true, + "is_explicit": false, + "module": "aiida_pythonjob.calculations.pythonjob", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida_pythonjob.calculations", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida_pythonjob", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes.calcjobs.calcjob", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes.calcjobs", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes.process", + "module_short": "aiida.engine", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes", + "module_short": "aiida.engine", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine", + "module_short": "aiida.engine", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Process" + 
}, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.processes", + "module_short": "plumpy", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy", + "module_short": "plumpy", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.base.state_machine", + "module_short": "plumpy.base", + "name": "StateMachine" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.base", + "module_short": "plumpy.base", + "name": "StateMachine" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy", + "module_short": "plumpy", + "name": "StateMachine" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.persistence", + "module_short": "plumpy", + "name": "Savable" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy", + "module_short": "plumpy", + "name": "Savable" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida_pythonjob", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + } + ], + "add": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + } + ], + "bulk": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "ase.build", + "module_short": "ase.build", + "name": "bulk" + } + ], + "f": [ + { + "is_class": false, + "is_explicit": false, + "module": "io", + "module_short": "io", + "name": "TextIOWrapper" + } + ], + "f.read": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "builtin_function_or_method" + } + ], + "f.write": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "builtin_function_or_method" + } + ], + "generate_structures": [ + { + 
"is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + } + ], + "input_file": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "str" + } + ], + "input_folder": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "str" + } + ], + "inputs": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "inputs1": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "inputs2": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "key": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "str" + } + ], + "load_profile": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "load_profile" + } + ], + "multiply": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + } + ], + "node": [ + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation.calcjob", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + 
"module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJobNode" + } + ], + "node.exit_message": [ + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation.calcjob", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJobNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation.calculation", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_message" + }, + { + 
"is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalculationNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.process", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "ProcessNode.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.utils.mixins", + "module_short": "aiida.orm.utils.mixins", + "name": "Sealable.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.utils", + "module_short": "aiida.orm.utils", + "name": "Sealable.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "Sealable.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Sealable.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.node", + "module_short": "aiida.orm.nodes.node", + "name": "Node.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm.nodes", + "name": "Node.exit_message" + }, + { + "is_class": 
false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "Node.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Node.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.entities", + "module_short": "aiida.orm.entities", + "name": "Entity.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "Entity.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Entity.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "abc", + "module_short": "abc", + "name": "ABC.exit_message" + }, + { + "is_class": false, + "is_explicit": false, + "module": "typing", + "module_short": "typing", + "name": "Generic.exit_message" + } + ], + "node.exit_status": [ + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation.calcjob", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalcJobNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJobNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": 
"aiida.orm.nodes.process.calculation.calculation", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalculationNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalculationNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.process", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "ProcessNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "ProcessNode.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.utils.mixins", + "module_short": "aiida.orm.utils.mixins", + "name": "Sealable.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.utils", + "module_short": "aiida.orm.utils", + "name": "Sealable.exit_status" + 
}, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "Sealable.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Sealable.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.node", + "module_short": "aiida.orm.nodes.node", + "name": "Node.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm.nodes", + "name": "Node.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "Node.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Node.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.entities", + "module_short": "aiida.orm.entities", + "name": "Entity.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "Entity.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Entity.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "abc", + "module_short": "abc", + "name": "ABC.exit_status" + }, + { + "is_class": false, + "is_explicit": false, + "module": "typing", + "module_short": "typing", + "name": "Generic.exit_status" + } + ], + "node1": [ + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation.calcjob", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + 
"module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJobNode" + } + ], + "node2": [ + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation.calcjob", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJobNode" + } + ], + "os.makedirs": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "os", + "module_short": "os", + "name": "makedirs" + } + ], + "os.path.abspath": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "os.path", + "module_short": "os.path", + "name": "abspath" + } + ], + "prepare_pythonjob_inputs": [ + { + "is_class": false, + "is_explicit": false, + "module": 
"builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida_pythonjob", + "module_short": "aiida_pythonjob", + "name": "prepare_pythonjob_inputs" + } + ], + "result": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "result1": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "result2": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "run_get_node": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.engine", + "module_short": "aiida.engine", + "name": "run_get_node" + } + ], + "value": [ + { + "is_class": false, + "is_explicit": false, + "module": "aiida_workgraph.orm.atoms", + "module_short": "aiida_workgraph.orm.atoms", + "name": "AtomsData" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida_workgraph.orm", + "module_short": "aiida_workgraph.orm", + "name": "AtomsData" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida_workgraph", + "module_short": "aiida_workgraph", + "name": "AtomsData" + } + ] +} \ No newline at end of file diff --git a/docs/source/autogen/how_to.ipynb b/docs/source/autogen/how_to.ipynb new file mode 100644 index 0000000..753485f --- /dev/null +++ b/docs/source/autogen/how_to.ipynb @@ -0,0 +1,190 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# How to guides\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Introduction\n\nTo run this tutorial, you need to load the AiiDA profile.\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { 
+ "collapsed": false + }, + "outputs": [], + "source": [ + "from aiida import load_profile\n\nload_profile()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Default outputs\n\nThe default output of the function is `result`. The `PythonJob` task will store the result as one node in the database with the key `result`.\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob\nfrom aiida.engine import run_get_node\n\n\ndef add(x, y):\n return x + y\n\n\ninputs = prepare_pythonjob_inputs(add,\n function_inputs={\"x\": 1, \"y\": 2},\n computer=\"localhost\",\n )\nresult, node = run_get_node(PythonJob, inputs=inputs)\nprint(\"result: \", result[\"result\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Custom outputs\nIf the function return a dictionary with fixed number of keys, and you want to store the values as separate outputs, you can specify the `function_outputs` parameter.\nFor a dynamic number of outputs, you can use the namespace output, which is explained later.\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def add(x, y):\n return {\"sum\": x + y, \"diff\": x - y}\n\ninputs = prepare_pythonjob_inputs(\n add,\n function_inputs={\"x\": 1, \"y\": 2},\n function_outputs=[\n {\"name\": \"sum\"},\n {\"name\": \"diff\"},\n ]\n)\nresult, node = run_get_node(PythonJob, **inputs) \n\nprint(\"result: \")\nprint(\"sum: \", result[\"sum\"])\nprint(\"diff: \", result[\"diff\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Using parent folder\nThe parent_folder parameter allows a task to access the output files of a parent task. 
This feature is particularly useful when you want to reuse data generated by a previous computation in subsequent computations. In the following example, the multiply task uses the `result.txt` file created by the add task.\n\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def add(x, y):\n z = x + y\n with open(\"result.txt\", \"w\") as f:\n f.write(str(z))\n return x + y\n\ndef multiply(x, y):\n with open(\"parent_folder/result.txt\", \"r\") as f:\n z = int(f.read())\n return x * y + z\n\ninputs1 = prepare_pythonjob_inputs(\n add,\n function_inputs={\"x\": 1, \"y\": 2},\n function_outputs=[{\"name\": \"sum\"}],\n)\n\nresult1, node1 = run_get_node(PythonJob, inputs=inputs1) \n\ninputs2 = prepare_pythonjob_inputs(\n multiply,\n function_inputs={\"x\": 1, \"y\": 2},\n function_outputs=[{\"name\": \"product\"}],\n parent_folder=result1[\"remote_folder\"],\n)\n\nresult2, node2 = run_get_node(PythonJob, inputs=inputs2)\n\nprint(\"result: \", result2)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Upload files or folders to the remote computer\nThe `upload_files` parameter allows users to upload files or folders to the remote computer. 
The files will be uploaded to the working directory of the remote computer.\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "import os\n\n# create a temporary file \"input.txt\" in the current directory\nwith open(\"input.txt\", \"w\") as f:\n f.write(\"2\")\n\n# create a temporary folder \"inputs_folder\" in the current directory\n# and add a file \"another_input.txt\" in the folder\nos.makedirs(\"inputs_folder\", exist_ok=True)\nwith open(\"inputs_folder/another_input.txt\", \"w\") as f:\n f.write(\"3\")\n\ndef add():\n with open(\"input.txt\", \"r\") as f:\n a = int(f.read())\n with open(\"inputs_folder/another_input.txt\", \"r\") as f:\n b = int(f.read())\n return a + b\n# ------------------------- Submit the calculation -------------------\n# we need use full path to the file\ninput_file = os.path.abspath(\"input.txt\")\ninput_folder = os.path.abspath(\"inputs_folder\")\ninputs = prepare_pythonjob_inputs(\n add,\n upload_files = {\n \"input.txt\": input_file,\n \"inputs_folder\": input_folder,\n },\n)\nresult, node = run_get_node(PythonJob, inputs=inputs)\nprint(\"result: \", result[\"result\"])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Retrieve additional files from the remote computer\nSometimes, one may want to retrieve additional files from the remote computer after the job has finished. 
For example, one may want to retrieve the output files generated by the `pw.x` calculation in Quantum ESPRESSO.\n\nOne can use the `additional_retrieve_list` parameter to specify which files should be retrieved from the working directory and stored in the local repository after the job has finished\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def add(x, y):\n z = x + y\n with open(\"result.txt\", \"w\") as f:\n f.write(str(z))\n return x + y\n\ninputs = prepare_pythonjob_inputs(\n add,\n function_inputs={\"x\": 1, \"y\": 2},\n metadata = {\n \"options\": {\n \"additional_retrieve_list\": [\"result.txt\"],\n }\n },\n)\n\nresult, node = run_get_node(PythonJob, inputs=inputs)\nprint(\"retrieved files: \", result[\"retrieved\"].list_object_names())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Namespace Output\n\nThe `PythonJob` allows users to define namespace outputs. A namespace output is a dictionary with keys and values returned by a function. Each value in this dictionary will be serialized to AiiDA data, and the key-value pair will be stored in the database. \nWhy Use Namespace Outputs?\n\n- **Dynamic and Flexible**: The keys and values in the namespace output are not fixed and can change based on the task's execution.\n- **Querying**: The data in the namespace output is stored as an AiiDA data node, allowing for easy querying and retrieval.\n- **Data Provenance**: When the data is used as input for subsequent tasks, the origin of data is tracked.\n\nFor example: Consider a molecule adsorption calculation where the namespace output stores the surface slabs of the molecule adsorbed on different surface sites. The number of surface slabs can vary depending on the surface. 
These output surface slabs can be utilized as input to the next task to calculate the energy.\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from ase import Atoms\nfrom ase.build import bulk\n\n\ndef generate_structures(structure: Atoms, factor_lst: list) -> dict:\n \"\"\"Scale the structure by the given factor_lst.\"\"\"\n scaled_structures = {}\n for i in range(len(factor_lst)):\n atoms = structure.copy()\n atoms.set_cell(atoms.cell * factor_lst[i], scale_atoms=True)\n scaled_structures[f\"s_{i}\"] = atoms\n return {\"scaled_structures\": scaled_structures}\n\ninputs = prepare_pythonjob_inputs(\n generate_structures,\n function_inputs={\"structure\": bulk(\"Al\"), \"factor_lst\": [0.95, 1.0, 1.05]},\n function_outputs=[{\"name\": \"scaled_structures\", \"identifier\": \"namespace\"}],\n)\n\nresult, node = run_get_node(PythonJob, inputs=inputs)\nprint(\"scaled_structures: \")\nfor key, value in result[\"scaled_structures\"].items():\n print(key, value)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Exit Code\n\n\nWhen the function returns a dictionary with an `exit_code` key, the system automatically parses and uses this code to indicate the task's status. 
In the case of an error, the non-zero `exit_code` value helps identify the specific problem.\n\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "def add(x, y):\n sum = x + y\n if (sum < 0).any():\n exit_code = {\"status\": 410, \"message\": \"Some elements are negative\"}\n return {\"sum\": sum, \"exit_code\": exit_code}\n return {\"sum\": sum}\n\ninputs = prepare_pythonjob_inputs(\n add,\n function_inputs={\"x\": 1, \"y\": -21},\n)\n\nresult, node = run_get_node(PythonJob, inputs=inputs)\nprint(\"exit_status:\", node.exit_status)\nprint(\"exit_message:\", node.exit_message)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Define your data serializer\n\nPythonJob search data serializer from the `aiida.data` entry point by the module name and class name (e.g., `ase.atoms.Atoms`). \n\nIn order to let the PythonJob find the serializer, you must register the AiiDA data with the following format:\n\n```ini\n[project.entry-points.\"aiida.data\"]\nabc.ase.atoms.Atoms = \"abc.xyz:MyAtomsData\"\n```\nThis will register a data serializer for `ase.atoms.Atoms` data. `abc` is the plugin name, the module name is `xyz`, and the AiiDA data class name is `AtomsData`. Learn how to create an AiiDA data class [here](https://aiida.readthedocs.io/projects/aiida-core/en/stable/topics/data_types.html#adding-support-for-custom-data-types).\n\n*Avoid duplicate data serializer*: If you have multiple plugins that register the same data serializer, the PythonJob will raise an error. 
You can avoid this by selecting the plugin that you want to use in the configuration file.\n\n\n```json\n{\n \"serializers\": {\n \"ase.atoms.Atoms\": \"abc.ase.atoms.Atoms\"\n }\n}\n```\nSave the configuration file as `pythonjob.json` in the aiida configuration directory (by default, `~/.aiida` directory).\n\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What\u2019s Next\n+-----------------------------------------+------------------------------------------------------+\n| [Tutorials](../tutorial/index.rst)_ | Real-world examples in computational materials |\n| | science and more. |\n| | |\n+-----------------------------------------+------------------------------------------------------+\n\n\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/docs/source/autogen/how_to.py b/docs/source/autogen/how_to.py new file mode 100644 index 0000000..35c0438 --- /dev/null +++ b/docs/source/autogen/how_to.py @@ -0,0 +1,271 @@ +""" +How to guides +=============== + +""" + + +###################################################################### +# Introduction +# ------------ +# +# To run this tutorial, you need to load the AiiDA profile. +# + +from aiida import load_profile + +load_profile() + + +###################################################################### +# Default outputs +# -------------- +# +# The default output of the function is `result`. The `PythonJob` task will store the result as one node in the database with the key `result`. 
+# +from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob +from aiida.engine import run_get_node + + +def add(x, y): + return x + y + + +inputs = prepare_pythonjob_inputs(add, + function_inputs={"x": 1, "y": 2}, + computer="localhost", + ) +result, node = run_get_node(PythonJob, inputs=inputs) +print("result: ", result["result"]) + +###################################################################### +# Custom outputs +# -------------- +# If the function return a dictionary with fixed number of keys, and you want to store the values as separate outputs, you can specify the `function_outputs` parameter. +# For a dynamic number of outputs, you can use the namespace output, which is explained later. +# + +def add(x, y): + return {"sum": x + y, "diff": x - y} + +inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[ + {"name": "sum"}, + {"name": "diff"}, + ] +) +result, node = run_get_node(PythonJob, **inputs) + +print("result: ") +print("sum: ", result["sum"]) +print("diff: ", result["diff"]) + + +###################################################################### +# Using parent folder +# -------------- +# The parent_folder parameter allows a task to access the output files of a parent task. This feature is particularly useful when you want to reuse data generated by a previous computation in subsequent computations. In the following example, the multiply task uses the `result.txt` file created by the add task. 
+# +# + + +def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + +def multiply(x, y): + with open("parent_folder/result.txt", "r") as f: + z = int(f.read()) + return x * y + z + +inputs1 = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "sum"}], +) + +result1, node1 = run_get_node(PythonJob, inputs=inputs1) + +inputs2 = prepare_pythonjob_inputs( + multiply, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "product"}], + parent_folder=result1["remote_folder"], +) + +result2, node2 = run_get_node(PythonJob, inputs=inputs2) + +print("result: ", result2) + +###################################################################### +# Upload files or folders to the remote computer +# -------------- +# The `upload_files` parameter allows users to upload files or folders to the remote computer. The files will be uploaded to the working directory of the remote computer. +# + +import os + +# create a temporary file "input.txt" in the current directory +with open("input.txt", "w") as f: + f.write("2") + +# create a temporary folder "inputs_folder" in the current directory +# and add a file "another_input.txt" in the folder +os.makedirs("inputs_folder", exist_ok=True) +with open("inputs_folder/another_input.txt", "w") as f: + f.write("3") + +def add(): + with open("input.txt", "r") as f: + a = int(f.read()) + with open("inputs_folder/another_input.txt", "r") as f: + b = int(f.read()) + return a + b +# ------------------------- Submit the calculation ------------------- +# we need use full path to the file +input_file = os.path.abspath("input.txt") +input_folder = os.path.abspath("inputs_folder") +inputs = prepare_pythonjob_inputs( + add, + upload_files = { + "input.txt": input_file, + "inputs_folder": input_folder, + }, +) +result, node = run_get_node(PythonJob, inputs=inputs) +print("result: ", result["result"]) + 
+###################################################################### +# Retrieve additional files from the remote computer +# -------------- +# Sometimes, one may want to retrieve additional files from the remote computer after the job has finished. For example, one may want to retrieve the output files generated by the `pw.x` calculation in Quantum ESPRESSO. +# +# One can use the `additional_retrieve_list` parameter to specify which files should be retrieved from the working directory and stored in the local repository after the job has finished +# + +def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + +inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + metadata = { + "options": { + "additional_retrieve_list": ["result.txt"], + } + }, +) + +result, node = run_get_node(PythonJob, inputs=inputs) +print("retrieved files: ", result["retrieved"].list_object_names()) + +###################################################################### +# Namespace Output +# -------------- +# +# The `PythonJob` allows users to define namespace outputs. A namespace output is a dictionary with keys and values returned by a function. Each value in this dictionary will be serialized to AiiDA data, and the key-value pair will be stored in the database. +# Why Use Namespace Outputs? +# +# - **Dynamic and Flexible**: The keys and values in the namespace output are not fixed and can change based on the task's execution. +# - **Querying**: The data in the namespace output is stored as an AiiDA data node, allowing for easy querying and retrieval. +# - **Data Provenance**: When the data is used as input for subsequent tasks, the origin of data is tracked. +# +# For example: Consider a molecule adsorption calculation where the namespace output stores the surface slabs of the molecule adsorbed on different surface sites. The number of surface slabs can vary depending on the surface. 
These output surface slabs can be utilized as input to the next task to calculate the energy. + +from ase import Atoms +from ase.build import bulk + + +def generate_structures(structure: Atoms, factor_lst: list) -> dict: + """Scale the structure by the given factor_lst.""" + scaled_structures = {} + for i in range(len(factor_lst)): + atoms = structure.copy() + atoms.set_cell(atoms.cell * factor_lst[i], scale_atoms=True) + scaled_structures[f"s_{i}"] = atoms + return {"scaled_structures": scaled_structures} + +inputs = prepare_pythonjob_inputs( + generate_structures, + function_inputs={"structure": bulk("Al"), "factor_lst": [0.95, 1.0, 1.05]}, + function_outputs=[{"name": "scaled_structures", "identifier": "namespace"}], +) + +result, node = run_get_node(PythonJob, inputs=inputs) +print("scaled_structures: ") +for key, value in result["scaled_structures"].items(): + print(key, value) + +###################################################################### +# Exit Code +# -------------- +# +# +# When the function returns a dictionary with an `exit_code` key, the system automatically parses and uses this code to indicate the task's status. In the case of an error, the non-zero `exit_code` value helps identify the specific problem. +# +# + +def add(x, y): + total = x + y + if total < 0: + exit_code = {"status": 410, "message": "Some elements are negative"} + return {"sum": total, "exit_code": exit_code} + return {"sum": total} + +inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": -21}, +) + +result, node = run_get_node(PythonJob, inputs=inputs) +print("exit_status:", node.exit_status) +print("exit_message:", node.exit_message) + + +###################################################################### +# Define your data serializer +# -------------- +# +# PythonJob searches for a data serializer from the `aiida.data` entry point by the module name and class name (e.g., `ase.atoms.Atoms`).
+# +# In order to let the PythonJob find the serializer, you must register the AiiDA data with the following format: +# +# .. code-block:: ini +# +# [project.entry-points."aiida.data"] +# abc.ase.atoms.Atoms = "abc.xyz:MyAtomsData" +# +# This will register a data serializer for `ase.atoms.Atoms` data. `abc` is the plugin name, the module name is `xyz`, and the AiiDA data class name is `AtomsData`. Learn how to create an AiiDA data class `here `_. +# +# *Avoid duplicate data serializer*: If you have multiple plugins that register the same data serializer, the PythonJob will raise an error. You can avoid this by selecting the plugin that you want to use in the configuration file. +# +# +# .. code-block:: json +# +# { +# "serializers": { +# "ase.atoms.Atoms": "abc.ase.atoms.Atoms" +# } +# } +# +# Save the configuration file as `pythonjob.json` in the aiida configuration directory (by default, `~/.aiida` directory). + + + +###################################################################### +# What’s Next +# ----------- +# +-----------------------------------------+------------------------------------------------------+ +# | `Tutorials <../tutorial/index.rst>`__ | Real-world examples in computational materials | +# | | science and more. | +# | | | +# +-----------------------------------------+------------------------------------------------------+ +# +# diff --git a/docs/source/autogen/how_to.py.md5 b/docs/source/autogen/how_to.py.md5 new file mode 100644 index 0000000..721c57c --- /dev/null +++ b/docs/source/autogen/how_to.py.md5 @@ -0,0 +1 @@ +f8c57716af1376de7aab8e8425f3d9c8 \ No newline at end of file diff --git a/docs/source/autogen/how_to.rst b/docs/source/autogen/how_to.rst new file mode 100644 index 0000000..4564f52 --- /dev/null +++ b/docs/source/autogen/how_to.rst @@ -0,0 +1,477 @@ + +.. DO NOT EDIT. +.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. +.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: +.. "autogen/how_to.py" +.. 
LINE NUMBERS ARE GIVEN BELOW. + +.. only:: html + + .. note:: + :class: sphx-glr-download-link-note + + :ref:`Go to the end ` + to download the full example code. + +.. rst-class:: sphx-glr-example-title + +.. _sphx_glr_autogen_how_to.py: + + +How to guides +=============== + +.. GENERATED FROM PYTHON SOURCE LINES 9-14 + +Introduction +------------ + +To run this tutorial, you need to load the AiiDA profile. + + +.. GENERATED FROM PYTHON SOURCE LINES 14-20 + +.. code-block:: Python + + + from aiida import load_profile + + load_profile() + + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + + Profile + + + +.. GENERATED FROM PYTHON SOURCE LINES 21-26 + +Default outputs +-------------- + +The default output of the function is `result`. The `PythonJob` task will store the result as one node in the database with the key `result`. + + +.. GENERATED FROM PYTHON SOURCE LINES 26-41 + +.. code-block:: Python + + from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob + from aiida.engine import run_get_node + + + def add(x, y): + return x + y + + + inputs = prepare_pythonjob_inputs(add, + function_inputs={"x": 1, "y": 2}, + computer="localhost", + ) + result, node = run_get_node(PythonJob, inputs=inputs) + print("result: ", result["result"]) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + result: uuid: 699baec5-5a9f-4a46-aafb-428b4dcab803 (pk: 19638) value: 3 + + + + +.. GENERATED FROM PYTHON SOURCE LINES 42-47 + +Custom outputs +-------------- +If the function return a dictionary with fixed number of keys, and you want to store the values as separate outputs, you can specify the `function_outputs` parameter. +For a dynamic number of outputs, you can use the namespace output, which is explained later. + + +.. GENERATED FROM PYTHON SOURCE LINES 47-66 + +.. 
code-block:: Python + + + def add(x, y): + return {"sum": x + y, "diff": x - y} + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[ + {"name": "sum"}, + {"name": "diff"}, + ] + ) + result, node = run_get_node(PythonJob, **inputs) + + print("result: ") + print("sum: ", result["sum"]) + print("diff: ", result["diff"]) + + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + result: + sum: uuid: cbc002ae-b34a-4673-b71d-29bf702a85dc (pk: 19649) value: 3 + diff: uuid: 9c3f0cdb-9f4d-46d2-bbc8-3cd2906b4587 (pk: 19650) value: -1 + + + + +.. GENERATED FROM PYTHON SOURCE LINES 67-72 + +Using parent folder +-------------- +The parent_folder parameter allows a task to access the output files of a parent task. This feature is particularly useful when you want to reuse data generated by a previous computation in subsequent computations. In the following example, the multiply task uses the `result.txt` file created by the add task. + + + +.. GENERATED FROM PYTHON SOURCE LINES 72-104 + +.. code-block:: Python + + + + def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + + def multiply(x, y): + with open("parent_folder/result.txt", "r") as f: + z = int(f.read()) + return x * y + z + + inputs1 = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "sum"}], + ) + + result1, node1 = run_get_node(PythonJob, inputs=inputs1) + + inputs2 = prepare_pythonjob_inputs( + multiply, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "product"}], + parent_folder=result1["remote_folder"], + ) + + result2, node2 = run_get_node(PythonJob, inputs=inputs2) + + print("result: ", result2) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + result: {'remote_folder': , 'retrieved': , 'product': } + + + + +.. 
GENERATED FROM PYTHON SOURCE LINES 105-109 + +Upload files or folders to the remote computer +-------------- +The `upload_files` parameter allows users to upload files or folders to the remote computer. The files will be uploaded to the working directory of the remote computer. + + +.. GENERATED FROM PYTHON SOURCE LINES 109-142 + +.. code-block:: Python + + + import os + + # create a temporary file "input.txt" in the current directory + with open("input.txt", "w") as f: + f.write("2") + + # create a temporary folder "inputs_folder" in the current directory + # and add a file "another_input.txt" in the folder + os.makedirs("inputs_folder", exist_ok=True) + with open("inputs_folder/another_input.txt", "w") as f: + f.write("3") + + def add(): + with open("input.txt", "r") as f: + a = int(f.read()) + with open("inputs_folder/another_input.txt", "r") as f: + b = int(f.read()) + return a + b + # ------------------------- Submit the calculation ------------------- + # we need use full path to the file + input_file = os.path.abspath("input.txt") + input_folder = os.path.abspath("inputs_folder") + inputs = prepare_pythonjob_inputs( + add, + upload_files = { + "input.txt": input_file, + "inputs_folder": input_folder, + }, + ) + result, node = run_get_node(PythonJob, inputs=inputs) + print("result: ", result["result"]) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + result: uuid: 413ecd04-bfb8-4155-b234-0bee1e1414e9 (pk: 19683) value: 5 + + + + +.. GENERATED FROM PYTHON SOURCE LINES 143-149 + +Retrieve additional files from the remote computer +-------------- +Sometimes, one may want to retrieve additional files from the remote computer after the job has finished. For example, one may want to retrieve the output files generated by the `pw.x` calculation in Quantum ESPRESSO. 
+ +One can use the `additional_retrieve_list` parameter to specify which files should be retrieved from the working directory and stored in the local repository after the job has finished + + +.. GENERATED FROM PYTHON SOURCE LINES 149-169 + +.. code-block:: Python + + + def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + metadata = { + "options": { + "additional_retrieve_list": ["result.txt"], + } + }, + ) + + result, node = run_get_node(PythonJob, inputs=inputs) + print("retrieved files: ", result["retrieved"].list_object_names()) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + retrieved files: ['_scheduler-stderr.txt', '_scheduler-stdout.txt', 'aiida.out', 'result.txt', 'results.pickle'] + + + + +.. GENERATED FROM PYTHON SOURCE LINES 170-181 + +Namespace Output +-------------- + +The `PythonJob` allows users to define namespace outputs. A namespace output is a dictionary with keys and values returned by a function. Each value in this dictionary will be serialized to AiiDA data, and the key-value pair will be stored in the database. +Why Use Namespace Outputs? + +- **Dynamic and Flexible**: The keys and values in the namespace output are not fixed and can change based on the task's execution. +- **Querying**: The data in the namespace output is stored as an AiiDA data node, allowing for easy querying and retrieval. +- **Data Provenance**: When the data is used as input for subsequent tasks, the origin of data is tracked. + +For example: Consider a molecule adsorption calculation where the namespace output stores the surface slabs of the molecule adsorbed on different surface sites. The number of surface slabs can vary depending on the surface. These output surface slabs can be utilized as input to the next task to calculate the energy. + +.. GENERATED FROM PYTHON SOURCE LINES 181-206 + +.. 
code-block:: Python + + + from ase import Atoms + from ase.build import bulk + + + def generate_structures(structure: Atoms, factor_lst: list) -> dict: + """Scale the structure by the given factor_lst.""" + scaled_structures = {} + for i in range(len(factor_lst)): + atoms = structure.copy() + atoms.set_cell(atoms.cell * factor_lst[i], scale_atoms=True) + scaled_structures[f"s_{i}"] = atoms + return {"scaled_structures": scaled_structures} + + inputs = prepare_pythonjob_inputs( + generate_structures, + function_inputs={"structure": bulk("Al"), "factor_lst": [0.95, 1.0, 1.05]}, + function_outputs=[{"name": "scaled_structures", "identifier": "namespace"}], + ) + + result, node = run_get_node(PythonJob, inputs=inputs) + print("scaled_structures: ") + for key, value in result["scaled_structures"].items(): + print(key, value) + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + scaled_structures: + s_0 uuid: f3d579d4-d97a-4bac-8dc7-34c8938e5a8c (pk: 19705) + s_1 uuid: b4e51314-b40f-48b5-8432-0567be4bdb94 (pk: 19706) + s_2 uuid: 68150882-5f6e-4b06-a819-3dad08b33657 (pk: 19707) + + + + +.. GENERATED FROM PYTHON SOURCE LINES 207-214 + +Exit Code +-------------- + + +When the function returns a dictionary with an `exit_code` key, the system automatically parses and uses this code to indicate the task's status. In the case of an error, the non-zero `exit_code` value helps identify the specific problem. + + + +.. GENERATED FROM PYTHON SOURCE LINES 214-232 + +.. code-block:: Python + + + def add(x, y): + sum = x + y + if (sum < 0).any(): + exit_code = {"status": 410, "message": "Some elements are negative"} + return {"sum": sum, "exit_code": exit_code} + return {"sum": sum} + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": -21}, + ) + + result, node = run_get_node(PythonJob, inputs=inputs) + print("exit_status:", node.exit_status) + print("exit_message:", node.exit_message) + + + + + + +.. rst-class:: sphx-glr-script-out + + .. 
code-block:: none + + 11/28/2024 10:23:20 <1209555> aiida.orm.nodes.process.calculation.calcjob.CalcJobNode: [WARNING] output parser returned exit code<310>: The output file could not be read. + exit_status: 310 + exit_message: The output file could not be read. + + + + +.. GENERATED FROM PYTHON SOURCE LINES 233-259 + +Define your data serializer +-------------- + +PythonJob search data serializer from the `aiida.data` entry point by the module name and class name (e.g., `ase.atoms.Atoms`). + +In order to let the PythonJob find the serializer, you must register the AiiDA data with the following format: + +.. code-block:: ini + + [project.entry-points."aiida.data"] + abc.ase.atoms.Atoms = "abc.xyz:MyAtomsData" + +This will register a data serializer for `ase.atoms.Atoms` data. `abc` is the plugin name, the module name is `xyz`, and the AiiDA data class name is `AtomsData`. Learn how to create an AiiDA data class `here `_. + +*Avoid duplicate data serializer*: If you have multiple plugins that register the same data serializer, the PythonJob will raise an error. You can avoid this by selecting the plugin that you want to use in the configuration file. + + +.. code-block:: json + + { + "serializers": { + "ase.atoms.Atoms": "abc.ase.atoms.Atoms" + } + } + +Save the configuration file as `pythonjob.json` in the aiida configuration directory (by default, `~/.aiida` directory). + +.. GENERATED FROM PYTHON SOURCE LINES 263-272 + +What’s Next +----------- ++-----------------------------------------+------------------------------------------------------+ +| `Tutorials <../tutorial/index.rst>`__ | Real-world examples in computational materials | +| | science and more. | +| | | ++-----------------------------------------+------------------------------------------------------+ + + + + +.. rst-class:: sphx-glr-timing + + **Total running time of the script:** (0 minutes 22.087 seconds) + + +.. _sphx_glr_download_autogen_how_to.py: + +.. only:: html + + .. 
container:: sphx-glr-footer sphx-glr-footer-example + + .. container:: sphx-glr-download sphx-glr-download-jupyter + + :download:`Download Jupyter notebook: how_to.ipynb ` + + .. container:: sphx-glr-download sphx-glr-download-python + + :download:`Download Python source code: how_to.py ` + + .. container:: sphx-glr-download sphx-glr-download-zip + + :download:`Download zipped: how_to.zip ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/docs/source/autogen/how_to.zip b/docs/source/autogen/how_to.zip new file mode 100644 index 0000000000000000000000000000000000000000..6c4ab6b2e5e93b95e9722f38466a1a729a03c1c5 GIT binary patch literal 22141 zcmeHP$&%a1nbvrjh++8X2graT#-vn4X|WHk=<4X%t&aI_;^1H z)Ppd5_&_bwWFAI=i{8C?nkc%LHxw1V~5>)H&2JvZ#`3={xgQL?RACVkz8EaPlY}UWPMh*^o3a>QCi!@dj>b6>; zg&tcSVmG^|?W6ym^knbNFUv?n?cpou!Z}cP_ms23`@J{Yj4@3XSZRBAVW0fs~X*gerypGG=G-@?g?6wH7Ina0YS&}MHwTk^kIIYoP zT33S>&;qIi8V1?YpW1o%)cILBJ)_nI7t6?p2+vdmx;Xr0a{J23iUkBpaw*g0I*nv>OKsW8tncu+RESp&mj()iu9}orH-toU=cKo_tlqk1#FRSM zYsqRUz2HYfbjcsct&wd`-ncE)P_C~|u%BgNd`bZcAb0a5ngwZx85X0e8;x|MN~V+_ zMaek?R9jleu0Ndy85|L0M>Ac~*TB}opxLQtm1(OXMDxIhS%VqOcry>DMdYW^N7`ZRyO%kntI^;}KdCTpPWGUg#*M&f(SMSN ztsKeS0hwxPzxP(!En~H7D~-#JCa78fqAbRBvItVr*qKAUGk59J7D*l$-E-;-zNZ&V z%C}nZJ+Nw%@8rEJbD7VU5_1~WPmOVb3MHakdPPPbHyfRCA;XMhn3^ZERtH*{%9y&H zwKQ*=PKSw_Hi-=%jVVI=W1j;(MXHoi?nAR@qocZ#GA;wsn}M7xaNripN2o zhQSL77eem19~tW<879SH&Cs-@2p66t2<$@y=zEgNE_`GeCWU|i?l^(+=ZNA(5}DMV zG4crGFgpuouH+46c*L~1b1stI2>Kc!8t;ym=kCS0A?gUx{!oDGVxb;AIePr)$&;^i z?4-WJJWOyX?$R07$uMhV*Efn_X6mj1+F%nRovCO$pPeN|gh-Km)ts5BlGSZv&x}m6 zu$i@(lGvj2GRZ>m2b*_MxRj<*GWiBRe93^f z)uGh5^puKH7C*+VXa^pUm813494XC%)$ZOLX*Rythu$VaL# zZL}I-utr4#E(jWhC=4aX@THEPbd8a6qYE3Wh>C$*1I1F{43<^8G7bC%iypzWpy1O+k%~VQLApYuXlE9M*)$#aXVwhsiis*- zN#X*A*<5svLQUXjD}~He#+U6fqUuL>_J>S_kJ98t5c}~|mj5+VW-eA!lF@u3sLQmE zMI@fqQ(`U)?t|@7I7UU1ajxx|r~VZ29VWwKSvoQ<;6Qx}%}4Q@#iT_NfhCA<{aKcz zViX$o9VxsiytcS6ywG`dR-|)QnX(AcijB3KLr?T(unvL|kNCg 
zkWy3n5sDWT4FeOJa}1x$M89%YcbY6$I7#45zNCShq4YfsqNuOG@}s&{qv<$2+0*ol zq=m^U^bj4C9?zX@^eTMgoM3M0Aroplr%;PrUmBcJ>)Q5Z$&OvxR3j)Eff>2aedj^s z43P6w{G)@s(R%IwnTVA2lcV$%WYTDw(dGGoh*movF%`;av53?Gtrp;QBFix2i{$ETCm@G}(}| zU*3h-I><{h|5QrsOjJ~WX(5bY1f6bh3jVwZoGR3-lv550Fee5nXTY%KXWggWetU&Q zY`UgUCLL^^BMk4|y@KFV=+?|RfKIYR>DI25C4aeQxYlfFELqez?+r`q9{ zVO-ZC0Q94bND=V)DFQ<~f-L)k_WgK3{YqB(5Dar-)ygE4YcWb%xnSUA zI%e?CbuFWUksTRq4mWBWfHq^$Fs`EY8f$7h!*(T=IBNeLgdkSHMM%&ihaj|2tHG&*hKu)e^DZ zS-yyFGQW&Zhgq>CZefTm8&2h-7YoloHR`RHK&<|n1t?_o%A2A8zQC}z$XMjj7gL%YRF#^-t zj>)Vf%9eFl3c*gY$ux=Q;c0;rFdlTV%AsxgO|>qt-(&=iXhD3X-zbw#B}zJr7fIzE zzMM62W%$q`#SmBBUa2?wBi``g3Axx!CUcST(xN9^QU^)ZixTNrTUuEhSAFma$uD6e zFcJU0?@IG=Riqyv(7r+w`878EZ-4&JOnr#}AB(c3SiIxg?b_GNKW^W$G0tsF@>-3b z8ca{7K5^ZBi}2ZpsS7ULG|M65BkaLz^*HdOA!=DjBut{lqz~LAI77q)RYsUKN!K&8 zZLeh#yN7Rke~l5-aw59b zO|58)BWmazDbWwRM0S+ z#@DfPTw48me{g*}gh;QBw3AJO12LA=mOPMP&|a6x5B zCjbPu8K^`EdKE;_bGGU_@M0~8JC-TDSl1K05+{fW!7Tv_+KV0_SJ$VBO(dZge;l|$ zFa8WQoo@>^_>pV>B^vq4BesxAVnC0cpdR%F!XCNwx9h4_uS#Qg(8;oCSuU(}71S{y zBsGT4x+XX7vgGbL_V$SvZ?BRKFeKO8)=3OzFM?g9rlzIBz>``Njf-rw(d1pH9(MpI z5%@6E7A)o8VUi|t;Q8)bV3Jz>KjOHr%K(%|{aW2BjFELaWDI54bIv^rxvW&v!n<%* zK$xe#vF_$MuLUF;q-T4!hCJ4=o_g-IW9}B-b8U75_^eFVrI1hE`HE~;!zPJg0Qysm zk&O`E4FFoLyP}o^6iXmBJ^GY}yX#hZ0n%c zj+I){+OXGdZ&0oOzFEI@H-iIhBraiQmw9i2U^k5NHbT3`Zb4!7*v4`BcurtK@oCi4 z>R)yM!*ZOrqG3&Ny$KH65Ld{Sh@uVM+KAZJcH1t5H*CuD`ihKsyg^n8 zc7=W!uh7xfZWd$t`YP6XFn|Zq-XR1?-1e$dchFW!s{ zCWF2Mz+g?p2f_s#4iG7F^lig}$!}JM|Beb4?ECJhU@~wAd-B~%3iYYg8lc%8W_2N& zxmpjPg?ahpb`4s*6R24`2G8N2Cq(kf$z_SGwJ@_5uvz!ey<1>ZdOG8B042H<-CQ{) zfvBNpIk$l~ueEqbd~-EQi`rupmZU z$rR2jSZ5DlXXLT0dsc8~2?yGE=e1$Gi)O9W=rp=QJ#%n<&>^nWRM(`E@Sb^4Yidq} zfhjx{gqdZK^2o8(g+YczKdBDQL)5evo6-x6wS5FP&1g|;B^iid^HB^Tq3(q`klz`!TaF4 zIm5QEcZGHDHpjNqG}^tsp54%jwI*`_0}JN24IXx$2Y6mS;!|~0H-j1myeq?R;;rPT z1908Zu2bEwP|6XWmo3awarzmE>K@@(?4_^Y*TfhkvST{}i8)W|b24IEgnTY=gy*U_ zfF%dWa62Th==@F0axUC@f!|KTlOt$sC87sIvj%ZXdpCwOiJQ|B0-kL~E^QrBB8SEur*;0TZP>lS7KHBMIaJNMnYcv9Bz 
z3yTq+Iq?Lk7#?>3mA@qE<%Q8Aj1xW&d#Cs2=!<{&(a(Rp`FCIWd$4@{^)LLt{1w=G nx2{J3*qE=s{$&q8AZxnG0|7^0{PZ7r=STSSC%pgPGk*Jj;2`F0 literal 0 HcmV?d00001 diff --git a/docs/source/autogen/images/thumb/sphx_glr_how_to_thumb.png b/docs/source/autogen/images/thumb/sphx_glr_how_to_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..b06c4e6a17748efb9e7d009eb1e161759cfa2b74 GIT binary patch literal 35467 zcma%i^;?tw`!+FRG^3l1?vO^1!GIA`(j}eJB`M&5(IccpNd@Wd5{c2>sUj&MNDF@P z{dxX?=RS`8vK{xm>vhF>ombqkI$Em41oQ+L7#PHAaF`wj1}5s^LyM35@Qt^5ejo-$ zoSqsCs{bMO1EzmSV>*=Z+};E=6m!b+j2&Z z$>H&#dvdVD>d;b#z)+pt5B0KCUPo%5aex8}3Da`CzqmKq|&3VFC#JcU_|ar80edVT;*Put6uqW<46Hk@Cr znCqlm!apD%Q)Bd#m{NEKny*(VvZIhlq!|`TT)3VUN8MxGh}S@0DPuf-yfQ}fcy>_< z7>ywe)|VTTR>JUtccR@Gt`9n$kF=!%) zxqFzsC}4!_5Qxoq56?g?Mk#L9E=LZC93IpTNe^wL-1VN!RY-mvWW`pe7(}s~x5Sra zfdqia$wj!Vypl|<_;eCP%a*-gJj3G0ZNff?;Iw+;iD|;QLxy>dW#s5`S5Kaz@Svk@ z0%}Y`-S9sI!YJFf_DCs1@~sU$tEkrlfS`9sJ86O{E)h0Y-#-3|2hErea((Tl1>S|h z2<}W4(g-t#y!HR;#_c}|(#_qX_mqnG-qac3LCo$MrXuJ-4f#(M^&~2c8Yx8W(ST5v z8#NE3*LpsohpHTtNvryaO-q7yofTf5z+J4i!$olt!TyAb)&HHrb(6h*7&TXm-_I?` zP8aaT;H{7f<}YIInsv-saH>?ALOjk)CM4dLJT8{;sZ~#`(>C@9)2yn|JRF7Fbt5PZzTODev#;1J%jOVlsCLd$GYdb?RP*}6et z;-hV`|NXu0RjJCac>A6Mcul)6IKLo6L2w0SFcpvgt)}xmUriXfu_FA=VqgE!v`2r` zy9-lW%>$++_&>|z09|XlD7aNWH67lLSu#hCpov23Aw9||14RZl;BS39=D=s1!HS=8LE5;6Hf=# z;U)GBoli$TgG(uEvICQ05>!?K@87Of5iKT|8#47g;=b_0u{WI4w&9UO|7gUeVy{xj(3bU_Gwm{x?n6%~l0Q29 zJFvs!*nrL33&@x~8n{hRi1W{lzt>0{h;!>$;&#M{-C2KjFS!~BcME2L#OrOcdmCXk zXc`pc*;_I-o>eM{vudEqE53yBmV7h!!Tis!9f%yfDpaxBC_c=SaDpcH4ng|TUDHiG z&3_qmz?&u&Q(wjOV zj5ph9TVG2QN%hLwnZ7PA8#5}*i5F2ak(JKi}vY>04%=~?J``x z5nLa3?wtDP!&zDt0C!dDKHyduJvFN&zfx}yv9}Rb!Am9V{paFLRb_7rnC)#3VeZZG zTG`fe7wg#QPHNx-U_fljHermix=picltwKXMPoNMc3u@KuQJ)K&;z_8tPna0`w;2^ zg-1&j#rkj0)vfS6Nxgx@kkc0Yjs<#hH*w=rfu)QTJ5aNmA?Xq?XSvFv}3LDB-*a5m!0i07aUc1@oYB7U8h zk{#!`Z4h@vnp*_2kRK@ewdZA9M!}D;vQ?6zp_D6X_>X3VvZ$sV221@WGni^+jmZBQ z^;@*CqA>zLp9VOs6pMvka?5DURkHQdaOG`Oj9~y^(|V-0%(t;M^sgJci*t@)_3S2*fFC 
z&Uyf)-x$+;9Xm@+V+PxZlWYy634t?>X(al%EU{+HR4vg-1-;hvS1&&JxAa`p3oL4> zx%km*YOepDtJ2fz(zMZdBQz(aH#R z(18r_rn~&bTP+n+lg?6^uGmSZT8h@r=*e?IPEEbsng1Zoi;2_e5^|}vVG>I74upA4B?D1s-!C^4OaYj1{a79mlbRny>N2t?+{0sJ$t1jjy#`LhP?!g)P zuaw6HfD|}RTa&j+TLt5np}v9^w97%y%J2!(&ASRzIg;P7z)g!&*vc!_#_k=}aA+3w zKeM+)S@-F_7TcqOPcm!42$tMvSidbs+2AXe%G`GJ6I6|kmJLQ>&gy-jZ{3QcGkein zfC!(n!|$7XZ3Cu4pPC+|r))C_7-GL63EK8nG5p6$%*X^smyDyZwy1jAW0(Eyn)y^r zZ?!hRz}%2zi(_EiTq)nx}wSPcA^}}>tCPbC*Gi{Ph9}%uiY`k{S z_6JY@Mk}-5pW9GkJ1YT1O!LYgA+A}?CPRP1yiy4$xm?))DYTCf%2U=Q9}ck9NOEuG z3zy?w_X3&lD)*R`-l6BP;(S)gKJ{GkYz>rINtrlQl1?@@K~9%`q|;93uyL;=S6_`x z%B}5hX5q}r)?z8)WuCp$5oY@50$zkYL#ew^MK^rlY;?t|^`)1bmI1%x9aHU4_q?7v&T9AtoiHn;TI=>Z)71k9XwJIKiZM1zkfsrBU=fV zDfyT}Vj|Bl71}9$o|*pSzb6Nm0+^uyZ|W=FR`DHm7=3#ROE*5S^>wNJkki?SoeDXJD0j;Aac=}S@Z#8OB|454u zKr82*30+vKWTNM$HsIlLIO3snRQ3bubb}iBgUOpZLM8m(ORAshCXJ^|q&B=Qz;;dJ zNP>@~wa_KSY$8pQr>2*8^uUL)nPqJdn*L@k0z{chk#cJS87%o`O7LGkc971+w_<{j zJSo4`Pm(zJfF`bI0M^Z+e|Qr#GE2W*)c+jP8&%tEG-+z~G+sz+OEHHMv?~f)Lck^8 zM$I`?8|3r)Z}f)ZlMe^EHe_)a$C&GDq(s%P?#bj(IQc+|m9+*B-xFq7{EHQnxb%FB z2(fi}eWI7*_3wlbW^p;d^;l7a;L-%9K1QZd#I}BmQG7)qFMER#`Eo_I_|q@K;^m8MQ>69_3Wm z;yYjMq>$YUd)rD!a@1kHq_=;}#7YrQ%N0U(e+a0fg~2*7*JM4ii_LP=A(19~>ECh_ ziy9V13>TYZ$-w?(rrp)e2e}Ozf_eV}QH0QHzV99Uq=90&Lejp`xTNK?Xz@^U{^rZ5 z!~qb&YnQZNs)z-)sz3M?38{> z^z2;Z?2vJ3Ab!ty=&{+YxTv@&wb-S5%{$~D!8YNkTILT%B`am4>i@i-+bWBr7BolB z4rlAe&()xaaXxiLX*kGw{~{`Em@P;TUo8l?5goE7v{;1m=V--i`_X;g zde_Gl|If@(#4HUK1Ra^&w)-;X+yL{}<|6GW^MB;06PVySwSvvNZ6S`& z9bY|%ve!-BzmGu#>u!6#Zqnw6d#jHb`|M94UyRwFN=rDttF*UVW%Jo~T=h#)x&}W% zPu|I&S{`d89bg)HE8D7QO zz|xv!7Y(JtpvF!va5YIn3a!l6P+5H8o&D83aKWJImR7~cqG%_AwBb33prE!pX3#*% z=#E?l!R#t~v4S6gHPt*q54Hg$v5SY zPIXnyzK?q;Ca_?|u5KHbLl)xi329LM^*!T<<)?S8b#~wI4BVhgZgSk>lJU$iu@$li zUkd!Gm-F#dS8{0R)0lx0ADM@RL|+oo7pZ zYw+EJ?V%JK73Qb-l%b>CMdJMF%e1arJMW4Eo+k?^l?f>}UWnBz;{Y_T5zCose2KZH znq^^`w6D-aW&f3S$Pp`xi(@V`)S5SV3ySgMukXn%2x_A8jE z8J!nqF)^gct*fk4?@?}FCoTqIC77EYhMY0^j&kz-%j$hZU;{eK*KwGVtlif}Dns<9 
z2>5q-m_#W2gc)}o4r2{`tuaQ!Qa)LKSk*dd7)l44HWGHlGyx`ZOZVW_tHvw&LQ)0B ze#s*i)x_iI-eHr_r4VfyUt+X%zd?i42caN44)c0Hnh)~^b~`7-J;EAXy8B1=Fk~gS z?WmdF+HSo$vI{S8B8!3VUuiPR$Q#a;aw1I=dc#C#}|Iq~s0K7Kfc{faj&o|7mU`U0(W3t%`2 z-1LD9H@+n-7t*aRC%k4SH7oJ3xHp-_2>{lmQiC;|G2H4IjB^Qyl7ZVYK{T!IZDfJ` zidYhdk;AO>qrbiW zrFukDVEU~HqN0NEJ}9wR4W_5m<0478<lKm^JQLE^RmK1;Qplw?5Ry#T z)aiqRiuztm=b?OHu7dX7heEeZBdHI`vR4WJ;M>zB(%8da$`pTfv@mMok3(6p!0zhG zQszAAoqsauev3%fwQJvNoArt&+nZhV%C$In*5k}|xoohdVs159Veh-}=E*(l=e2dKi*DV+6(FIZNfn=N^#_|=EKbgrz-zN08mO!Dfnjz`Aa6?JH9=<4Y4T; zq}@E2xDfzMUC`wS^pD8R~ArPC{n?nZTbYO6~IA|TW1>m%Py8U z9~R-zOZIW<2ufZ@KRg-As3_ROmNDi#om7w0x*FmJ&p>t5cqkx#0E-vHBp^1QxQubS zZl0`sV{bRqemS@MzT@56fVtg$$lOCwvOtEJxlF$w`;V2mZMlZMi+y10u%r6 zLt@kw5By)5>+P^87*2-V?m_MR=@PRJG52#t-g@G!Dld1zlQOLHo@ChGy zu626|?CX|9h@CT267td~AM_(WP8jkz*x~9aBAN6dBQ}$k&q*qL2*LuE1_U13rXQNw zpqBQo0+87%ezt(xZ~1OJkG{1{I?4^7s)nu^l?J2%OSy)7%2Y3>CqkTk3zD`X4_DOw zrQ{_-n&lE9y!Q$d=#_Z(TLTALZjYy6;e6bJTaRW#dfFMtd(`cKcFD*1I3dSd z#(&7X>6kb)Wz4Of^e4P{oyz*HCAwm~?rThY!Ffv5lY6IFoOj$;-B@pDck}zFk+QW} z$*4!uoxQkre@~U(H~i($_z$8=?T9 z$+?~&uFPkyf}lsK?YoMjJCHi_5F?9jZD1XBh=XxeP6C6y^>r{_o1|aqnY))n*84g| z!GZ|~>6`cBW~Kphyiyhss}?w~|IuNd7L)*sJW?J+m)e03J$so+~Hs));zlCBZzCie=eb_=O9h6{O>$68NO8+a9#}EhC1In8=kZq5%xKGR=|FTQMjy{7wq~ zd=R>#ih7fJ=@7HKWQnLK#RKk@b^@SMh`1j&sL9S0t9`sSP@}1Ho|ch1XxlqinNB~{ zSe;LD{tAyA_!E$n!{DS}kcUw0mEjtBK{Ne39@rlzj(DkYW4?5j7AOb39F`oF-}^!N4-FWV4jrT z%|hH-D1cv&(>`GivGJ0(t|hJN$-f@@LiqU+l6wL2HRGxMx%*rBS@*51yC;x9D|%rB z!HFTb&IL;6X0l*PO#)nAnaZhW@>2cc%w8;+Z;T=}uI6a2>C$Bc_#T_;WUL!1jtzOk zbTU~n69MKy{7P5ja@Yu0Y2&tl}ICeYbj)NpzJ(A2y`y&T6)e^S4P4b zpO5YEQExruZzcDx+(Z?d8?ab{iQ2=n>Ck^GjFBRO-Fo#%jr~d%rCgWu^=xs_CN#Zp zaTv!={LP|vUT0X&R5#xOSu4LXtcAFBNruc>EI%jnObrtSe+XwY5-ltrG9dl)r4Z2} zeeJgc&4f5Um-$&}Ru}s)THwNr^q-|i1o49vN;y>v=1gr(FL+;wOz#S@7M4P(vq%jy zK`ty>g%YPUoUL7u7Qx~{qM5ejLe>kdXftL>h{*G)uBBDCr;>{j!Q;%7qXVl>@xacW zX#D>8A`=!MWP1d0lU{!j@W@l365a7s9|~>-t^G1iE@aDUl=KD}Pe@QOIIK zMZPb&{Y{7b_|{6p0&88MbhhzA1c_ZBf8BR|84eMmoVRX5ko2UGFqVhXYt%XGb}LL@ 
zc|Cl&Vu34LQDLQt@$M%Sx@gI+pQjvg+96*d*+ZxJ%Yv?`x|SCBQIL|qE4(gwyCD3$ zzNd$MsE8J);8@kY7-Bc&reJ!WwWh|?04Zq2Ze{VH?zuQVFQOd~6>awK7w9m3+A#fO@|A&T=ee?Q(ZSKEtUCB(P+tTxh~TWOTkpsu+ZFJof) zz!hI9b$Ci#2ynxtj@rIBdxo&YhSg=EZ>t=#(t z8=AT}qoAxVjO-TJy??{X_Z`Eu1Us5jsqaGix76Kw;t`YJC|)Z99!UHj<;VSqc?@;_ zbcV_Z1>p?Eh^`1iKNTc{F)sz!v({CQ%3jA}6$VD7{k`+wlvD924WKenYu9b_GOW1( zzq(c|?$3j|f4oMd)HTcY`wwVB{JV{cf-Pg_+h)h#an`zTg_%T7>kVq?%b|ytlaf+8 zyUdq#?*B%i2|n3^6xXz-6M2Varvko^cDErwBp+o83R%d#fHz51{t(5D)6h?8Zx5oy zg=V56S+Z%#MX05?eF`Shgz(EXHrr!!{`>$j%7!Pifm-93(LtMUV6{mdDKXD#m-HxF z7|4babB63N?=(t}dYU24&7Ry+mJ#O6U5`%aVXUDSH=zn|7&_$wgF@d>AC#&T`>}W@ z&n-{%oQze|Y_COBql)}ow{Ml}9V6>$Bys($W8yERa* z=sxUyn_&9WQGlYQzzY_MAy>l*Pt=;&D-uukg{Fj1~wo)bMupE6j znW`P|<~i5!pBu7aEVC&5&FBNs-r#qr?C(QgZns>?!G4fJb7i=*pi(4y5a!;{g@Y`m zVO7+XR`CO`Tht$P)me^e}2s^5riC_FV zH-`9b^G0mn7O^`b=3ALlz8S9yjg?~E%Co;PB{!*H!@qFf8ow^>+)g%2;D3i3{EC(z zY+YYjO-t@U4EgvB8P5CYfM4;`1gl2bY}GIN?|C%ePzT!pNHx`J2YO5n+~q4fOVW!T9Yxp+^TY#Q0)-z8&h458qqixNm7trYdm^do!t{>MNCW8-brI)M7TUVy3iIJ0dDUFh(Ykxng zIdJ>Qnuzm}e@dLsrtv*)J6sJ@xi;l$eztBFkw69fEzP4wjBHpaXgG~uJeS2;Y){+U zL*-J4;6A9-3hMpDF+`x~D)`;9{juUvcAlbF3aD75I|i`Uvf`r2VNSL>$9;J52>nC< zb;EagN}}gc_{AbaawO*}Qb_Zl(A}_zBNGb9%K2`XL$Wjy@zd?19vja8G~i8TErkiy z2+D6Z`l$A}^G%b&KG3~V3Ee4Ci0g>uq;NFM5SrAFD9|a#w}@LoEy(lgr&3{+VTh^3#eaW;Y??;eKko&{0c~WJh1~W z7*->zfWaiuCs*+St*gn1{{CO@SyOOokzdDfLt_^G?XrKUN1a12+ff?(`JK(!z%{|% z(KG{e=p4d^t$6&+VldZXqphYjb`MOPZU>*fmFvsT^9I()l>~(6lNDzIANnLnMfEG87vS%IdyLmu%N>S>f{3ExXyaK#p@j-uac-milP|h1f zf$D}zp9q!dvA%WG-NR-_Ucl#08dOFXF}ZFEQ#TXv6|LOWPg}gi zi%UbhY_WpKzBf%>d>6}W@b_0Oc-(6j>7B>k>=K&PhozDCwnQwVKVm00Cj8CX^UjYf zVq|ltJqT@@2Dpt8^4&iWKUGfRoXv`y0^r!-cP^TA4ZEi%b~u5mkRN=9&Dlb``N!2MJ|RH5H2P zB(I)(G^et#U?g`!XT{fw{ROrCWdeDQgu+Kev z(4}}}KEAUFmp4xPSVJ{z>z5QLBj#bRh{fPNjegn3%0Ntzae2z5hLLLk__GcIe*j!w zZrk^HARH#3Ajqb8NA!8_E7xse-HIPyZvamh3(!1oY7YN#EdED4@-?ht%seQa$xbTJ zcY{f2^v!uh<-~;ZuZ6FUQ$j3Vgvx+sN6Y%-I^iGA(vT1qTNl;hsc_0d*X&Srx4dgO zkJKPh>-K}$x0o&_`IWOd%ur-6MKl-O@4v3=->yZyfPcGz^0-?T|r4`p1; 
zWV;J!^P&&+pf0@KUh`W|{4OAOYIjs)4!xeHp57DxAM97xCirx%zK9 zqQa|97`>qu{zFDT#VscgREi3}ER4BkP)ov8`DpK^(z^$N(mq#sO7@&r!N`CbSXWh* ze_#nKHT!iABJN41z_;hs6KzLPO((2E!a^#aQxI{b+n5bX_fCHDW-sm3C?^UfSwL+D zJsKj~%HhfS;K#@VVZb+BGw|)s1VuyriVmc*s?UR3tcb6zUgALlwC9O?Quw`Egs(;} z(Y#-xwZ?qqo;|Sejvle9c3q*eXFbosm^rR0s-N82$y+}O7RETT#Q7xz8f#;+LaWOA zi2|NC7PvlFD5dR2Q{uP6o)T>|zG_}>^)ZMDzBHh#+t&hp{6z&mCSj6C{~%)+Q#o6ULJZ8^EzRAh(HfEREpXRPU}Jl`RBF|LNS^RCf9+xay7 zAzcTCKQo-`gj?NHd5fEmrG^te;;_VJhLX-3v{d;?Xe&8P`1U%QSK3KMP_ogik`eq2 zqDdgl(4voBw$?AWH33L8?r8dN9ug%3V72FU+7@NpU_J`$Jg#r@cP*v8EHhQe>~AIB`_7w8ybW z&4QV(?ck@50Zc`6wxN!lp<)1V_3Yx(8|ImaNMF1gqAZCuE%+AHn-CA#Y%oYjK4%tV zi+2*gc~}|?GAjw{q=@K02?-COs(YxyHR_p07)L;ngxSVnmZ5xlEvNT)$qzDB*2Fl_ z1sNxSS$@;$_lajMR{?-?W8CHYy6MViP3zkNo4%<#`HsULa$;JUpF`NiNI>*tL5JN* zlinfnAkLlaZ&f|9rs27;$RLI<2b_-?A%`kvMO>)*rn|yVlmpxE&23!Um_m)>Qc}Gb z;?@$JkD5Q6&$D4w)Z6;*d}&OJZgr($wOK(PeQMqnglLSX#Q7E15z*w|K8xl*s(;ja z@fQ4meGQ)@LtS-;chonZ^mvYZZsJku`_g-y|N-6uk*3Tn=e#KOh?({f=P6e{qArLTZ#MsnNm1SV-i04(@QX!7@hUl` zN!IY?J2Rt&0USC#&+U~wqv(nJGg3r9{#C?>F)sfn{Y19MhrbU~kzZ9$C&GD^(d z-$H5^k&ysjOs6eI*+AeQ1&9>=ovoZ_kAyB4n(pK(eETUv3LM2lQ;IHHx(l& zjl+h!Be>{Nv=;TCgd*+|9VJ|-)MoMD2An1^mp_pSJ+Ts}`8$a*+h8Q6`d0q7lf?P- ziNf-6gyPAW3~*8oP5Sq70pSToT@^D-@$iI}#3;VuCZ79k1qe9ipAI__hM>S(^Db8O zW{Gd9yqV{;ezNP!<|$#VX>8Q@srbJ7t0E{iMwKLVqn60(we_5 zX3f-)y~I?8;0y`PjqCM<8;jSuGOi84;al!L=!g} zG=*JHBB!u1B9B25H%in^(`d30_vi3-Qgbg81Z?Q&Ra>lv_9v1Sx`yBZLpKP^NLzEY zWTa$q`Ru@L+FU5==ScuHLeOfMgf!ybVCJxHS5-0OQ)MM_YvV=X;tU4p#T}fX3mb?g zY#PAR)w%C+GW8zLBb~0wV+EkA3tX$d4cJMCr>Dwgq!{SXw-Ww}qe2pqQcS;}Se%Gu zq>f2GyJih+c9UfCs&-a-z;3TEfY-;}^?1MzhfSgBzLe!EqOtT{j3X%4*T>!-p*V6< zfg|Z^mG!t!g+c(pbJw0^6zaTkWM;3b9W{Ty{ZuQ3)o4Jy*e?^}x4n#JZjsZ!W&-9NU z{o76xA)n26uWi>=nZRM^t|Rpqclp*#e?f?i;w z`+bEzhyIvK{&P(#^YW7OR~(pKI7HVH2x{Azy89LJ(}ul9d)(^hj@<7h12$0&lzj_* z%DOe|qxeM-Mcj8OI|FlJKK?tpD#h=%3s|p^@@F_r6J$9VEfnPYiAQg{jS|tkE&}1l zajF7!!-n94S#aGhmsllZlOfIaO10{|C0@M~uZ`eCSBUlz|KufIafBttJ*C|07w^Nd z$A6_jkN}WF#sqd4ORIw5kM@sjQM|Lf{f^8ZZaKw4<~n;XlbRLhv65x3tgAHXN`il& 
zYFgrjiwY^$l16`uEixr|0F z;0)Eu3#+l_ZwV*w_EhtNv+sUC3fZMXxVH+R)#VcT-*vQC|D=~Jn&_mrFygDD*De?> z%o{q(cUKy>!RWBwh7uvTg7Os-`P<#>ikjztcNxd zm`Luu;QN`i$6xGIO5&s$akGbbSj4bPtW>-#A^xyrsbJz$nH?Ik@QpX}wpx0~hhp#( z?7{zat?3|L0W(^5Wq%D)_c9LHiRM{9>qa&-_`xwc2s^M`1mtbwSy*x+Xvn@^e}*5T zLwy+9NL$SXNKD8XP_o~NRU9T}^Mj2Mh)GuOuly<^=?!yw+DS`XGsHCex;Zb1#gV%? zFGBey?{+!$M{XkruXjzU!KbB73$j2z(f0~}I#9AepJ{M|$uYa;XbV5;VzoyJNP>~eUbN!kE^;n71&3M7Ihs43Bo*D4jK(|+_N`w+Bp1l_D= z`+6dAn#h9Jh0~E!iOD0z?lQWFbYXk1c>hQ4kVUfVcKq0Lh;0{A|9+^SVRoojr_-<9sj+ok4Ef}(EdkJy)yU0FS7mScAW*>`SbKoG!QDA4<%`bU40fH8|B z6bI7~JJx-d%V=9B2zq9Bozna^zXTN_Jfy>eg<5hP;uTaCg7^Ok>3%;=d7T5)|_`o?8g#f5yBz1s7sv$N2+0oQxg;6)f-Ph#LlERDOUMAfFpJs zVQ2D@m-w`m>tRVcDr&pD-ElSaO;l_BJ~<{otg49+$w&=>?pdTNIgNFq^)%WWmqEmJ zrs`Rj?-ze1u~`R4t4Y}B613=j~QQmcP=uH(y}<4l2~7Yv^T(h}-4 z$m{r%*uGwo)%A8MISpF8M*NhgRJg$LRR$0@^K3+lMUj#XxhWU##(K~}Zv_>phgrcN zr(%wb@Fwk)eMWTnKMXdlk#^)E6;26)aj6R(tz;o7VZWK26c*$v2Kra^2~mohguDkl z;el+|3hXE|q6>^sCHbpl)^AtxCwS7#>7wi~`PXqjc~e+JSft+XV#-Pv(Fpr+t0>Mm zGW<|f%o4p~Rv+oaHla9PJaE-z+$4a+X%}r?Te)mtQ|cTS!EE))9~RUxfVYiAERk=> zcqb)=Lz>Xy#X47`_4ROc6MK$ewo!`?Pd7D396wmB`Gv~H@z0JN#N@k|Y-I{h;!Imd z1C8dK9`tu$&2&=G>AzK;HT!EcWAkJ{17Erz-}7d$_LVIoOeK~g$)^g4$UyW2%70V6$2F-z~P3F z%0CmR%|nIW3pE(4{Ij8nrmh5Z$>3hc3W{ORl=)fy1o(#yp7#tt?DF%V+s)(+P0_&- z2Rnr?#j2T5zDQ-@go-yLsS=^6ppC1FQk>1OYh#mgYhG*Wj9g7)Tdv(G`=>7}e7RYc z{G6m0Y0cu?S?e*YX34CTH@9$9N8YGKz#Kk1Xd6%WB9VsG^E5VT$ ztcOh#mS#%$U74&l((1b|=2}jSSGrbcoB*A)zj73Y>V*jvce{VOa+b}kly=*{#{URW zuTmlySYj{!zTJr18xv?J*? 
z*U`|yI7^pftVBxTimuE<+^)TP?OH^)zPuv6a)*F31B?flOZ0oW&+~U{QAB1a+ zH(1GPU;3Kxs6b+-&?e8Nk6a%offqhL_aLk*zF?Eod6+NHFA@}Eya0=pzvy$xt-BnL zk`UC;<@KN`lj#0XS3VF2XZ0bHps`*ueGn9CKD!=11CT4c ztt`paVO@w7TNx{Dp7T!+eM2Hf$_kN+Nu|?Txeq>ER#0@sCm+(<3gd?~0Ht4-ZA^f1 ztTAYFYzRN#1NA(t+5MRurvfUL=TGHN3=PvJ@XTv=nPGzEfrx89JTk8>mwI^?ocU(9 z-mq5^^N+^ZArYf;f5gg|<#GSWdY3F1V!a}6Rcl&w%}m+8O1jJQNGxBa!_1GldByho zje5x}H5;Y`0^m%p4$6ut>)LPx@njg%{1S!4b^<`$P{jt4{)~|PP9oq{J-X75JY~ct z(o=wQ)r0J-B&+DYidWi;$#rsM4S@WfAbm?1A-|*12R3+oedc8_Z4d%>(%N92d#SEbEM^Q&_L>qv{cv3by!{53G1mp9HgR;4kQ%)Q;*-TAk81&oA4_3?j%%N6Mm^rpT2!xFMi1i zZU&~js>(_8y~xz&5zulwkOgOAJLV$oF$?xKu7NklF_V{j67BDyW&wosN~@WaD$T1@ zXmth!;>V#S-8<#r_P0=z1>FwZ$Q1Q6$a?FMxy>^`UuC|B#X#~pANI~ThV!~t>o8f$;bENH^;ts5$7t-HJ&CEh)fBBWbC>3&H@6WIFPJ~0Fc5Y(fnjAw2}4^jfCGNT zE`%z;(kg`_^;aNZ=Xa!&B}Q2KQ;S@lmd&_Ns#wRi=&0?LnC1dk3+>52^}CISrn)CS z^@rc_sLml8FEG7{-8}1>#uI0rzalYlX)d@usxR>e!N*9W%D6_`PcqW}nuFMD_E9Gr z;Wm=m;Eh|@8fRDd@#D5v{k!*qO_!2IVFs2h^Vr1WLHZn%KYCYh%%zgX+Tn_}{UJrT zp&tVlH)8Oqjy%}wQmej1)%M4eWoHz)w+0^H%bJk`?WjM8bj%@|2x%f{d>(Uiz|n8A zf~{pi4;kV1{?Uq%kQWIJZ-gXfp3*ad%p%uY41*hHt^9r*8k7f5DrX9W(sh@ONW?U$ zsx$Aag?@Po;XV`wf3>+q!6Nb0ON^cOw`{EF%#jj6FCM?|_<2IZ+A}mL@L{`W!i-WG zPP@Qn-8I>?lpyx40t+=R#xTaKhF?`5v}tYEDh)F{W-}VY3Jz_rY)4mpr}H@z@F}QX z5ayewOj87tXyd-sW$n(~9C(~|_xM?D$`6G8d6Ak!)ziMKhg11m%emO?IzE*Y78Q_$ zW*b~$4l@4v;I={ZM({g|bv8v!p`)3Auxs=2Nd+BogH@ z;)9>LpmcVd7w?}F{BhgV*3qu61?^Gko3~J{bqB(^g9#scXg?n{b;$x7!NpCj;Q&i% z(0n_2ww>8Pa`bPxlN0EnjPH}3m4mgWqnMu|VFFcw*D&>=hwix z04FcAe@WbuYd7FNE|%MHN?NqE6s9w!4x`)wQJqj>#u6qnjsQRVkg~upmjBSJw6%4x z$|tTeY5<=Sm8YCPJKVgkU8T`Dkc&YFH@*`Jt-{G(rkR{D-~SL z!p;3R6!Kn~iHf$nQ0Or-dV4f3`A1C)I?O-35*E>``TKeotfphpC+=;BY+==8!Ih8r zvp>OhxXMwmXJPr3lKq~xE~yN~dBt3prLIfSO%DqTOMM=64>LSFQoJmRB|}{9H2|$O z%RO3kpE^NBkmmLbWY9$5_HYm>`80j>WprC4fA}@``lc(_pJgVsx=5IQ@!B86@}+J7zqgyjx%)Nz(FPLh^Xpyt3J94UJ$BPOn)@{*(I5XV?YTg9 zM%T&CrE`#*m6hm%GG5IYNU!`S5lN$C2x*b z(M`2-t>_vp%4hphY0$hj-=WyLy3x&SGN3T>YhQL7QJY(y&F{bt6!P&Y!APnpLSFK3 
z_M4*P;K(?R`fP*svI^yiahn(0aL}#cQwB%`CH_y$tYqfYiItIwh#OJ+CO*~3i_C|Y zTYT)?h_E&QU1@O7Z6>XS1}q#Q7DcQFuDkhY7y3MD8g_tUeQ3aJd*AL^w-Amr&s_B+ z?L2Y`d5839or*HoVwjSdrc;Tn{2F<9>Du$kvn-QVd&v6OG zc)VqG9+5&Ba>rdj!nYr3nu}5VU^Vh{uVJ{r@Pm!c6FSOzLGQDnj@j|HA(HzUlo2+T z%Jp-I^mf{Mijc;;0L6C(7*2RTi^+{-EZF_1 zbx$OZ89*eEJi|2fPa@aOn@Ed2pX82p>_x3gK8%D=BT5;)YFb!cJD+CZq@8a^LhxQB z7<{{rYe?(;IMSZ4pzmWp5(bx1n~bA`iFV$)C?T{AsYB#@7qZMc2`;ZLBzQiGG}QDk zq+&a^Lzzb<$V$4)DNF5!L1YW4A3y~5X(X`QZ7o|F?qVvobse-9K)qvL203P*tIM~pKArfQ-ezoVeYf* zSx0CHS&m{LH;h%8<~=Nl!pY)UWIv5|tk+OY$6jQmmD#b+BhomA+O6kx{~+}jp*SykF1TCM%lc=wmm>_Uo;Tr zkRO=#1y2m6gnnD62%|u~E5%zJK0fk&Bp54!gX1N{M4J2$kTB^(0=*Au<%Y1`wIt6?x~fbF%tJ1VYUEZqVEeQH|2ipvxzeM-u5}Uc z6Ue>PO9*+ycMfj|=*OF1%iI~MvO|a^^ftO>9qc9K&WM)ta{2U^KcTljqiHyJ2X}d3? z2kmvFvG$|S=~>$>udatNpceIYT8ylMrAX8E#883=q!9^{OGpTJGBvGLno*(OB18~A zzTs0}ePQ2zTbs8SJjwJU7t@`ZvxKgSW8mZ>J^7$DDL2!_qC$Wy3(%s zV<`4D<)3W<#RS%%IM{ad8Tl@`4UD2R7NrbkdJ^$iQqG6F6AcL~a*x*pd-_h$o2 z0d|CV(H&0rp`a2WrVvfY%K89V5!X#{8Im#4I zeY1e`pp%gDaTlL?)r;{MMAT|j_1kYe!?hN zTQc`yopHx{owEs6YjVT;=)y$6rS?(-svwWuatjoZK&B8*LU98UAorqF&=(SE+3D?y z&_nVRYO#K&y>$!;)T6V=;z(l|iXZf$jGs@TAh~Zg=sVFm*F0Tafv+I}z8;Zv z5z>T@*t&cHVwOM#QAX1SV6B6bW7K}%Mx?nGk-`ze)WJoFC|gjRWGO*07_v3)cACFk zNVOT`f+ zMi0*+;7h6Q?L%wcq&*4f|F?bq%>)%w3;>uEz)ke}6& zi@bcS#-Vu1cruud0e^u=WG}Kl>+G22DDJa>;V~H>ykCI?@LNvBp|iqui87?_L2kH3 z$PHJETIJsen^1;c5hB~X7dA$W50OA70Vh9p9kL+4i*;7brlLA&SD!<|;m@$)J-~0E z%#zcy)Mb^$5~OYWHX8F7-ah0ix>wtf&^f|1cSyA?siP?Vv=9+OKeB)-87av9-+joH zR0S+>3gV7MpkKpMlF{eA3|VB}D%(YF;!5oIO61!7^0+QE4=|VQZu?n@Tu=k(b8fi7 zew$HdULQ8E9B7I`^4W(-AmfC&q3%WX1%BQ_Tau}mHnaYA6o`EYRXCqU8t@t5cYz_}n$1Y?o zjc6~E<6J{dv5s@-9#o)f_UWqY2yIo;qy-`sKYI5~r;NRV>M{V=T~8G#4f`%+ISnGk z=mh$n*X7v9Cm0l?=ioBB#@Xt!z2y-JWQ?%Z3mcK}xd%PyCjrtAxe>=&q-lN{rSUF7 zWb-XFgZUD(K*Jm-YEi7<91<+))LCD=w-4RF)ku(^1&ZCw+N zRzwV^8fj(U2L8zYp3A2C1?1LCw=MA)BJ^xpg~;RrB4yt!a;CJ=D)gDAhdp`tK>tZ( zCEiWQu$#{~U?k$F{S6_v-faZSC+oiQt=Uhb;>G>sx&25AyHQc)Iz$+6qI>f_6vyh! 
z@jTQb;#h&KzgL+>HBAv?LnM$Jz{v%*0cp!$M*;G5H`D}araucjNti3qgsRS;1r8^n zC~z*L2xZmmWVnC7m=IWAgWNL>h)mWXK{=DeT1lADmrIbBA=lbP;HS_tGHwDv2kv*F zjk+(AgW=YWpO10`wf8uLGO+U9yQ&N+O}>@L8uYzBhg@slLWu=knf;a_!l^-)%3+{6 zDS#2V5mS#yASu|xz7Po+-wvZ4IFaNI3Kk))^0UYv+<@AwzltiDkIklh{z+mU(mH*u zg-0>Ehli11sYQLE<^ojOi;R%!NMWUP>~r`~Q8*^b$gqdO5?B0>*4ita@Z+`EJ}>u=4_vTq2xri#>086imAhxH^%#vUBL5r&XJ z%o1h^B9ud@R;CCAU9TYFyc;;<;K~fuz_i=>R}yMHJp7cxgwQS4zn&$WF~QP^O9_J2 zx0P>18u~uq)dH|MN)QPQ+V6W1iCjX~;|HkKeoJOQ-;!ntp?B4xMAk~g9LFt9ASPhv z5!y_wM+7mffnF*gSc}GwdtJceNSMEl&9#_jblX~1FfyQ>Ac0!iCsg4?Jcl;V=YpM?nIW+^T?_` zg5pyn)@kH%A5sV|q3=#>cDsnVk6V&JoXqVXL-C6vz;B@ddG8?w?pV-hL71noc}Xz3}d`f)@U#ptu? zLRQBRDv))ledZG^j!VdjYC_W#^ZpD(%sg%>0x^H>Q%D=0hqU2$5(O~R+m)dQ>tW<} zxC1?KucC+M1+1PUpAU|ct6?t^es2LELc-uI(k3?&Y75-(U55$CL8N*679;1$G=dx> z8W3^qCXm#56mb2Rt@8|W>1;ye;aw#~4pt;LXn`?B@G&6&H-wmwPfy**@E%^ROwz(M zNuIGP2H6OTFI7cDudbirhG*h?t8I=_~{8bQ&W&o$f#%EH1MD zJ%M&0D`7EmU-_VRv8}s>SqulIi4=+Ts1v{?bk8rMEU-7TpSL{=3()7c1Sw#rli{Cn z#4U_DO(6CFlp!JearFOxN>ta8-mZk;^DagH-91Q%yn|u`e?CFC&XmCgw^I?SivI%A z?w>>2;VR&Vw)uk$FX&7A`h9TQ|2=@Rr0zumehBzmPNR3Pjn&FZpv_6Rv?EKV1(8Js za;3dNXldV%T!{ThnC(Tb!Um_Tr0Mdf&qvCH&%i1|0`dgX#upIc9X`l@8d*)V4yJ6B z12s-$UWUk|$Lah14U_?wt|Bi+pVLaDOud2Jdjr$>xkSuD%vl04Vfz&7WmJdeWFIDD zJ=nwYA>e;P7RIxvd+s^lPf)dO;|!&kjtX1ngGd;^h=lI~q%~eagja_A*J}t_L^pyL zqCH6HHzEOh7mA5IiU?^uwFyngo%R@E_Q!4L{v9IJRa78N_>`?-0dk>z0R?ZTOXFWa zXz1tAJ!`Qwf0WQs)>|f*(Nw(I+Gcz)`ffBMvhF0bWH|y1A;dN>MP$hw}KwBq}KUq^zo3u(+RBf>eG6o%jWF%wxQb##kJ<4YB9K8uAZ|Fe)O;3rwF;U9z!H^2q_e^X)Z~`3}em^h`FnZQD@hF^z#;)aMqpK_7Qu(6bZ?P z(VUJ`Xxkqpd3ITt1+~l8_JI99j2`YaC^feS`LKJCV0;^C^_#hQ+NlpFw;@e*4{}wk zL-7h{=ilq#@>ny6tRCbbB8Cs_c$?Av^exSI+Ohj>yIO+gKMz@s=bY*frZQt`E3&LS zifv7oZH@C8K0nJe$(ek|`XUM+ zbAmvegy~B3u(W{$8L5hJa{;YEg6n5dQ2ZPcmd_#g)mby=^GzPBQB2^Y_A?*3TF#&c zd?&h|^GNtrqs*bN0#Bn_5buUb$2+npLHFzciV@ATbJf`EKHvYCZ8KIhqXAnNP@uem zFblm01!T7)k~@PmUr!*Q&vFNF%!2uoZnS9GNB5`B>)ltU(S3XrY2kx}#DWIkRH3`C z%;H#r$m0R@{WyxOs& zwHgVxmyu9>nZhvDmjKJ1wEbPkAKyt3cHT1b!mb2q^(9D)d>>gYO9>MIFWP=95y=h` 
zM$9IJ#(@>K|I>(wz4hV;BCQ(l#U!e}UV#KuJMyV_6NYkmLDy(&Ud(W}+cDIO=}zFE zGw~*awZN|;A)B5>UW{Cpptui&tSsNJ>jerl>&Qr8H>$5$O~@Xr2cAKR2qW&9xq5dZ z!g&q#Z_9fF$B5a9Sw|pN(DY0CllM_)*R~|%^LG+bKZhQaJxGwXqEWKHhbE`xoj9Dd z^v@z8{cZFxS2Dbx6i6d)K-%JNzLMz+N!SC!{iOKo(6g(mF3A zLOP8sgT)Njx{HBD4yuyz#{%9*F^AQt%6SkG)LL8H1-qWbh{SpbZmmj0s{Lq8>@Ndv zC!qttO<``U&mj_8z;G+3gir@`4}nKyRcAN@I=f)%o+l55}NlTVR;n^yf>4PZKW$wHSy1(Lc5K~RrL+j?C^(( zJo2hpItk9DD2V@4NOSyi6m%|UI3X4xKm9dCgiDbCK8NBR%aK*pWv?AcGFqhz1%_X= zW0aySol{6lUx}=<5{47V5E4+;jFUhTXm*M(E%#*YL{{qvbHlxL>(WjKGLe>^cUa0~4XW#S8a?0#kwAS6Y2)c^nIa@81_?C`XOIwXLc#yD+@Nwi zfjXx|foeor)yVodj0Dny1Ob&!^~KIN(eKK;w!bIM4Tt~_BMW35A}Alc9zs^eUV@dh z%ht99DH2ERrw(}8g2=lGf#m&!rif{a%9GRpy4Mx9Pno^eja-^yh(OZYl_LSt zfds`Sq@~s)i{t+VUN5j*c0VTdKD733LOfs%p_{FDUnPhf66C_`BLua7gxZkY6bZCC zjpcm=-P<;#;jctiTMff4A1`qGnV`GRr&JqhZY0!ubRh!Sg9N$1-ah1h`hA&DW<4S$KV)hj@Xe$EdOX?v@^vv!pcu}d zqwjkM%9i?W4vVA6zU!N8pB`id=G|QzG51iA1cEfw9|t~z(n0?T(l)cLM3$lXuk#Ndts71)yn@pQ`(0A<)Wcgs{Jdffs-o?~o=kcviP9a5Lr|s`M3Kya8!Z9Rd z7u)Y^NT6P@V{T#OXJ1SxYP=m;K&>d2_PZ3$Af(mEI^Bb=uLoI-R}g9TlKHHhl&)n? z1lWd>0Io1{JV8Xnl%Svp#K{-_3rKr92mD*4dDo>M0JA(QQFhh;WZQT_@jP;yoJtBi zEwK*?$hAoD)f47)c#RxFH{A<}Y`joBHG!ZucC43>uxUj1d=FuEw}-cWn#uG^nji)c zp+AU7vekb4_4=%^ql9UImkIYIomAjsCF#JwPdh$>NMJXzK(a*Kfyj6*64I@xUCsBC z$&=)y;baXyjMl#zU0*A*nCgJ890?=ykVUo@-M@OIkPKwEjfj{^6a;}dJtR*c7sou5 zN%VfAES~gs)u`cL8+y1tgM`apVA`0uAM);k7z6rIo0VMzVOfC!;@&6kQ)%mv6|fb> zgf0M=?0yVo1LikEqKu}qh-ACbF&-vp_}NUQeMsO=3PwQZ9DEIt(q*IsEJN4pn;ITO zmRXCfZ#A;W(lb1Kti&e(EVcKBkZZ3O-KUidPnIYlSQ`(b@5u+iEBSU?S<3ScQfdVfw>F`)pV=zAyn^CjD%?^5}ZRwaC9=xJbpX(2|MOm zL}J|X_bFDZZtZxex81K&m#SSJ!B-gV@Q z+6IwZVi^+lE$AMswCx5Q%o~r!X)eHKLc8wyNcg75ybhum#hZx){*)n6ixQu4)??@I zM;1u0t?N6s2JePSClz?IxQG;rrM7=7vaYt;F}zaYuc^iMsYA&XV{TPHm2hENV=*Fu zUq#A+v$Y*Z%@5DyaNU-ndvYiG-h9utE2ubM#I1q65{Nwze}t@vY~4Z$$`<-3DC=e! 
zdbqw~+x{E$&=hQ|i811bgqo0t3FBuILcsT^z3*cOhmc@dPjG2HH;!}GT8)P9x#fF9 zd6Xc*{@<7a_f;+=nn&?E`doh+3Ht`*`!7O5bQvQ9;YGH#OX#y-j$$AUNCWR76xjKI zdofZnUP8pwYHP?>V7UspVmBkg^cK`Rzz@*poh$I|vl1Q3ygQ912uAXV$O~qaK%6qG zRs;WU66O$$sF-!KNPY?xfo(@x*9qVc5aF~Y1=?a_yfF7kwE{M5^ z5=5@W_P=){H6zPwEpl0WfUaUNT@#U;D@6LM~fr*AyF?OZFUo~Y|`y4hLE6J z11xkRlZQ~y`6AMKk7w%-CJM8JUQL~|8VUU>Tc7XY<%8s9NPwQR_ZOmj(T#|-)(-tH z(&W#eW{i3J-q^k0jy|is=>Kad-TWowl0BY`Ov~MZuCWWbcRLFqQ!FCFW7-JBw5=6L z$lZ@zEia-%t+Zcvo^7`WxiWqN`NUh05cn?er%9f><<0>52yNtjP`Q&(K)4QB9j8po zUX6ar(Ea;7u-`#T4!`+o^cj2(eIEZE5z>|%>lj2r`Ce4HUW;N+zlOB__kg#Z+-T>g zB0gd+m@X%B>O$X%N~ERlClrj9*z3)R7`7l6&>2E|lV!*i)QSSjdCNv}`W`Ps#PJ}i zJLv-vtEQOucauKQ-dZVj@H{MA|h{?P77o^D(>5heChuoo9$x4Z6|7u z`143O`1*)%0l%G0lMjBPa6!0xi(H6DZcP#I`wN_w5NJT)L3f z{s6MzUPdl4KMUJ8mHY@IgMG-Nc@k;2={6sZ5HENiX~;W~5Ntz&{UPK|I}beT;4hJt z(l^=rU~b9lmV(ndUq|G*+OD$}3Ey*epWd+hbT|p^*&8Oaj>*F59+smFu}>f(NC)wM zm}I6ry5K%sg3_3Gplf?Rl1D_LVmb-rqe$ERJTkp=rPyWOlDGn%tYB#r%+bSrwJWvPZBiI)5rx_hBBBIA))K+wO>NVIE4i6 zPXnJsLFRdg5Dw{d*Q>CzNeI57;IoL(?;NWbqQxyw}+Kl?1`==R^F2(`$Dk3uB4BUxEbuQKa3jB6QyMLO8MG zb)wJbqFwJ=h6^$u%=U$kD}cM5Tu2Sc6cLG8G=7FjE)-PlkJCMWkYKe{qx<~|vivS* zlM*au^I`N|tHTDJ&DPZPh+7NONg(ec0ry1|`)EP}VIEN7q**3t_{|9v8+msc*E1%# zP~E!=3Gz-9#P31D{Z$05eFzEtQj|UPV@~O`-<(MHk_pVymLav(&UuzFqx?J)ruPwA zprhm{03N?PL|BFr`4iIj!&PVUG0 zTOqyFg{F;rkjrW(3X-2i-}?i|z1EW1&&N2Hpzl)yaAGQAN)Zt=g=r%YOVK=rG^vLj zylAg=5j6M(=pp;FB+vC6PTJyXL>f<_RLh;fH&MsiwS=_eUP42^H|+O4D2{VCQ0mkv zb)p$F&!D2hB0Jv};+P%^^bMVfDR|1v}#?<32?A)*?M z2ogkc`_Xm(7*Yn_2I|e7GnmJEi)n?WUT;U4gAb$o*M!=!e1N|5ZJB+l5ov5d-?v^A z=V+d}l}AKO4W>Jd_dM`Bz^|iN!Fy(!x5M=|Cgn&Fotx&hjDQQ3y%i{C^Vd=Oa0wE? 
z14z5Rf|3EeAcl};`zDGdtU((7a#ZBl=+vNZ+`=l{JXyFC%x=^Jz=Mg`&{;w$FX&vwS{DjVFkRm~EI=0!g6HNdR9(=K6VtYvtvr zvi36Yucvb@qhKLQ8~z)}*YCUFdhNX!t-Tpp0qGizA|!C%KrXNKh){-*+p7Yoa_XKs zk$BFl$bbGI`kao~v2R21khF#2;Rz#4S|uo+(S$VRdG>lSfk29oVF&PEP#wV#YDMBvfdY?-Q-%5YnedxC3A<0BZoa!wW>uN3&ljyFgdtCZ zC?K8I)`#r<4^Tg^jf9xM5AFY7K?M4>@vhObvat1)qU@=8NP8uq2_;(}1xQwPF4!JOgehu)8&ITgQ)_NWb zcF#|2gtC)Du>dv1d=?|FTWzABvd!n05lO2kL;6mbSsch9rz$ ztwdJ9T4V+MTc+{Bvi$t*sG#nXC{R8R`SE==lSdH}h>gfn+DzzERgA2R6S_p!5@HI2 zC|&a-h#a410t904MRMM)vq3;o6R36&vk3P9RNxmzw}tC*TrU1^AgkBN;sdW z0pFKyKpy@8S5XlEw~?0J!SGh^KBiKS+*f`eR0E-%h)Aqda^4tmhL*Q=#i`>Xm z3J$BG&gS!}@d_A7p$A|sEj$g+J9k?+Yw?y5*05pxnVMj(b4P)FS~o=0^9 zrN}baF@@)Q3ROw}J>;TUk2LD}41cJz!Z=MJMJVMm4NoBZ2m#~-xx8LNX|}6T+@j6S zUp0yAxI!rC%esEDgpr1CW&SGT95!SE@=f5Mq1hRAh$uXPoVVBRvi~~~dGsPu^@LeW zXusZr$ao#n)MxtpcSAV2R6h;;3zS^pTf)4Kl!aXTBq%=icMx&B3H*P+(Yczw91-C$ z)AK;iF*1ny0U8YT2_%TW;H0(fo5%@F(|-`9uz zfA<0<6I`809~p0}URA!0de=cq#Q51(f2-CgI=Jz0eA(O1c1YxOjok~tnl zu@2v%_@do+P;$Wtx2jV{+}-FJUq;`T*k(QAmcfh?NG{=-K&O+|_H~p~{~?qTxo^D3 zchb6}AB?G?y<(TD_S zHS&L-L4wn_9AAknl8O5KkJ|qCW!7w$J_3ndXFU?$RSs&>ZkVwmQM~7Mr5xLJ+>qcaH z1Wnm{aU$oQN?5#NC5p#AX}|liStrQl_fC%e%8=5q+kPKIGivfSQ#~RgpU4Y=xS;Jn zjIvqwn-%gBDrRd#f#<`ck%$X3BJMybv%ifphc+Q$Fo>@26{JD$MZw)3hR@g@vez!4 z0&p*M;eRyU@hT z?dY{;6r=ej@X8HbPz%xL*^H{lr)^qeM8s@BUI--R)Bg#S<+C3NoZmp2?9Tze2>d4p zZ;T>%YJtCxT9&_yG|G)gK=@u&rAUA#sQw{+9B)ZnLGwz!j7a2WZiwTypZ9nOfc%IHGbw;FjBxs{e6t@#&FtCh*RTe{G&uVbi& z=OiMM+wA=lsNUpF3ZU-86pnk43vw^Icjf4F`W{M`?!3u5o6t265HqVHBJ$_LN?_2* zHB{^1fdt-gN;fS*KIV@g0r)PtALwQ78o0MSd=}Ajs51U| zGR7f9mYpzi=BjaeeyPL|hyM)&U#M5e!mh~s=AXz5Nw{{+GHRE6%%^T<-HOUBSX zJqgxoL_`F6BoM^ety9u%HGv)@-ZxNo*8)ZofuH^H zhOK8KA`tBNPoi3naR=0MtY^?4$0@Pkuh4zXYj;_v&v*-RIo)BuE72OCMI`@zGP2FO zpHCBKazsSj1bHQp1kO1HSf6lu$Pbu+JdA28mZE8e=MY(>_icBYg7+TkNp&}&@n5n1 zewNUpy^KI0L&&AG)qMF~+Kt73L1P2jORymzx0F8g0j7#Sl z@Moyc)NSM@kQU%ovsyOVn!Ri9FK zwA#0ftd*!(vz6Ra10y0LQYeH%t{0I8|3yOO?+YlPouHK5yK_}?CvuZKgBk=bW_V}W zUOVm%WC^4RVgL!5N9||4M7j)FH3LWp_uKhv36{}%!hDTN6mb6-a;5BWs*+D8Qi+{t 
z#>NXMcGN+r4Y9WwLInJQ-M2M_YWW1+=NdPOtgu3ZRfEq*R>#i*{|L3{??)-e-va() zqK1xFQu~!UeQtYgzgplza+7NPh=_>%yRbm6jvpdFw0BdT1ipa+oA^K2Fes+)V!f#Zbl*57j ztpdeN{w1=$ik*VD6Y;Ho&Cc;LLad<#1?>+JW^(wUP~LU-HX@bnPVt{v)7bC^Sc?W) zJ&wq-9Ff5D$l7WkbDfp~{v-;n*P|Y7SF+nhL`399-YpPw9W?`oohBD9Fu(dC)Iwtr z`TC!Bp)fFw79_-57(T<}W#qcqOqlbL#vsyIZv#GQLGiI-4RTKn0N+Q}$n8i-y@Rr^ z7TY=J+usZArxsZNe)!b?2K>5HKz_W~k212Z5-gBXluo=K1@gC}>-YC&0J;9Qp^U62 zCUD+@f|K>|GsxmthsgMwz`qB+nkbmx=aU%BT6r4yRdkQOgFeT)^mY*u5jRHO2?Rn$ z4{}A_Wx~7}wYxZs0>zur!=Bx@50TB+2!gZ`k;)?|>&Au6N1z&6DeFgDpSd~ic5sK& zI#!~4=SRe@LZos9Su^*c7>iH$>_ZlguTSYfE~X`jYH+p`uC#T&pgNv3h6Q!Hw={cN`VUr3bT5D^hEK@@}q0;ujuqfE0)L^uagO73A4#2!R~zm}0jUkStOMFvqi@gqK{ZT|6l&^-6wL2- zTYnL?v|nx4a~@^ERZJwHKU45Y2lnqe^m*+=MA!`cR-&IHseOtq<$4Q>MLmd8jQ{UQ z9uX0fM|Ym)cq{A*;#9R^~B2)S;2U zA4Nj74v|_JvOava)m0Q5`MZcRa*ehRt2 zcA#2?)|=g=%mocO#WnsCWvUG#GFoJ7@h#oYp`?K-J4dr!(=OCL<}{gln|#CEh?QuK z2Q5|iDynwBlq-A9YKrbf*Y+w>K5}jw5fL$Q6dZwAY~WqwditD$zsUIK%Lr}Pv-|b~ zElyhIhmhv#Yad3lKD?msL#d!UkpSLc=Ndw!vY!w@KZ{au??M6oPDB7j$YLq7WB9AO3U@vu?v01y-4Ub z+i@$ArPF|{lmCH&&v{K>b-L~~hy=cX+=OS)HN1v)+IRq}A5C z(atsK;4jCSUV7P%w+0bXDRN77BX?Vct>F;xTp}(Mpn|d6KHTE=*-IE^ZU|5xP0-e*?G5E3*$f%P?FWGCG#s1?f3 zAVFTt=rhngsrUlKdQ|YY90{>bL<|jf-g=ZtwFqg*{{;!-PPDcwcFdEgmCC1pe~rkf z7{x$ZknmfKVkw<=uG0w&IK^4+u-B^`TpI_89Jh7cgJL+?Jt{_ZF~?A5+lv%Hr(DAW zz-I}>QHDO-llFdFj_a>MO(9n!>*P04MSl#EM?}mJ3YI`jke)=DRp|^YF9;W*T7WO2 zpm%y-=OoB~2ly9AAm40$gP#r3gxpu9=>D8RWVRRy^_P&Qdk_WA&n3g-F<#IOqIk?R zh>-p{5{&Ig>-QDuhi&^MNDzV=0j@??&SvBm8`Jy=OFAe*f%0->d3=Dhc%Nx?zCbLG zJ5kfg-2|868Pw7DEo7CEB@d@WfX|?ev)6$7B-2$lA|hh?QLqF8kU+1K_M8ru4k0bO z1-WQG?$jvHkDv7zK(BukY2u~m{j?h^`}Y+TC)kOEa1pW)E+CiIOQ<@2zr|i|+Hfy{ zMkj&zDWo-bAQD-J?!Uj5c}ei?Z$^Z51^M(3G5!HYml4r5A)$N)IF(Fs2i}RmDRBQF zYGipYatoeD2?Xx~Z>8=L@t0+7g!lm5To$u;yQ^7G%0 zipqYIl#n&8^|F!6M&us3nGkM7#IT7_2zU_*`2mzV`av@O$%1vL&~F>E2)YqLl%Qbz zc|rkW4a)SIZ}0m!$(UiTzT>E+M-$S}+sGl5XA%x7?B3l8{2IE>3&=ux6;+2{N^PIO zFbm{*5=El>77-DVM-)7PAM!X=>7isKd{E!v?q)+0f?2c<9%5=N+wCkv|(aW@)!TY;>wa#ZyA 
z_fZP*up|g;L2{#vxrm5}0>G>y5C`iW{ECBO3tYd11k;PaUw5$6<+iF`XLN1clgTCI zPp?2Rh*IPRYH+d+9vkJ@<5)f|C0#*2^0tU71R!7 z7_5^0RW#yuh|p@p-o73zZ$~kQWwy;h!noW|GQkOc=9=1!$f6x-;8mzPyeYMY5?jBw zKw1fnA!h{~tZ?dVTxu`*zEwW}S`%nVL>w1W#4+C~mG^U~An+{asv2hvOhiP)3}Uv$ zKw41|-zR~uByxQ;TUO8Q_H){4%3ih}-~#a9Q7Z0IM!HD+Xxhz$7A6Of@V*DdEXtTz zky;s=8d!>~kaDDvA4IOJAtYp%Ab~p{5zC2O*+_*5C*s|N?$t(=P1UcrIE7q@xh4Xx zLn+I<(RII;s4aU$L_~pORuM=7=bc)<{~?<9eFX_9KfGxLa)G>zY6Q~zUURZQ-a+Z3 z3kU+O2&*|j>AF6E?fXS7MjG@62PenVTgl04*@RkiyhkV^oQICp>a>VNg?40q3r1$Wpv*P>L~ zPNbP%CS;8{WRdJapGhAQrq7P!Jo!Ks@Yhgf`>!JFtsPk$H*&k(j#|q9Ecz^e3l$DV zL`2LY%qk1S&35^Tw1=J2dtXU62(;?<`zuT!#)<)nHNZY@Jte186({KCwPP*wawNIm(j+_KyH4l&3&r?Wf`j+yo zh(L-FfehL?dQkS)?RMNRQ($vBK*4S_b7;E0Hb$z#?Mh+!}4 zsnp`&u#w7hNYK@wdV)HXxs}C_fbXIyglCXoUxtKU9cp0s1d3HOqBia2sHUVH*yw^4 zQD)ozO_Xglh)8Hy8y1wp+Hb+L4?w7FZLOEEVI|o zqX2tP0g*=$a>JFQ`>_$FKc7HZa4(^OUN>r={tmjn@%9djh=`a4nAHT5K)aLssSmir zi4-oP{z^ZAvVwlzUVACC@1Rqv?C&9MdI{>+)Pb7&^&(e`uW#_frQSfk{GUegkbdO$ zswQ{uDo-LU4A<0)kjv{VvP=qQj)f%vJcX>T|BQ%qF-lZ;5fO4*W}j7PuoH<=tw9q6 zV7bzylf@x+rNerRjd~2bQhf~168ChD7A&a945wHU; zCz%=TPWR#w6vrt-aS~c_*XimqMQ1E*m>el;Mj(xil zIA#^?%LoMW0dm=#LP7KU5lOs^0_)olDfA%s%*~oD`T+9vz<2^fiH1?-b^NM>A_r@n zERIj1>nlO-xNiY<2{b0^NL-iNuhyyX@j(=eDg}O&C_649BH|XntS1md53(fgKp8@y z)Z9O|-wyykeQ3$VN?is7x23Y{EtM08xaw4i(oFeu>6pz@1Y)9 zcc7HgUZ;+)Pa>^)FYpBiUm7Jv*3UCPk76HJk(+9hz2Ach5I=@eg-5eG(t-10L?9g~ z2GcyL3!PeQLUEjrq4-Y~O3A&7EVBO$^rhWURt)$F6zez*{4VM-RxrdE5fL$Sm=gp7 z*Gc57sP^OY3H*5iZBA9|tB|JsONeM1a~$H^D7&gC(ZHvxs9nkyl!AJg5NMxINYV9# zk@okOAPeRc8q@j>+kaT#IUjIZ|4$>UY$u9yv;lvL?!o0G%pvjn%tvvK$59;UD~M?8 zlF1L+iin7qrI<4WVt5U?mmY8uhMmAO$ojY)StozZ8M&l)OyGoruK_jY>S{y+b_cRd z-aziLrHnq+uoP+O_2~RBp+d$(Q+t`?hf|E`e&l+36j>mRC}ZsvRNrzUO$c@$9zxam z^N@w|0dTNT(v2e`BJzhhMIZ@uSq9cJ6cqke0(Dk({x~9%UqS_kZ)LND5-T`#FyD@~ z6&3ltjfCvaAp*&|!2Eq|LutjYB9i)GI<9j(aI%`#qe}WuBO<+wYBXL4zL(@^WN39D zzD<4w@J12~0V5(J<~kgF#KE^LQz&t=D*mpMo69-)r!F*N%(Z`!gJlk0axmoJYYtXA zIN&srw!^6g#s4%r%^~?iC)ZHkVb`$8!LK^_k%Nm4&Nz6_!S6V^`bH#CIE|AnwqW^( 
z9qe$jJ|ZF_BIZ6EY<2MO9Q?Qm(Zs3H@P|esC!FE{qwQbj;Ij@6IgPaaO{bBzbxy(g zcBlCwT}CE9jp|6|p;$*1%G7!e zjnsXQ0%%KaX}MoOF`G72DZdQ4-@cCARu_`gKrE;0*@)sHT_|~=4V0KLlk1O&h?rfN zlLTTZyoV8y+~J@(feTJP{2!tC$nB^QE*F6eBI5AFs!jm^E2;_UMZx+X*s)&8cP5h4 zy6#4qQct3+ulLZp{%^G2_p)u)o$5aBLWKEsz!|xWdE#P3M8vI&IY}UfepE>KMI<;c zpu)i4McGyJkj6gRzTX3W3=y4O`8(8W8<)X2)YzBCI6`V<>7_8XA&oVLb1YfXAaaR@ zA!G?-jw~a)!bvJIwsCATnr!2gEfva^CDWnlRSr|Ok@21PJN*go{qud@*YkXy`@XNw z=XtK{xhI6K#YQ8H+<20#u8i^Y7bEaQ5iO`iwnMv&ku^`TLUrlh`%ojN>mSGfIs|p$h%usx+XRh^9DDuknw;xLNa`cm@HQV zHz}?+1~JW2*1?MkYujzB#0&Qk6R&t`Hdf0r2ywzVmzAB7bzDxZ6#EvwvzUBM39h>T zfc7L+-2eGJGVq~R`=L^ONpSO;aNRPXwO0eZ3^;t6LiXl17koWS@K?S*?hsHd|FUZU820(A%<2 zQ1UPd6l@nC;k{83J)bWgEDFa=Js8r;OY=crn1H)hN7U1XU!uabyN*XwYjzvew}59y zw4{?Q9bd14OUWveSdayUapo6eH1T(@(;AJ{QZz&uRJ3mMs9Z}LU?DHgm)6KhB@M4A zUmLZVu3@iSRlyweqO%Q$fbhrcOb~7v8M%xQb?Obyt25?T zxEaPBzuVS_Axzw($%V_(txrDmsc=!8CU<7SoWBp2a3|BPlVuOSs=^rWd$X_h?n`{r zomN+x@$pzel_2#@1(?neF34mR4rrY`~6|1skxa zaGNijN$Yc-#9Q{b)=;UEAw>$+fLCZ+tlCdJ-ieT9JSYHedD3%ND=Cuv4v|}rj?DHX z@pIfM3tmt?Y)kPw5>OUv42^ryekyCuB-ib!8s0VB6Oqb0q-5((!wFDxTt~t z(_PwY3L)jc^9RC^o>im`)OPjUN4wS5kaEvWz4mMQQwgBnz+mLzy3}2%%2TsdF%o#! 
zoibZ?$`1%7&@0lMd-2U}w@J zrKz<%OkniT2g8890@6iQ9v7A8xy%~6pT+E=_O7#xB-26{uV#x5o0xHpC|;XZ8#Ypn zg1u(h{PD{Hw31EPXqkDb%JZk}!4DL-^m|4KIl9(BDAsdDYWAy6B|EDrszKS+aG%;z zHd^KTDui)e$=^$GKpBVIg&JH6W_-KV2`!WTo%gTe5%C-o|GX_<^TBEuP)xc7gTJ1e z4c{m=n@SGettW7-%(+&%sP)+BK=5S&^GagjyMZ*>pab$Rwv;t0JAiBk2l$9b*iuSrm-1|{mTaIS<~=OnBN*pJQ@@_;v7>8t z^=)I1?jBn@r|^#-F)1zIjHaZ09@2n@B>yVVwIkQVRsmy1XYW3 zlIsmTaZ!+EFLZDFh{u~6d3C?L?8jnJbXC44!GK5Z&c|nf3*B1(Oe%i*R(>f};OVSB zXl>}QQahJ#*p82hng~)~vTe;*6aK32VgXb9%J!cu0m$vT55aItKQ$D&0LV_@fa+X=5~y2iGt@uCFpiu^p+m}5EV zt^LB>d~h&iyHxGxS` zVyEr2;Wo6xJ||D6eU=-zAixE$4LY2NYJjH-Gg|eloxWEyamU9ChH3;!gON9Da^pZ@ z0JeEBOOS@08jnQ{#k@g&es;G0bmiY5J^jDU;4_C0(VJ+WF2wl}g+Oz(#bTOm0@D8n D?3*K8 literal 0 HcmV?d00001 diff --git a/docs/source/autogen/images/thumb/sphx_glr_quick_start_thumb.png b/docs/source/autogen/images/thumb/sphx_glr_quick_start_thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..8a5fed589d17fe6ee62d6a9788519dba471fbd65 GIT binary patch literal 26794 zcmd3N^;cV6&~zU%f(#fBBGMqQ6{mHpumQ z^~%~w0VJvEnR)nlooKF`yb|H}xA|C<^cUE&K&!B{#ar*%RrUI{fOnuSCL$03!--#| zXbLuXBIIR=G00EgrNBb5!{Wg0L?-(}Cu?Haf+0F7%3dlH!I7q$mL$Yecj9<`jSzfT z^D5Zgt0)_Z#Ecth&8aB^q#^tt+jp*g;+l3Kuta%Af{32Sylxr^V1hg&jmnR}mkXfv zAbI2;{|#XPvE=`E0PeA)C<&;HFH;EcKm_ss4Tt~~BwnThPbCg5(+AS2SjXlQXBFG6FYU#Fiz_d=HGDi{5xv>!;RVHaotlES;Y{+4GrXjSnu(wiwX$)Rm{msq2XKT(PDE-xEgi z_(VivIE9l($F=w;5%X502cyd9S;o)?@*@IUt%-#RKi)is3GxWf#e9B1a0p}-K^GkH zE#RcLH-hEomo}wtS-`xST`geg^-vL1i@Xk;hL(*ihFVL8OfGro z7;=ZwLXX%~LDV&-I)%f^Zc@iBUPr)DOdL&SY8Fe4jRr0M^?OY#0}B$nzJ&L1In9Al zQOK9T4G@xxBY?1&=&}M<$uJ+xfTKBW>PRAN(VR;B>we1G_F=pv`)g>`Z^uYRx_jEF zLgunT}hWZL=J)o}-tSc-H43?kvu z2VGc#wT9ESOGB&rE@vy9Zz3|&#)3Ae*}ua?Sht~cbom|GLpPlTSm&X z8g90~9$!qLFeLh!xc`P0$(R$Ja&h&6EezoO__ACMkpyr*-p^zc6Sxuga|EOJ~>i$53|i zk3uPR?4WZH-H$~ya%J$=ipQkS-owRa4tw(P{%YPDc|taynH>AT=I|^o0}49wU~C{J zauK2=H85m@3Fzliq*hcd5LX*{uUE#$CpBIVU20AE#`cp-cs73fa2V=^W|F4HJO04v zh?+`+`1w}s-C)?|2Q&Bi)%tp}=My{T_L}AL?X!5%*(mp1+HX=ICa0VpGHL53#n?|w 
zrsVN!1h2aM0663#mGa+wr{K=E&1QL;2u+|ml4!Vcv|H~i8eectf>jNDUkq^angTdW z)#m!O%sOuC%7f^!Qht!qCw#tfe81zkyyL#c-mQq;u_=hU8Y&X0P3ZX%SuC=j%cBp_ zr)IOhDL^juo$ys}!om$%Q`tjo6J*I-hLpYq;KY(EwP$7&><+R6Cs-uf#l&-#?&}Uc z2}fsB6#K32O!O58@ksbMFteP}G2(2lnU(Xx0EQ{|6Fyl`z1ErflfT{pGkLUjTO->; z)k=c&e_nqG{-=f;xxac&YcIobydWM*>-WCPDRj}<`M?J$>U-1W=e@wPjOqn`WB6h3 zK~jXSxO+anw%MV<(bYq5#QL@?&$9}E9mOt!yYPhfLyOPk<($)-DALZ(Z<${Ys2e!I zEf@cqbht`&4b(^c=S@?tyU^-%7nsg>47yyUusdaMN|9?WoX_ry|GMNc@rE*&1ZWvhRi z_S3m-n`bk(@O+$m8Kp&CbTqx#!3Ro^Y+#WD0wD>iyApzndvKsR&ejCNHW0D$L?Y{& z-+!(ex(HR2t+z8ezJ13p%e#_pEzeK4BMasjYdnwJ?s{s>RYi;(GFI&ez1i>3C&Ca}f(#daZAeX;DyOc))i?10TL3cyU9coSu% z)362xbDc4*fSbB%;E}Qhj(t3Y;h0y44|>PJ{Wnh8tcJHxpgfb*cWZ`3JyR%I1eX@D zhJN6ES7k-g^nBRA6+Lz{hr;f=v9U%{mDsRr?D`_~Bl#>!q2hGpQrdVLN8GXyT@?Oz z+7-##Pu6db%j-<<)!r^-I*w+`UXXo=8fYr1;~kdxDdbuG7#=k3gnwVoy*Pse*8?CP{EM4h zidh=Lh5n)*Bd=X5LAxl_{94L7yp5$zMl^#3{EGwW`CdAsJZjClo-A{sUr5=r94uSOqPi$Bx2XqwJ0Xj>oqW`wvg3e07J z)B$YP_^e2>9eu-`H$Ilmv=_G3H|}OW)AT=Y3o&4{OAN>c@maT9IRH=16i5c13XIbq z?_;?AkC)0lxBghxk!|c14fTiYk`u>n>GBPA8g4GyLuofED1?CZ_ogTPNTn=*7Rz>` ztYLiEcuec^&_!PYFq(!2r=O|z#Ft@!biey30#@bucj&Ux#$iwD(4dukXbV1&1KhNt zqjE7f0?G2qOI$YC4rS^$^olhr+rM>-n6e_o=%}lSws$wIa?!a=?|&NpC0eU;zsWc{ zKWScWoG(_FP6kwvxFXl0Buq*6*?NA&t1N%CNhYQxAHE>%%=x@2fFKMkfr^ZzsGxgw zHOYm3bg4i(w(q;Sv}7U`6Pvh6Lm7Ptpx?d8)x>^1RQ1|vuG zD|M?Wx8nh#9<4K3^p^ z+tzr;24w)=wRU5Hn4~bhvixzHRwBbhqo*XVUm->z)SC1la7x*{$-z(_ak+M#+ZP-g zeJQP$k`X`41*d?oW^%4JZH z`2Ld2K6XjjT~MILbr{_xL>OfMd9;zSl*--#DwbWDM4Mp%J~N_%)nv|~hmH9@ z)4zT59opehPLJ^KJ6=VVO6Ho)jzT3gR98(NX*61GGVm56{>rj2M18)d^HhPG&iNrA zw3~jo&)!0v{GJE>iXI;ylLdla?z?dM+T(hzjWMg#Y7nngVIY8XR3%u9|L|1IzJP2% ztaEj&!l5_dWLvR#((N&`TQ%Z_aD4=0dIY1oPwr0rTMp@~krDj&qrOo>N7s>&+3e2# zj!pO%Q}P>}{K5VvO(@IX)|C%9o-~d!1M>GPa$u9}~ znp>y9i{nXjS1MjmdxDO!=)mKBA}-}mBp?edtY>D#G4~`p;ncc6ZCuu^9162{V`}LD z1V6Z{i6Rj#_U_ZbKJm)mqBY>vgDo5E5hb5`g#UiRW{W{;Yv3BoGE+jQunVS36Np2enN*G1C~E`p6Rz 
z{j*O=Hr16UEyfQtZjl0gr^mQuLw~P`!;^`G!)B$eL?HKwuJHg*Av%bEfp+&rI#S6h3Du-ZF@PZN*czynUs7CxrIzi<4CKW$DVUDZp4t@iHd(8;)$gN|7tcwXZ*}bVfvjelr52^Z_wbOI56$NP(NgU%iVHE zF4%~Q^N)t*gCUrPDQc6o=jm9zod2D$s=kAl9(4KKJVtQbESUEoxkjYn)gw9Hf$ zxkomp*C}$oszKqQn1bcxZ?n$jcvSPa-%+!QHfnAo#hP0@4Eu7#;_sn^YAFm}a5$kj zR>ve*17@Gze8vcL$w+E@`m5(=Qy*Q~>OI;1*l|C1v1PMWOw3eL9{)(8=9b%IM}6!U zMcHwGU&b-gd%E#WWQ1W&us{G=3v*Gczbd>nU~XA+RYmPvAK8xKIjItpke-|6DHLM? zV^m(Trt~H3n8Djvl-rxNw5%%KO97+CBR|c6-1xk!_MSCj{y-=Tl)@CO+~o2vmxWhz zY)tX$O0oEU#QKS|IP#9wssiTL8{ zJD#*5d^it313T$)w|h}CIq*lseFB}(#~)6PjENhpg?^8Xm3F>+oFgAQ0$j`746Z8! z4T=vRkY=AQ@V*?NvGxTdbw)Lbpeb#Hwn$jEkRl=kfxy_mFdnIZZ|v?Bk47e(MA6iu z=n4|Yw5OvgD)#Z`yIX8TL)kRf#j_X&X!Xre9^FbrGeT&ua~Ez z;L~rWd6P4b4BJHLCtjp5IQbK@Yn5nt(u(A%E*th>PWT9zmg#$i405 z1u)h5>|%j;2S6YL!d+3OTS>AcGo{_X_PRbtC^cZ~OZKy980j48DF6mKe0`ShOy)%)$Gpx&xYS;yMa=?0Luop+4>{HHg<+3oxDAZ>#%N{Z}PF z*y2V1MAwfF%&zQdI+R~N;zar?o(NK19$nQ&-1jXxMS(~iDLANz^Z7LflHO0Vw(S<{ z-&6fR1v zwYJ=7+&AQJkPZmGr9o>Y;3Dk51T1!Wny3eOhpSrVHP)r8j)n->0clR9O!IoX`MucX z^Zwiy9^o(E6WZ-Mwy{?YbRia8Hzm{^kdB^~ULyM{?&zV&+;rnvab{}4=iyZAU08O5 z@MV3_6vpe|{fU%r_^adQ-fn4Eqe6TqJ&VNr&47I)qv~~M_?mkg8<^CFz(S?YiNc&J zF8sSlt6VZ89ZVI=m8J2(?|e_Ih#V2_^euv=$5qy@pDZ&CU`?d!{!Xjfsm*EywtUo{ z7M`bz>Lm*;&#qzeUCS&pM&I|_VXXi>@ekIJ%6OP-srY6;NEm+rQ*Vm2SP=dpg`9v=8of0 zhOkW8VtL8ieYY31ECO7=oqO;@Z;1Kzm!dv@X`5}nJ?l8#I_`)#C(=D2%+*Mp=89va z5suRsE0+)2#jpPQZ^r2U4e8Tn0Artnx(_PB{~E_^f3atL)FN~J89#@h6>YEJ)SkOU zkVI-6t3OWeJfD)pik~F|nR+pV9ikPTcqm+4>0E|mxI>`U=jt6gulFW~I2NBxSomP5IND~h{YMxG>1blL@4eF^{6 zk#UK;tX9dPQL#jH$V)FC!y~DdwEKH`o^W&`SzLFoKY%L61$Od-y6mNtoA!97 zb$UD;2B^}_laB+luIKy^Uxf-;n)u=5i3a`beLsWhyI-)zg6gb&zCf*CjB)ffyJji% zQ#_YU?1e#Jdx!n7Hh(Ev_|i!PMx4H_`%&raZn?l8`iT!XKN5a0UHxpS!xGFhTNf+u zxpL3g?fDyTpaY9;=4WvgLgnhu2&s^lz|K2HyQC6^rpuT~EyYofcy)zpZBfmg;sx&O zRGn#3W%*3Vl+k#uA^Ks&Nc5APO_6`soi1jM35BHy;tl(LIhwN&a^a`% zJuR6vsJ;uQlY9qsuOj!Xa9BQ;(SH{l%NmW8PkWZ6_$5iF;!BonGJlTSxyBnademO^ z=BG6}B=bz`T7Vp#E}gK#fA=$pv=-a?p8toJvm3oLf8KYg@E~;oi^ZJM)2> 
z@1e%vtsSw=PX_(a0OjVZhHr*B{g*Z;W~=k%vO2VPhKypEAzx81pO<(tom_IjrLqcK z@zif`H$2E)oZV2FE-3#8Buc{2P?839y z#~{t0*VWy@2eH+)q@uti{*C6u-6YLDb<8RHk){z#wL@!tX{(Ia1#tU zO(0i6^t)Ueq*UJ0(u~F1x&}Xc`0a@7^&Z~y#d7ER`@Xo6>+bhoC<@-^DTsWJz$8O@ zE6q6A|0WUD9M91pzApN8$^s8ZUZ_$??^mFi= z)P@l=?Y7Lm!mt}6Km%zR(K8vzVwM*Cph$R+Xh-GcA2iO3omIER78s5{b=pAhf9(?R z=jTLCaD?TmqKr8OI!4VC|q!DCP=dfL9nazg#oyW^g{NeK5d*yo|ECkDoA zntQ_k_T^y$F%++7ZZFRcq6{D9EFXT)`{k$vq&&@tPha9Z|9HA1$islU8DzR5%*4N_ z8DoCrp?%1ld;gLWA)zb^F16gFV+?4T=W$cQrr1dXp6_NT3tcXsDig~m+Mf7k+r5{4 zaX`hIACv4>RGTPbo0)OUixoJ^mShV!I=(iQq{0#r8*6Kg8efTj75)X$kJM+F@I}UK zUT6J1kRe_tI*P!@&+7A20@t?a1+@I_mNWPu$Azzu(L~LJHNe5}vc>A0`ZhB2cvGWA zAP_fmED`+(u^vqY)Gl~k>@lkPE7s{N6Gk9+#3TqD@!RmIUBVFrL?mpY%{jeTz>Ul# z^|pRz2~Co5>*5Vwzu~?e8!^P_RU&lWU@N*MZCSemvk~1wkWh+dwBQK1rWMVqOV=%v zyWG(np@yt5?UDMjtFamAdo2)+Vqc~!LSR~pYD@3!kZumq_zz#BBQJsnet2!E?A{p`ONN@Od?Dfmf(G#Pe1^hkYDJ<6WdPOs0PO-=ay0Yv^)oC(`N1#}Xo#K?6jVUH7&Y zt>ZCkQuHIG`z{jqel&L+J&lkI(N??MwcMzEWwuRE z#tsWN%OvjLGsAyYLoMu0+RDt)d<{d{D_#C6oX)J(P(ujhKG$ds$ObrB+81&6HEUZK z6)Ny?G2BiOc@i>AX>0zY@kpJT=631_c&}0lHb_-=K+_#y@r3`Ov!QI~A~_f2I%EKS zJ7emxmhe7MD5eWcXU=kDb2?}pJ=Z2ixgDeUC)Cr0Dgk+Xip@@Z?fW>sIrSfsd3iKl zA^E2N1so#Rx-^RoLChMgcI*of<7Fc`lfSl9eUto$^I#>M(!@{8#<(NM6gWSTnNcQu z#F(n@U$X%u^XVuNW%;9#eG{+J3*S0eDbzOK*JU^!PFZiL;WG-ghu)JglFs-`?gx{0 zg^83973w0~@wfomO4tTCaGoAw5ySDvK+M&*B-%3F6upN6405V@Led1c)Ncz<5n^(< zPiF~QP0i%xt-8+Ans2bE6p^{j(LYwh4VMN%1BUMdM~=lxfRktQVj7Tbz4GRfJ5l**g7#AP{Ll zg26O4>0syPBWE01LOh{Vto8R|+hzwxjMgaa+4w4znI|@APC}4r- z_j-3kt^W!$v8L)O)(h**BQ`G7xTp5~*cIxL$Cbd!4BDqq1iG#K=~9AS9TqsXP_zV8f-mKeyL`)N8UjxE6ah@%CHBRjiYY zIAN$UE$g3x=1lf{!xt1ZRgVcz_({~_;ON4pN2KG(x>7`@%V6tAaX}iC*iEyNuh{Z< zP4>B7WT23ha(YzwOadZ+-><%ECjXix zzBw)Eg|m*luIPw7+s=KKGbMdlsp~TSU^^P?aC>~6uI9b%gV=r|dD=H2?R4lgxOouS z4y$=lE(0JSD=3f=cism*huq^ph>fl#eruAl&Qgel9&OAcjRk)uVWD_8t)s7{ATQEaE!nr^~>g=%(4pS8+Bu z5=G_(yS2XmS6Y<<>th8PL}|H=vh_+`rF++VgzfHwF?)POOi$iLaTu-X)KFZf9*u?T z?)}Bqv}%JriAp(#fdiklAyZx}iL7=i?Yhzm53*70oyBuUiOR%j>5$SfU;Qg4%7qnxL^X 
zxI~FR3wjB?`nEIEh!feQ?EB5fxl_Xbb9SubMyRuRRP*Q$EOTP&drs zY?gCRJmNK?Yq1V9fe#~p=K6_U2~;O55mu0P&seC38CLy+tv~Wp-x`q#x&>ZM7-Q3m zW4eD3)V})uA3+zGj}#kgL19Tb|B00v_HN*#OGe|6oD8pH=^tCuMly2~^7SO4!t)?g zy6@oxDL?L3C~3=371z7zURp$Z`m{P~37?4aXWtd&fX+CBi)^dH8Pk?Ah@fkZ()!`) z1X0mZu{ka}gkauYDfc*`#Qvobr##rtiB{U4PY809iCvoe)JrGm!V8g_?{9}sjEN0= zJ7$GABFPts7amvF?Wxg3FuLN{L~fQO2Ji_C{H?Zr zfx1Mn>%xiT^c%1j?7^kj&;Ai(5oLtxTKUAKq*cME;>S!-zWFU`$Q5Qp+Y6LFt-d-* zI|Yi&1D5sozyVt6Io*3Y(QbMUH6Zt&!WxBbM#)2%rL4sK&J$q7R-gKw`9Ij&^fvO1 zmLCl&t)>E;}-txS})rj3hexI>&3B9Lw&HJr;em;dp7)6hFSx)BpE zDm*>V;wiw)K?Qc**$GE$ELU6tzKjmkL)s zu`UM#;VDh!tL`#Wf4#+=lYGrdZfsUx$JbO|r4oTwXfXkrg4`=3V%Izv8Kj(cTDGhf z58eQ=^xq<-^r*F|u>!{;_Em2^J)jAy{&s94JOl#v*YHux@HFrJNuq_KMj{cd4B!GI zyQy%L+U#GslYhy*L&uRMJLVUA4D-9ylSREQjKgNu|8-2>jqtiW+McHya%Ket_zkuj zm3$1|$Ks(pmrP4EkMi}jCyvG+`NH5bpZ;|q0Y&){coHggK%;uf=ygA9Gjg@-z<06? z?Ix4=xt4ntSo@+9oy6cjU5elzD4ubWSy9ggSr6AII%4s~kygw*$EfUf zCY$#?+H2MPU7Dk3nKJ$X=vT#P&+45%*v>a)aJH8>pmXiNa08~ z#uSsyGl@a`R z?7#3vi8E>bKryMKl1od9W2}*q@jjedoHB;K~H? z;h{>mbHAOQ?yR})5R|r$vm|$Ig$ZnnL#t*8KK5}E+@@Kv8^(9yG?EfT5PU1;lin{4 zLSUjS%c8>N;~$k2tlzG=zm*BM6jW_WNrM*T)#Gz|3{UBi_BFO`gWIeuu7`n7qJ7!+ zoB49j8dB$kFYl%CkKkQAyfMq60G`QsWATY0(n^Qq0MqRiwat%l{@qJH_O!Wp|2ASV zp~0AbnKEINW-+Zod_NkA)8QUU3OqKkv6{WUv7umiWmN6w4y`-SB|;BV;-wz#^2nX) z8^1f-0CkHspK8Awr2)AoVa;dY`-(cwmj-#a^qWnY@txS1{hgb=G>2m7{0c%!)H zbs=y!>P=MRpDu#0WZhGe0hmuS$N;@)mS|*4E~72mu{YNq`RZI2l38VG; z&9cBZbIPwX$TSBPKN?k&W?cZIRHxaR)9Ch}tUXnw4yeb9w9Y0HH=?rzg6qU0_kXX& zikwQyBcGjclOqS5tHEx2(=0Eyz4ua<7wa614t|PXbnX@1^gG5j4}b>F?#wG?KKQUL z>D3%@JC#ak1x48sS1KpB=kC_yJ!7{-np@4coMInIt48ux_LJd7=zo%8N)@-)@Mqn? 
zCB{TDMLie03HEs~sahW1-369Q7k;$b8PLlKc|e60hl^hIV7!27_os@JOqQ{&^m@It zOU+BY&Kd1I<4B8XNEOGQ1fxpfLKZXxc`gJZVp(4YLUrlS#O3!F;mrX!*VfOSO6<2k839IE#ij3B6`Z33p3V1pDGW)z||*)hzf4| z#5U$te~Ik~F3J|qKe`0sb25<>Dt`sQv!b)&q;?!Q=CJ{$~>URp+ihe!Y>>%EQdotYO8c58TuoTLmkppGqebiz(tbA&68SL)Ym2PBh`wyUU1P7^o-V&xxQ%&I z1`AGU3bH%$YNuf^IXQ&0!Tdb$I7(3)vaau0-GLq!gUc*E{8a*|5H8aRMZy`Da}U;Q zk0R&2Y^ra(MQ4s*7$;THElm5vNno7%&0ev8lfW&rjt8_-k;O9IROA2H67|DhXL*}G z{w0f8xhpks8E$F1j1p#xIWiXZjBfJ5^IaMxafGqW`53v4sVk*MQwPMFnyS<{K}hw@ zd!wvxfN`sRTTVb-x;RGfyoA$6%2mq}{0gGHfQRm0M9P^IUVT{HFSh1`bO^0`(macn znI;iM@Bp<>s_2kCwF=JY z&PSrL)N>3YjsOL_gI3lipNJ2XrOul1Wkw+f?&ac$$3qPNi@V_?<6r^H*bU;p$q=>> z1HvZjJge#_N_vj=kpT^tx;Xz>wFlov3AYhs1rZwWA82QuRk~@988N#nN1&YOzQHa3 zdf&p06k#ycU~rClDyg|%s8er0F|}lCP@)Z{v-VzuK5T>5<=`+i$=M^zg~GidW{KUn8}&u^k7nksXyJs5{jRl`tHwI4aOs5*?ya1z}BK`rSbW4Osuwk zzP!7-44zPQ)BpsgLeCxhiNT!Za_>?f*CXJ;7qQ8#<^K=Qzj13VkbE1||LKI?N#Dgz zIi7fmv}!*%-MEm8-?F^f%-T%3{*>l+mq_<~HYo0Jb>M|QdT}F~I?YSYm@dv}bQ}pW zFz@r0I@tf#7ANC?Q{`j!@F@?F3h&0m;LY#5vfPc>xE`I4U`@X&vyP-}ERVw+47+d* z8XbB2sGh5Hd+{W*%UR)T5f#vG^F8`sWtR@~8)B#C?QQLfjP@T}EoZ+c4rjM1ZjGme zhKp}sp}+!o1JpBW<=)Fa2TtF_-3=QXVIC2VH8rTfxH&^ZfK)EC-4LZ7vc)gt$j;&B zE7W-upbGnrXAHR~IRB-K{$r25*hy70%I93yIb>rw9p~$V7vkmchY;zmv47Gi0=ZcH zG-@I9F(-ru^j;GAl76&@L6#uC9|fE$kv7K>zz5BI7}0z>OgtK##ulGp-w47^V_Q0# zHuCjpJ4->8o$~7oo7i0@Y29LCCw36}NAQbYir!Nzr3tMo>~3U^4m{^j8Rl_h?YAiE zyhGp#8%ummkQ_Q>=-qTpeH?NTNV6{LaTjc#b_*38J~BuhbgN0&^W0t-1FuH~#hhC1 zMn{OGvG&h*`xt>KB~YjZM>i)`Nsmwxb)F$$rXEa__@Kz+APMRe*11R#?(&0!_hWIB<((OfR`TY$wQ>BK1O&x$6u zr8b-zG`lss-&MNJ83H{XmrzrxLCXP3zx616s_fK!t~=CSlew@PbyMR61D!a| zsLmv7%{S${Gp_IQYMXWX0v;SV?H^Pci3At^G0=Y~46@dK00jSvyO&zMzs`y%Yr2nYu6#9Y z*YTghKFZCC2=D}K1OtgE7My;a;3 zsO4r0%Bicd7q4#W5 zm+Xvgm|hj6LwVVX7t5zeMTOVcpsaPSw)$>PxMkvdb`kyB?G;aGW z#kZjiTUtq)U|p}b)@9@V#PZn`{gD1Td0g|}}0wF!xU=e0hy zHyG}uiLnRu=sn7Bh;pjoG`}Fdv!-#0vnYegTi{ni^fepVyOqj1R+r>Tla9W>4-K)UAhR`x5(nWp0izSdW$kHF1DHoWrOr2;ljn~@&VXv z!;to}?n^_JyK~TjZwrA8#KSX8dc!BRrSBlPOK7p;OZB1NX&jQUu>3+zhyr%DF>>dQ*Vj_VXb;zX( 
zIo_JSRUrlqS!=a;rg_c(E}I>bRiUcdPKf);b;qbq#k^^jYshOCbOAWZUqIUnQ9fUKBEK$&)hXv1&qdgv{1eugEQ%@G{a2OG^*wXuG3b~$Zv(2H3pToh28)cr zYev{el;D$^5k5A9V}#~b=H}vTS^YGz0)O@*21Z`O0#~2 zm-{BE`O_B3P}xZhi&+@oFrY>(h+qjmQno+gN&jLJZ+iW2Lp5>J!M6k+iEQybApKr0+#up4UogAeoM;JMHEB#{VVG0Br(~t z;o<{@RWdi)*lYX)(Ellg;`^QWQ~uKqQPcgOc8HPgMHiIdN%#N6MK-MwOPxW=U$AsA z6hD+|C?>Sb{SdlDXTqSEA{YPm#r(u&eVOc1!~;E`A@Ii?TO5jMV6eX7U#3^=(KbJw z)X$50!WpsTP5Ivi;dG`1(Q1Mn-iO$?#VW{dUm|NOB$ja@QE>~Ph#-mNJ6c1`0#bL8 zb^;qmJmr$E@2Yme^?T-@OSlKhpe$oGY@`wSL5|#*;yAw8FZzoULOkIY8Pjn%67i)} z{l}6W>Jt9?%jS@lTQ|d>jTNm#PJF*w6sHB{j<*k-*5Fdj_2a$#24~eH=jC%yHg2!H z2%3x{t~SIdKK>vjhfqvt_1%^QjO=(dj%>Ar?0bGs(A!D$zPa)z8Xsv49exN|7Z+O6 zQRUPUKl+X}fXYb+dctaPfkRo?R%^7-e7>~5?UUu~$KcRYt<>l?;Xf7hVW&f)4q zKa7KyF0KN+?^GbTpU)om1euxNw5TZ^rg5oVeTlQIZ`^tJb~Uv$0s|b;q%9c&;>51e z`c$)xs>`*eJ`x%qvrd4^9+drc4!I@NZrA_@HNnBsw{F1nVkF8z@ec`yiSEx14LDqy z(CcM#uu}3QSi0JRtqHAV&yzQlJ+KM6c}f*i3M;DrmHQUjI^~nLX$wx^x;4UaN$FVt z&Pmm5(XnW;jlm;3k}hAJAQFW@KVom}CA#H78GhaYQ}k-~GuPz3;-Eyv;kR5brwy=B zfR_?_yPB`{N!wjgu_j5w7@JOz2OZGu;OL@xz{Q;*pimlR!y=pCB{#+t5AL z32+inpvA+yi-KRXMGWNP^XPZAp}vroYP!sQG4E;+EVla%vHfn| z#|EaX2j^1(GZJzEND26^KBVP~l=+=c^IXE0_g9ivk#n7S?_xastD0^LpRc#-hdV_0 z{hMS2L%wspFLfHM`AppRfmS04pWmTbkzSqy*eo0l65!CmA7^`)_Abq90Nw2}UpR@b zjs1LC*2a%m*M;*QVbo&1a^QSx7#|CBpliq@NG4%{O(l;%KbSXgE&km>;pUK}O#LY7 z=hKdHY$ZPGW{US;_!1p?0+d#mw!Ifo*>HlFQT4NYKK3Xx=%8w}&z^)%lHyL5sH{iV zQlF5F9w7Upp0rr!ed-P?%I&>cfx#jb4Y6>zdKeJ+1*oh}oS%=)jNbSgBl(?h{7G(h z3mkpT=d=<%WB8h)flTzOV9O;(hB8mL9kMwruOktKJSwJobi9HGFIkOasl_Se(vrzI z%21W_{aWwfkgZ?Y-lndle*~}utqX+WJJEhlYJk+i+X|(+ms~(05=ByP)FJUvJ{*@t zA0UuB_vEcydLk(s6&v~4(`h))Cc`&Un_zxPiYeT<0))7e6|JO%vqVKb!nHdzRPQar^O`$jL}9z zcyBLNT;7G_UWPng87D7{$!@~X$jtVVDcw-^!O--QX8KHDsde0RJpzB=Dl8CGTWVX; z@z}4;-=5e^MKO4ZMW5ZX8QNt)v*|Dr=8Jk=B(Ie^;aW$=do+aqxqxHb|3JavGWra5 zNAiX(4t5fh7us?>RPI#)^Zbo>R|As>Q{=BSGd7fKNt-!1Ld6HSL-vHePI$O@TM*k}5G zq^A`yH&o-#vlckF*uR?APTJx>E?Zt)SPk|+!z72N4GVeicTyS;X^jnzEuc?(yP3P> 
zlN08Op$ef~naLYyYId-keedgm&K9%#VT>(izue;kL43eqt%)>!v%o88*#7LwDY|8_^$tgu9%gq<`i_mHPkDUVII;Qtw_RB+^7)5mC zCO)~}!GYsE*TkmRB3s_G>oXw8BYs`AY5sg;!geCmo1pHIex)aefr2!DvA#Du%ja7g zDT(m(jrA81Ja0NuFR4?npk#SW=Tgxj>f>|w4$m(>3*rxA$Nt%CR|a@Y`UTlmzk=`{ zlJa_LK&$aMpOzv$X4@i{)S2$*P^*VjVyi`d>`~!zR2>ac!W2?GYJ3BA%brp-`1T-|#o!YMKf7{>+eEb0 z!+5L{^jYlE?$bB4;fJkXuVVT8R&evJ-b|1^3j?y}_DNB@Qmhb~hIpqim9aAH5+>c)Y#7rpKts zZ(aG`MU5}{u>`^kuxaA4s219GVYtPiXZkO2mN)&g2(3*85 zekL#c#|vwqsPkYpbh%Qbr(NbNcExY?J_YDh8Z^w7#{*G;43yA?l`OC2K^dy!{v6SH zps#MZGnK;JmJV?nmvaY@bw2t4~(UEcWEs7l%MB_4?CHKX#Q!hWaXl6K6-TZ5uZ=I^?gOjBz|5}r&69B4}z_Tzld}wGy z?b%1(T+-Xdvq{HW@~S}iyt|ILpxD5FIltHoy%AD(m<*-9OrjE#=`&No~h z&a5kP)IFE8-cJKe!KPQ3n0;~FRcZ(VX?#sEJ~^5!nlJ&J+T5a$c0~u#e_WY-)+gKH z{YZ+(zrN=FbSpBh_D&uHpO)o?oyPXOrEcIeidyH1wR_`PpE?ueekOtlSq zD?*Um=U0_Q!WP0HmHr;0IDH($;EznIm)1y;*Uw?!R{{+X&=}A!?<5WRid`iRdZFm& zs+(j3E{U~_ZlOKp%p!IEGLh?yA26aZ^Jr+5KW(&=>FBfk$ha?qWhR^MGpKL!pMKr$ z7H_L>>D%j8#_jS#U#seThtMgb!S$HNFps~D>K6`Rg~$(tc6ZusJL_?`wc1}j9}iJbfaut)2sEk--+ z=k|Oi_(_-E>o54eBAm;1UF`l>4h<9QdG{*Q0M!SAvrL_28w3V~MH0;eJIaZKp0v29 z&8L=>zYp8bg30W+;VutS>>Hof2%kjM&0lXT{-?6DY-;oS+IEp5#jUsnEAH+Nh2lYq z6{p2DxVr_~QrtbbyA>}E#e;i+;BI$*|C#3lJTLPmbIoM!wXeO`ah$Pjy|bz4_Md;~ z1pC@Ph0t7v#4(dWv8uN^bN0A5tHRbFqD*Fu_d=(_{>f6i@-qE|xqU7m}YZ4OB1(@)vk7l|OabbzUsXrP!g8cW9xZUgV;}&7FRAIGETMG4Qrc z@Rcqr_DE;dhCVJEZw@J!9ZD-9!<^q9?2uEH5}_nyZc=qL*#3$=-TZa@m4Q!GdR6EU zY4dEtdO*_8qNu$V?|#l`$KVS~E7m$SZ(prQt+X8QNkep)GiGJi&2G26{L^daEZycT z0URxTOx4*qHtXF&(amaJTQ(5U6R32nkJcNKyGF-aYJi~Z%rhUGfjt8)G*Wh_n4ye| zZ5=c1H%zCvqvVahp$pm$Mk{QSf!7M&wdCw*V-Q<*DX?w&RYHw_PTRtKRD@@Cq(zii zt?u$wxSHhJF;K4=a%UZOB&L8KibDYJ`ufhIDyMtYH6BaRStSfkIWkwNG4l&t(SxYJ zQLQF`xN_vv{wyCaZ3~tpf^SRmTuiH&-0V_x6!%xw6rX|$3ILYPtUHWr;FSzYX&~@n6+zWdPH@-*=_8Us~~T#}hK z@N*_gzQ2b^ zx|WPHq$4pL3fGtHr0?`l)0fV5ivu;yzAajO`U^J&U;4RYQ!5P{U%oY7%`9|Wu8HJR z(-Gzn&XqZwD{+HyJ~^i-OJi@j#631hjddE()`yHmt57~FEo>vS_;W7Y*7Lb0Gi9*k zn6ht=38plebV1|SSqhBbd_+mA=y(3l&F!asQT$W6(CKy$(Z6pXFhQ1N`5w<2w)15j 
z9odK{jC;(c*JjE&C%`l3cN!Le0fv>-!1OqU$tH*A(E5nr5y;}!?O8NyN;W%q`bp56 zU+>u<+@g=_-?kcDzZ^l_9i(>K7B!WJfCp&eEp4?|?8Okn+GS zT>@*2p4(bjyL&ZTzw2()&PmDGE;^QvR2&Le553=b5P>jB87lho$Vtn?l+lg~B_KxR z%i|BP(Tzs6D+sv*OVrpQ)s;aY6pdCX&P7H}O~-L`b=Z(tWHAO; z0SjPieNi@v_gnd&TD;`@&{MWwPoa7#qokn6N#O3+-VD&~dsROYw)rA29lkd|5CR?R z(!8zwUPV>1syn!rUoQL%UwMJqj8@iI;X5^}PCrMF*a9p@%>$|C_N5b0vDn?GY5L>x`NI|S=4kN8UhC-(u8?-#qWPk>GvY!hTsQ3>$elCyZ1wiFoM=6^zt53mj_xLUqaDdBI*2;&ndxKcbao6CnTY5RGuXUdiv!~|-l`MzV}lS+#5;zJ-IVr*U%&TC*u|K0^ zf}G~(aVqCOX;9?~8S=Gm%SmpysOc;p#n2FFL+_-qi6T)j0v^vhY97Ye|3{p8cC3X>-o}cd&TX9WRcHtwxh2 ze~ZRP9M;=crn5a~#rp#|M{`;hwOYO^_%dziUVV)v8hoIco9tODo1YCDXl76J4dkHs zJH|uIgFy6&y2vc=8okc|G~xbf@a2A>Qf0xX(+!2Zk80(AxF-L5UPvX<_+d%5TpcgA zqP!(O(--EWZ=7YKE7&jl`82e-2$#Mk=OIx_xb!V}a)k5)RX6E|4p?EoHZNl`{meSf zElFi(<2+}sSK$)C+7E*{(ZZzzZS(WOzBm*IT!+MBAvn<76h#vfH7~=|ALqjKjTES@TEU0!YgbU~Rgb@O-&BENqDu=QN24{s@~={eReNU6E4=aQBoJ zzI1Eo9rrzlHmvAQ_pT8W)21an2KAa{)?y|A9Zk|x!u5+hSi>x1juF3}FZ0^t#hMmC zTk?1Rl`0FN@ZeBs%}*q{pq``Yw4`h#`F;oqqW#>abcnmkW{@bmgRNqTdHaKWnv?Tp zi<)ShZ8BM%r9)_kR^mH7aE)~=mmKIomZ5(N1(F&ceUC9UU;u$?qz?Ai4JImr~!Cdmrn(% zu2>#`Z36A`a6A`pZ(gfeo5zzY_B*8_>1=Lqr=v;|31BZsE;nqdPa^0o;xzjW-b08E z47SBlhPGEj5|*c#eE35?#=v_aeN<$j4l4(6aT zV&CYNpDouTGUyjCi1~8B)6iSIvL3A4GX(q={PrkFOw>mrUyHOm+EBK(61DJlxa}Y= zvpU3;AD4pDx3XhMir#gllFyHPVBc%u< zIh0{{Q4)H6;%620zIBt{BW8Mru zG0#P>RK_T=)G^~KMfa>G_|GE;A7Ur_*1uo1drGF38p_<^QW3E@9}!zoGx1zVLFb-K zHy13$*N*>Fr3O2qj}Vp>9|2}$wy{d$r|=3x*aCzS9Hh&~kS6BdE*riDGG1ni$r0zd ze<-U=Y|AifYm)6-Eo)bqks@)j6Y`?jS8~tbvd~zfuO)y#p99oYEMYrOrVxuTKXkq% zdQ(gA)Yawm!!m77y7@&vM%W&6u2*-YH5aIZ)MSx_SwZiSXxb0S_J@S_V~BL@<_2(% zkVMalhm5@ruCOY%ErBRG&ASCDhgov5@S83%31k&=CA#qzFb$Y8gbK&gCJP?ur+n6j zScb?LO@*dr>ImjWXMO>#pm{~#CnB(8MdF)1=^*bI3CS#>z+%>Byr~@v=zrpf04!XI zpK=_lh{0lECh@VP*o-AD@r)(1tWldjgD#k zK}+*BMMMV%5!m_}!(R44G`0JHo9DMNDUT~4yOG|iC?^WL^W<wRcpCpbZ&tw1EYJ#W=mWT|tW?TCwVq%jHJ zZ5KS~_XlicZ_m(Ff<)Eoc0DP3&ouUna3KCA;!{(JC%^V#j@Ed2+jk$ox}cAWLT!99 zV{S6`Sf5E!pbkhSHnkJeY92(9pB&KAeQ|F$V7FrHKUySb>|r7t@3da`;o2QjYJ2-E 
zl0eP3xq|9^GmNV)WVzfw{1vw%PvvYEccJ>Gs@sPY-40sv7BDqlLtb5#lERvjv2ldvj|V@y1W4oI5q?6foBgkH zDYz9~77WRA3H2zcWY}o{$s6S{99wPto*QrJpn(T6L<$P;6w-mrj^)^~3HEc<$@Ar# zu1BGaCecg=ii9d_l>Il^PFOmd`>1Qb~ z3pRU>UF}qc89%|)49*uDH*dNz!cq)(u{KS#VmiE! z#z$qe#Kt&K+kHG1Um>#0Piq}wKO=`$y!#L z5HC`{Dt5pvJN{Q(&OJYw_BM@;#1qD%k!V==&6QG%_nQBu7E_mv2C|uqtcIY9)u)?u z5sMY0Mfw`Z@GsxhOV(Q6|9WU)zdO;8+(8lH8ZB*5YiQ-W8*FdZMDP=0dd^~WiJm*= zt~Pt=;N_Vj$9W{M!4vHQ`Or;(Pp*_=$=ODDBkM23%0^kMV@2tJ)=W7|KG%?Vh5QM~ zBj=eVy1zJvbxCgG*l|2zr?j|uzMwrE+R?2008;tk7UB*)p8qn#u z=9A~A=hKKPEt8%IZ3FM0FhuPIo5z8pGu~ccK>W+co^5~{Mp86<2`P0( zkvNV<88MPGlZyLr@fBNZEM3R{2{76PN$I^S*wI1vKvJB^%mra3@5vK!Vs5)Qz zKxPI(x%&x-3=2WCh;6hn0N5$<-9|5mWU`)a+#XO$g{YP!XE_8PCpi{~AdTh5f5M_O z<*Dk791XVh6f68NJVB?GLtFtlxQp?<{#8(I>vpkiIj+uXRt@t;heRS#P;ovb%XpWa zdJ30Pu{=JH(2$iHQWjNI824mVzTe)$?T-J1fKX*KY)`TS+`t;+iqol z;mIsxo+5kX)byt1*akU;m|zZjd(20OcPS>rCAaW{cUnf6CRwPR2=+ewrIo2y7W-VF zFS7;ROoKQKJNuVuOz-*04Uk0oNVFL3Gs&2L8Gh=7oE5ir@4{k%K!#b1l0~(2u0FP# zRfF1t6rL-;Vpogd0$)qGX)Z3?W*d+_errbl^3cWsSyyFJ2TWMw9<}NIV~* ziQ$ZD{ys+0Bst9nhuif+JFy#Wo^0?g5Y3&j?IsouNsM;Z`kKNO1HtFxceUu1Kd~|^xTRhcc#Vb;5k~SsfFAZZxddhLo0G} z3XQ~ZBoc-(SrF)~O4ph4UCZZ*ti_wXrY-J2jmOlhiis*74U^XGDF#o|;_$zr)y@`6 zw8?qBzn>RP5rBm6&UEAVOJscQxOX$9`!*+qW8l9X zl5jG0%>Q@h5l6c*)llUKpMT03e3@C%=U3Jh3r`HSr_QCkk)ozTX`djmh^}FCtR8ZG zR+$m09T<-8{%9MBmt0E=zEEQ%PkR|m(%sfdJkBxhXuIFF-ULoL!_6H$65z-;kZK!_ z1a9-*N96h_x>)h%1)TQ=r(d)K=^~jl86(6e6q79lPbq~DD9ExIoj{%4*L6b|9@16X zgV|}qEQj`+s8Tw1M}1<0ERG>reUIxh_04_#vknTAnY0br@_k|l!!<*`sA<3Lsk}U! z@UkCvjE;sa=?oLjDp?*_9iegDH>;F$iE8D6Z{!H^dd(}?mLaV<>&K5c>C$U%^qoD!jRaumQ)@)(XA$Dkz zT(^4T&z~#)WDnRDwi%qp4jcf1$$LhSF%#wxlY8}q2Bw6@OQdw8t9G^Y4RJ+9nT-Y6 z%O1u2;bw>3cGDkqMzqvv&h@`@i&zkP*uSDfu@mtLPZfv!NzXCNJ2|zh{CQRV-Lr3M z;Wdz@&=+h5z76>Bb5e|ZO-N`7ZT+cNYk$-(xkE(rpgMI)(I^#%)(-tkDB*fTcPXhp zWvU_bZ*%x?;7BvzwN;Zn+nzX3P)`);;cv~*ed&MKE?5$&W8?2sBO-h#U z`lPO986`baZ#KaN@%x(30G!*1gOlu~1wG&M#uUBg056Bpl^@=B7zSsPAADrwOX|h? 
z%==Mq`?o=+f0Y>lA=aeN0Z!%Q&y!LnBGVHP0*irT+o@UU`+OPh<7o+Ou`%iv#h|kW z>rLj=$8^D8fw7SK=;7J-+X&I~J{6!$SfdwNi{UOzQ2vT2(#g=~yxi>SjQ?|_q{{q5 z`(FbAoJULXX$^7Sl_^O-myl|I3$tfb>NGU(2xK?Z=KSN7f(@{Lx>MY=MO;G7F>K!z z-Dcl#iD@YGCE^3Y!`V6p4%P_b5{`a{7g`dB{IWl$iz2o|JuJUm%mzTSZ+2dV>L zQEWu8EL9JzW@1$jlXLDdjKH{?Wk)I-;(;o8Q~5Pi!XcEv5Ii zLotVmMB92J%aVoMyogWLL7dnwcfP$mi?8L{hk_~P7fPHD^@5_@hHP|J4WZt}On=i9 zQ%CstnpM(7#dutJlU61^2=elpvTXWQxmp2+<3lTr_E$E4ROWJGg9^c%@OMmNBHc<{ zU%|z;JF8#oEw%{2c-Eoc7O|@&k5f)`zriTO=`BbF2evcx?_>0iIKAimc}4GL zC#NJXniI{}M4=wMb|iedDvu{l?4W@C2L*l}13n4ks6cvT6Er}i ztbj9P%Qkq}1%O40*57v0tMK*wI{r_$dkv)k{N=2RD>8_LDw5s?hpI>D|x;Z$>2{`J`HFhlE-+GF!3m0x2SB_RP1;euJ+`AzN7xNlP#=50gBf~U_7@K{1mI2 z%l-*CsG7lZe?0Vc%nxxy7ubgq_LWae`ohJ+c_|(vIpgQvEO`-UXFKqDq-cE2M|)>m zzL*-%68p9`aaS7UeRc*GKG#jPo!trmsL6+%57dvN1a;4YFHWv`UX~7PJMQ~!&v`9c zGe<(GfF#s6E!3dzZ|fvA`9y;)nQSTiLg$wuS5BF4t$Uh#)CWugKj!wJ13Z+|pW#7e z^IqR_I?SjjS)ji8b#ONdmf}3o-ZU98-y1XE%b4fkvuLpn{y-rujITk4K-N1!D&I^F z2X}VEH>}(xtMXATDh5|O(Fs|QK{`!DR^1N#C|^e$_Z0Zdfx`AJE6KSJY7yNoHOZ#p z+*+C0Y`^QJwY_hiW#DU7{6@s;)ZvIdz*H~r&gZx~PI>-;B+u*Jh?*!*bw2wDO8D&< zis|h{r$JM8TD5)7v*=M{=h%6>m8Skx;#@my@P{oN zedyIcBCmG;^A9>O{?BgRP`{gm>^`q)nAc!Rc55tlI8Sw_CQ9AfAR%hYen|OCA`+(a zgl=SidR`qiAW#GpGdr~@r(DyH_FVyW_>VS+bq!R?fz%R7suiHW@xz{ z8CIvW51-`xL?0(B#qw+|BTTtYii*ml&&Ra*-%ZV$i@9Kl?{#I1i$fnj1<|;AOjut6 zg}a$irou3fbij@gEUR6u`zQr;Mlk=ATDe`mlpPbqRV{iLog(BP2|V{2AFmSLeT!x% z;Z{TE*7G zKtgA&5zq=-5h;Mal$0cS zR=;iPz&p#?@PR*1kXBO`ZKU*9etEeQ~p5yf^s1rwyc+V@2!>n4x#3^on?vQ1R;{;!(WHM2(v8>E>0~5u=l*uyjU@q z0%VeEVryW-eGNHWQ07XoAFmroP^W$4C<;n=9t*ihNb9PdNY7cj^j(A(`FcgoePUj9> zdPOl%N)P2ev>9A5M;(aUkGf68?n!gQC(&)6yz+2s6er7&7QHkT=k@ zAv#aC?b@|MG z^jL1#?NMpCTavL<2H;9ASBLzgA1s8Zeo+NTZ~!b4z#CdL$as|leV<&2rR+s9rnq%f zpiRyOAiutW1s4O*G_ZdG@}d0w_6%fHH1??TDDZjgPM!K2(Q9K{mezFk-yxGhT1sp` z5+&=+J{6R)wQnqC2r(nus{Sv0+nn~AgGglM%nf9-B#N2}e><&ezl8`obK*`0VG>(y zY~Yt=VPZb%07yc|V|lcEi7*>)|V$PmwLm zbN=#(XnC+0j572(e}1m50%Ul<`{n9eN1T9T826Ck_}2{I*~|#(O)p0^#{8It5^pFF 
zb&?q-ai=CnK|F1gD3hD_k3kJHU7j{SUn^007#_v0QO<9CFrq5_?qTZMRT<7-YHpJ) zT8+_pVKgI(^@+=(wal$=_oA4hN}w+#fJt(Z3=3aOpeXlQEDx_v`w1?9h+wX`m+ivb z+)ZTY>t}c9n-fNE`Jm+73>4iCCV59~xd%SAO!1LyGDEF>;QIW|_rDAl-l`WA1Vg57 z^xneX7r@l_5@Qrcs`J8^7UDb$-WeK4m+XtJ3&KZ7S6+^$NZ>!&X`PU!KTz#P_GAY} zUxpQ}?a#t%EUg?CV!M87FVE7`zVtM28Ws)SA`7AtOq~2ECaAz`qv_?F~C3qSUCUM{7shv@F07{vSkdl;zdrs${-|{2xkbS>yl! literal 0 HcmV?d00001 diff --git a/docs/source/autogen/index.rst b/docs/source/autogen/index.rst new file mode 100644 index 0000000..300e950 --- /dev/null +++ b/docs/source/autogen/index.rst @@ -0,0 +1,62 @@ +:orphan: + +=========== +Quick Start +=========== + + + +.. raw:: html + +
+ +.. thumbnail-parent-div-open + +.. raw:: html + +
+ +.. only:: html + + .. image:: /autogen/images/thumb/sphx_glr_how_to_thumb.png + :alt: + + :ref:`sphx_glr_autogen_how_to.py` + +.. raw:: html + +
How to guides
+
+ + +.. thumbnail-parent-div-close + +.. raw:: html + +
+ + +.. toctree:: + :hidden: + + /autogen/how_to + + +.. only:: html + + .. container:: sphx-glr-footer sphx-glr-footer-gallery + + .. container:: sphx-glr-download sphx-glr-download-python + + :download:`Download all examples in Python source code: autogen_python.zip ` + + .. container:: sphx-glr-download sphx-glr-download-jupyter + + :download:`Download all examples in Jupyter notebooks: autogen_jupyter.zip ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/docs/source/autogen/quick_start.codeobj.json b/docs/source/autogen/quick_start.codeobj.json new file mode 100644 index 0000000..2dd842f --- /dev/null +++ b/docs/source/autogen/quick_start.codeobj.json @@ -0,0 +1,263 @@ +{ + "PythonJob": [ + { + "is_class": true, + "is_explicit": false, + "module": "aiida_pythonjob.calculations.pythonjob", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida_pythonjob.calculations", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida_pythonjob", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes.calcjobs.calcjob", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes.calcjobs", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine", + "module_short": "aiida.engine", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJob" + }, + { + "is_class": true, + "is_explicit": false, 
+ "module": "aiida.engine.processes.process", + "module_short": "aiida.engine", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine.processes", + "module_short": "aiida.engine", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida.engine", + "module_short": "aiida.engine", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.processes", + "module_short": "plumpy", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy", + "module_short": "plumpy", + "name": "Process" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.base.state_machine", + "module_short": "plumpy.base", + "name": "StateMachine" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.base", + "module_short": "plumpy.base", + "name": "StateMachine" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy", + "module_short": "plumpy", + "name": "StateMachine" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy.persistence", + "module_short": "plumpy", + "name": "Savable" + }, + { + "is_class": true, + "is_explicit": false, + "module": "plumpy", + "module_short": "plumpy", + "name": "Savable" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida_pythonjob", + "module_short": "aiida_pythonjob", + "name": "PythonJob" + } + ], + "add": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + } + ], + "inputs": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "load_profile": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + 
"name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "load_profile" + } + ], + "node": [ + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation.calcjob", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process.calculation", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes.process", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm.nodes", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.orm", + "module_short": "aiida.orm", + "name": "CalcJobNode" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida", + "module_short": "aiida", + "name": "CalcJobNode" + } + ], + "prepare_pythonjob_inputs": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida_pythonjob", + "module_short": "aiida_pythonjob", + "name": "prepare_pythonjob_inputs" + } + ], + "result": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "dict" + } + ], + "run_get_node": [ + { + "is_class": false, + "is_explicit": false, + "module": "builtins", + "module_short": "builtins", + "name": "function" + }, + { + "is_class": false, + "is_explicit": false, + "module": "aiida.engine", + "module_short": "aiida.engine", + "name": "run_get_node" + } + ] +} \ No newline at end of file diff --git a/docs/source/autogen/quick_start.ipynb b/docs/source/autogen/quick_start.ipynb new file mode 100644 index 0000000..49d4d25 --- /dev/null +++ 
b/docs/source/autogen/quick_start.ipynb @@ -0,0 +1,75 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "\n# Quick Start\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Introduction\n\nTo run this tutorial, you need to install ``aiida-pythonjob``. Open a\nterminal and run:\n\n.. code:: console\n\n pip install aiida-pythonjob\n\nLoad the AiiDA profile.\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from aiida import load_profile\n\nload_profile()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## First example\n\nSuppose we want to calculate ``x + y`` on a remote computer. We can\n\n\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "collapsed": false + }, + "outputs": [], + "source": [ + "from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob\nfrom aiida.engine import run_get_node\n\n\ndef add(x, y):\n return x + y\n\n\ninputs = prepare_pythonjob_inputs(add,\n function_inputs={\"x\": 1, \"y\": 2},\n function_outputs=[{\"name\": \"add\"}],\n computer=\"localhost\",\n )\nresult, node = run_get_node(PythonJob, inputs=inputs)\nprint(\"result: \", result)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## What\u2019s Next\n+-----------------------------------------+------------------------------------------------------+\n| [HowTo](../howto/index.rst)_ | Advanced topics and tips, e.g flow control using |\n| | ``if``, ``for``, ``while`` and ``context``. |\n| | |\n+-----------------------------------------+------------------------------------------------------+\n| [Tutorials](../tutorial/index.rst)_ | Real-world examples in computational materials |\n| | science and more. 
|\n| | |\n+-----------------------------------------+------------------------------------------------------+\n\n\n\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} \ No newline at end of file diff --git a/docs/source/autogen/quick_start.py b/docs/source/autogen/quick_start.py new file mode 100644 index 0000000..ae068de --- /dev/null +++ b/docs/source/autogen/quick_start.py @@ -0,0 +1,66 @@ +""" +Quick Start +=========== + +""" + + +###################################################################### +# Introduction +# ------------ +# +# To run this tutorial, you need to install ``aiida-pythonjob``. Open a +# terminal and run: +# +# .. code:: console +# +# pip install aiida-pythonjob +# +# Load the AiiDA profile. +# + +from aiida import load_profile + +load_profile() + + +###################################################################### +# First example +# -------------- +# +# Suppose we want to calculate ``x + y`` on a remote computer. 
We can +# +from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob +from aiida.engine import run_get_node + + +def add(x, y): + return x + y + + +inputs = prepare_pythonjob_inputs(add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "add"}], + computer="localhost", + ) +result, node = run_get_node(PythonJob, inputs=inputs) +print("result: ", result) + + + + + +###################################################################### +# What’s Next +# ----------- +# +-----------------------------------------+------------------------------------------------------+ +# | `HowTo <../howto/index.rst>`__ | Advanced topics and tips, e.g flow control using | +# | | ``if``, ``for``, ``while`` and ``context``. | +# | | | +# +-----------------------------------------+------------------------------------------------------+ +# | `Tutorials <../tutorial/index.rst>`__ | Real-world examples in computational materials | +# | | science and more. | +# | | | +# +-----------------------------------------+------------------------------------------------------+ +# +# diff --git a/docs/source/autogen/quick_start.py.md5 b/docs/source/autogen/quick_start.py.md5 new file mode 100644 index 0000000..f416735 --- /dev/null +++ b/docs/source/autogen/quick_start.py.md5 @@ -0,0 +1 @@ +bddfd4d8be857f5fd010142682258625 \ No newline at end of file diff --git a/docs/source/autogen/quick_start.rst b/docs/source/autogen/quick_start.rst new file mode 100644 index 0000000..62e7e47 --- /dev/null +++ b/docs/source/autogen/quick_start.rst @@ -0,0 +1,152 @@ + +.. DO NOT EDIT. +.. THIS FILE WAS AUTOMATICALLY GENERATED BY SPHINX-GALLERY. +.. TO MAKE CHANGES, EDIT THE SOURCE PYTHON FILE: +.. "autogen/quick_start.py" +.. LINE NUMBERS ARE GIVEN BELOW. + +.. only:: html + + .. note:: + :class: sphx-glr-download-link-note + + :ref:`Go to the end ` + to download the full example code. + +.. rst-class:: sphx-glr-example-title + +.. 
_sphx_glr_autogen_quick_start.py: + + +Quick Start +=========== + +.. GENERATED FROM PYTHON SOURCE LINES 9-21 + +Introduction +------------ + +To run this tutorial, you need to install ``aiida-pythonjob``. Open a +terminal and run: + +.. code:: console + + pip install aiida-pythonjob + +Load the AiiDA profile. + + +.. GENERATED FROM PYTHON SOURCE LINES 21-27 + +.. code-block:: Python + + + from aiida import load_profile + + load_profile() + + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + + Profile + + + +.. GENERATED FROM PYTHON SOURCE LINES 28-33 + +First example +-------------- + +Suppose we want to calculate ``x + y`` on a remote computer. We can + + +.. GENERATED FROM PYTHON SOURCE LINES 33-53 + +.. code-block:: Python + + from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob + from aiida.engine import run_get_node + + + def add(x, y): + return x + y + + + inputs = prepare_pythonjob_inputs(add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "add"}], + computer="localhost", + ) + result, node = run_get_node(PythonJob, inputs=inputs) + print("result: ", result) + + + + + + + + + +.. rst-class:: sphx-glr-script-out + + .. code-block:: none + + result: {'remote_folder': , 'retrieved': , 'add': } + + + + +.. GENERATED FROM PYTHON SOURCE LINES 54-67 + +What’s Next +----------- ++-----------------------------------------+------------------------------------------------------+ +| `HowTo <../howto/index.rst>`__ | Advanced topics and tips, e.g flow control using | +| | ``if``, ``for``, ``while`` and ``context``. | +| | | ++-----------------------------------------+------------------------------------------------------+ +| `Tutorials <../tutorial/index.rst>`__ | Real-world examples in computational materials | +| | science and more. | +| | | ++-----------------------------------------+------------------------------------------------------+ + + + + +.. 
rst-class:: sphx-glr-timing + + **Total running time of the script:** (0 minutes 3.881 seconds) + + +.. _sphx_glr_download_autogen_quick_start.py: + +.. only:: html + + .. container:: sphx-glr-footer sphx-glr-footer-example + + .. container:: sphx-glr-download sphx-glr-download-jupyter + + :download:`Download Jupyter notebook: quick_start.ipynb ` + + .. container:: sphx-glr-download sphx-glr-download-python + + :download:`Download Python source code: quick_start.py ` + + .. container:: sphx-glr-download sphx-glr-download-zip + + :download:`Download zipped: quick_start.zip ` + + +.. only:: html + + .. rst-class:: sphx-glr-signature + + `Gallery generated by Sphinx-Gallery `_ diff --git a/docs/source/autogen/quick_start.zip b/docs/source/autogen/quick_start.zip new file mode 100644 index 0000000000000000000000000000000000000000..b1ea08e071dacd3cfb85d57f587aba9106cb79f4 GIT binary patch literal 5353 zcmds*-EQPG6oAVvAQU7dq#kHQlmuDIiNuQpa66@*HcNqoz0xHagDcT8gttzL7?&IFAfh z5*~M2#o2_47mDay5;Nu*F}YEiQ!yYjl@rNWOpGF2!nlPXVMsZT=}tN`Vb2tbNfPIsWj-nh>9o|)G+YilsqJ}FeD0`Ml?$l)J7^vb8w$WUcv)~D79WOsJ&giY^qsG zHES9IEtnOsX^ z2MRq3-Q)-&aJ|_rkRMn{lwMwjxwLbz9PQDiJ9Q7q{(!hMxITKlk`9#{eC4ClOIOl_ zfdUtX;l4iWqowlqkt-C$W~?&f_L19mG|O^f1_ZqaE@?Y+t9JeX=CvJMaoKiK&869L z3t<@P0DO#kaB4zX%RK~qm&=$d}0Iox&i<`+95(z9p%u`{ACcs^o%IdPJky>z-8#L12)BSs1 z7n2udUe9b?sw}?{nmPG_QL!^oTEtbB%V0K_8H(a31QL7#sSLjoS^2uFx+{w~gTUJW zBucZE=a)NhRU^wa-OzGZF@X$!Qscriz=c2l{_&r8A8%~n-=|k_fu}QhehC@YjhGM_ zcjj6 zLiF7k-T@=7q^N;J-%=v#@R1XJ1jRn8YEtwYRP^0mqT9Z_6)pO*z(v32MKD+a3&@oZ}mAGg;4k7%K9xh*@cW~1+DI{oH%7L6Jl zZA2Eq(FWzV*tnXfZbyx)+30w=@53EM_21o>zqnwT`OcrMkO+X8Ov3=&tG&|MC}GlMfdGKBJ<1nod9oS5d*JQwdwZq7;AO1Oz!Yps zVK)T^KEt8c6#<;2Db%{>ygqsI-n|F+m!B-+i6fS`Z}5NjoUtu?-8KUyu)KY<3+I$= WS-a^tdGf)B*nAIuF5&u<9mRj>az!%$ literal 0 HcmV?d00001 diff --git a/docs/source/autogen/sg_execution_times.rst b/docs/source/autogen/sg_execution_times.rst new file mode 100644 index 0000000..81cf58c --- /dev/null +++ b/docs/source/autogen/sg_execution_times.rst @@ 
-0,0 +1,37 @@ + +:orphan: + +.. _sphx_glr_autogen_sg_execution_times: + + +Computation times +================= +**00:22.087** total execution time for 1 file **from autogen**: + +.. container:: + + .. raw:: html + + + + + + + + .. list-table:: + :header-rows: 1 + :class: table table-striped sg-datatable + + * - Example + - Time + - Mem (MB) + * - :ref:`sphx_glr_autogen_how_to.py` (``how_to.py``) + - 00:22.087 + - 0.0 diff --git a/docs/source/conf.py b/docs/source/conf.py new file mode 100644 index 0000000..292bdf5 --- /dev/null +++ b/docs/source/conf.py @@ -0,0 +1,174 @@ +# Configuration file for the Sphinx documentation builder. +# +# This file only contains a selection of the most common options. For a full +# list see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Path setup -------------------------------------------------------------- + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +import os +import sys +import shutil +from pathlib import Path + + +sys.path.insert(0, os.path.abspath("../..")) + + +# -- Project information ----------------------------------------------------- + +project = "AiiDA PythonJob" +copyright = "2024, Xing Wang" +author = "Xing Wang" + +# version = "" + +# The master toctree document. +master_doc = "index" + + +# -- General configuration --------------------------------------------------- + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [
+    "sphinx_rtd_theme",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.coverage",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.viewcode",
+    "nbsphinx",
+    "sphinx_gallery.gen_gallery",
+]
+
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ["_templates"]
+
+gallery_src_relative_dir = (
+    "../gallery"  # relative path of the gallery src wrt. sphinx src
+)
+sphinx_src_autogen_dirs = [
+    "autogen",
+]
+# we mimic the structure in the sphinx src directory in the gallery src directory
+
+# path of python scripts that should be executed
+gallery_src_dirs = [
+    os.path.join(gallery_src_relative_dir, autogen_dir)
+    for autogen_dir in sphinx_src_autogen_dirs
+]
+sphinx_gallery_conf = {
+    "filename_pattern": "/*",
+    "examples_dirs": gallery_src_dirs,  # in sphinx-gallery doc referred to as gallery source
+    "gallery_dirs": sphinx_src_autogen_dirs,  # path to where the gallery puts generated files
+}
+
+exclude_patterns = []
+# ignore the autogenerated ipynb files to suppress warnings
+exclude_patterns.extend(
+    [
+        os.path.join(sphinx_src_autogen_dir, "*ipynb")
+        for sphinx_src_autogen_dir in sphinx_src_autogen_dirs
+    ]
+)
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+# html_theme = 'alabaster'
+html_theme = "furo"
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ["_static"] +html_css_files = [ + "css/theme.css", +] + +html_theme_options = { + "source_repository": "https://github.com/aiidateam/aiida-pythonjob/", + "source_branch": "main", + "source_directory": "docs/source", + "footer_icons": [ + { + "name": "GitHub", + "url": "https://github.com/aiidateam/aiida-pythonjob", + "html": """ + + + + """, + "class": "", + }, + ], + # "announcement": "Important announcement!", +} + +# pygments_style = "colorful" +# pygments_dark_style = "monokai" + + +# Function to copy HTML files +def copy_html_files(app, exception): + """ + Copy all .html files from source to build directory, maintaining the directory structure. + """ + copy_print_info = "Copying HTML files to build directory" + print() + print(copy_print_info) + print(len(copy_print_info) * "=") + if exception is not None: # Only copy files if the build succeeded + print( + "Build failed, but we still try to copy the HTML files to the build directory" + ) + try: + src_path = Path(app.builder.srcdir) + build_path = Path(app.builder.outdir) + + copy_print_info = f"Copying html files from sphinx src directory {src_path}" + print() + print(copy_print_info) + print(len(copy_print_info) * "-") + for html_file in src_path.rglob("*.html"): + relative_path = html_file.relative_to(src_path) + destination_file = build_path / relative_path + destination_file.parent.mkdir(parents=True, exist_ok=True) + shutil.copy(html_file, destination_file) + print(f"Copy {html_file} to {destination_file}") + + gallery_src_path = Path(app.builder.srcdir / Path(gallery_src_relative_dir)) + + copy_print_info = ( + f"Copying html files from gallery src directory {gallery_src_path} to build" + ) + print() + print(copy_print_info) + print(len(copy_print_info) * "-") + for html_file in gallery_src_path.rglob("*.html"): + relative_path = html_file.relative_to(gallery_src_path) + destination_file = build_path / relative_path + destination_file.parent.mkdir(parents=True, exist_ok=True) + 
shutil.copy(html_file, destination_file) + print(f"Copy {html_file} to {destination_file}") + except Exception as e: + print(f"Failed to copy HTML files: {e}") + + +def setup(app): + app.connect("build-finished", copy_html_files) diff --git a/docs/source/index.rst b/docs/source/index.rst new file mode 100644 index 0000000..767d698 --- /dev/null +++ b/docs/source/index.rst @@ -0,0 +1,26 @@ + +AiiDA PythonJob +=========================================== + + +`PythonJob` allows users to run Python functions on a remote computer. It is designed to enable users from non-AiiDA communities to run their Python functions remotely and construct workflows with checkpoints, maintaining all data provenance. For instance, users can use ASE's calculator to run a DFT calculation on a remote computer directly. + +**Key Features** + +1. **Remote Execution**: Seamlessly run Python functions on a remote computer. +2. **User-Friendly**: Designed for users who are not familiar with AiiDA, simplifying the process of remote execution. +3. **Workflow Management**: Construct workflows using WorkGraph with checkpoints, ensuring that intermediate states and results are preserved. +4. **Data Provenance**: Maintain comprehensive data provenance, tracking the full history and transformations of data. + + +.. toctree:: + :maxdepth: 1 + :caption: Contents: + :hidden: + + installation + autogen/how_to + tutorial/index + + + diff --git a/docs/source/installation.rst b/docs/source/installation.rst new file mode 100644 index 0000000..64b8530 --- /dev/null +++ b/docs/source/installation.rst @@ -0,0 +1,44 @@ +============ +Installation +============ + +.. _installation:requirements: + +Requirements +============ + +To work with ``aiida-pythonjob``, you should have: + +* installed ``aiida-core`` +* configured an AiiDA profile. + +Please refer to the `documentation `_ of ``aiida-core`` for detailed instructions. + + +.. 
_installation:installation: + +Installation +============ + + +The recommended method of installation is to use the Python package manager |pip|_: + +.. code-block:: console + + $ pip install aiida-pythonjob + +This will install the latest stable version that was released to PyPI. + +To install the package from source, first clone the repository and then install using |pip|_: + +.. code-block:: console + + $ git clone https://github.com/aiidateam/aiida-pythonjob + $ cd aiida-pythonjob + $ pip install -e . + +The ``-e`` flag will install the package in editable mode, meaning that changes to the source code will be automatically picked up. + + +.. |pip| replace:: ``pip`` +.. _pip: https://pip.pypa.io/en/stable/ diff --git a/docs/source/pythonjob.ipynb b/docs/source/pythonjob.ipynb new file mode 100644 index 0000000..6407202 --- /dev/null +++ b/docs/source/pythonjob.ipynb @@ -0,0 +1,2503 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "22d177dc-6cfb-4de2-9509-f1eb45e10cf2", + "metadata": {}, + "source": [ + "# PythonJob\n", + "## Introduction\n", + "\n", + "The `PythonJob` is a built-in task that allows users to run Python functions on a remote computer. It is designed to enable users from non-AiiDA communities to run their Python functions remotely and construct workflows with checkpoints, maintaining all data provenance. For instance, users can use ASE's calculator to run a DFT calculation on a remote computer directly. Users only need to write normal Python code, and the WorkGraph will handle the data transformation to AiiDA data.\n", + "\n", + "### Key Features\n", + "\n", + "1. **Remote Execution**: Seamlessly run Python functions on a remote computer.\n", + "2. **User-Friendly**: Designed for users who are not familiar with AiiDA, simplifying the process of remote execution.\n", + "3. **Workflow Management**: Construct workflows with checkpoints, ensuring that intermediate states and results are preserved.\n", + "4. 
**Data Provenance**: Maintain comprehensive data provenance, tracking the full history and transformations of data.\n", + "\n", + "\n", + "Load the AiiDA profile." + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "c6b83fb5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "Profile" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "%load_ext aiida\n", + "from aiida import load_profile\n", + "load_profile()" + ] + }, + { + "cell_type": "markdown", + "id": "0f46d277", + "metadata": {}, + "source": [ + "## First Workflow\n", + "Suppose you need to calculate `(x + y) * z` in two steps: first, add `x` and `y`; second, multiply the result by `z`.\n", + "\n", + "There are three methods to declare a `PythonJob` task in a workflow:\n", + "\n", + "1. **Using the `task.pythonjob` decorator:** Apply this decorator directly when you define the function. This method is straightforward and integrates the task declaration seamlessly with function definition.\n", + "\n", + "2. **Converting an existing function with `task.pythonjob`:** If the function is already defined, you can convert it into a `PythonJob` task by wrapping it with the `task.pythonjob` decorator. This approach is useful when adapting pre-existing code into a task-based workflow.\n", + "\n", + "3. **Specifying `PythonJob` during task addition to the WorkGraph:** When adding a task to the WorkGraph, you can explicitly identify it as a `PythonJob`. 
This method offers flexibility, allowing you to dynamically assign the task type based on the workflow design requirements.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "9d9e24f8", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph import WorkGraph, task\n", + "\n", + "# decorator to define a pythonjob\n", + "@task.pythonjob()\n", + "def add(x, y):\n", + " return x + y\n", + "\n", + "\n", + "# here is a normal python function\n", + "def multiply(x, y):\n", + " return x*y\n", + "\n", + "wg = WorkGraph(\"first_workflow\")\n", + "wg.add_task(add, name=\"add\")\n", + "# we can also use a normal python function directly, but provide the \"PythonJob\" as the first argument\n", + "wg.add_task(\"PythonJob\", function=multiply, name=\"multiply\", x=wg.tasks[\"add\"].outputs[0])\n", + "\n", + "# visualize the workgraph\n", + "wg.to_html()\n", + "# visualize the workgraph in jupyter-notebook\n", + "# wg" + ] + }, + { + "cell_type": "markdown", + "id": "9bd7aa49", + "metadata": {}, + "source": [ + "### Prepare the inputs and submit the workgraph\n", + "\n", + "\n", + "**Code**: We can set the `computer` to the remote computer where we want to run the job. This will create a code `python3@computer` if it does not already exist. You can also set the `code` directly if you have already created the code.\n", + "\n", + "**Data**: Users are recommended to use normal Python data as input. The workgraph will handle the transfer and serialization of data to AiiDA data. When serializing to AiiDA data, the workgraph will first search for the corresponding AiiDA data entry point based on the module name and class name (e.g., `ase.atoms.Atoms`). If the corresponding entry point exists, it will be used to serialize the value. 
If not found, `GeneralData` (pickle) will be used to serialize the value into binary data.\n", + "\n", + "**Python Version**: Since pickle is used to store and load data, the Python version on the remote computer should match the one used on the localhost. You can use conda to create a virtual environment with the same Python version. Activate the environment before running the script.\n", + "\n", + "For real applications, you can pass metadata to the scheduler to activate the conda environment:\n", + "\n", + "```python\n", + "metadata = {\n", + " \"options\": {\n", + " 'custom_scheduler_commands': 'module load anaconda\\nconda activate py3.11\\n',\n", + " }\n", + "}\n", + "```\n", + "\n", + "#### Create a conda environment on the remote computer\n", + "One can use the `create_conda_env` function to create a conda environment on the remote computer. The function will create a conda environment with the specified packages and modules. The function will update the packages if the environment already exists.\n", + "\n", + "```python\n", + "from aiida_workgraph.utils import create_conda_env\n", + "# create a conda environment on remote computer\n", + "create_conda_env(\"merlin6\", \"test_pythonjob\", modules=[\"anaconda\"],\n", + " pip=[\"numpy\", \"matplotlib\"],\n", + " conda={\"channels\": [\"conda-forge\"],\n", + " \"dependencies\": [\"qe\"]},\n", + " )\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "02464256", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151133\n", + "\n", + "Result of multiply is uuid: ffd35a52-a7c3-4675-adba-a2108cc242cd (pk: 151154) value: 20 \n", + "\n", + "\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "N151133\n", + "\n", + "WorkGraph<first_workflow> (151133)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151140\n", + 
"\n", + "PythonJob<add> (151140)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151133->N151140\n", + "\n", + "\n", + "CALL_CALC\n", + "add\n", + "\n", + "\n", + "\n", + "N151150\n", + "\n", + "PythonJob<multiply> (151150)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151133->N151150\n", + "\n", + "\n", + "CALL_CALC\n", + "multiply\n", + "\n", + "\n", + "\n", + "N151155\n", + "\n", + "Int (151155)\n", + "\n", + "\n", + "\n", + "N151133->N151155\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151142\n", + "\n", + "RemoteData (151142)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151140->N151142\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151143\n", + "\n", + "FolderData (151143)\n", + "\n", + "\n", + "\n", + "N151140->N151143\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151144\n", + "\n", + "Int (151144)\n", + "\n", + "\n", + "\n", + "N151140->N151144\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151144->N151150\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__x\n", + "\n", + "\n", + "\n", + "N151152\n", + "\n", + "RemoteData (151152)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151150->N151152\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151153\n", + "\n", + "FolderData (151153)\n", + "\n", + "\n", + "\n", + "N151150->N151153\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151154\n", + "\n", + "Int (151154)\n", + "\n", + "\n", + "\n", + "N151150->N151154\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph.utils import generate_node_graph\n", + "\n", + "#------------------------- Submit the calculation 
-------------------\n", + "# For real applications, one can pass metadata to the scheduler to activate the conda environment\n", + "metadata = {\n", + " \"options\": {\n", + " # 'custom_scheduler_commands' : 'module load anaconda\\nconda activate py3.11\\n',\n", + " 'custom_scheduler_commands' : '',\n", + " }\n", + "}\n", + "\n", + "wg.submit(inputs = {\"add\": {\"x\": 2, \"y\": 3,\n", + " \"computer\": \"localhost\",\n", + " \"metadata\": metadata},\n", + " \"multiply\": {\"y\": 4,\n", + " \"computer\": \"localhost\",\n", + " \"metadata\": metadata}},\n", + " wait=True)\n", + "#------------------------- Print the output -------------------------\n", + "print(\"\\nResult of multiply is {} \\n\\n\".format(wg.tasks[\"multiply\"].outputs['result'].value))\n", + "#------------------------- Generate node graph -------------------\n", + "generate_node_graph(wg.pk)" + ] + }, + { + "cell_type": "markdown", + "id": "66b34ef1", + "metadata": {}, + "source": [ + "## Use parent folder\n", + "The parent_folder parameter allows a task to access the output files of a parent task. This feature is particularly useful when you want to reuse data generated by a previous computation in subsequent computations. 
In the following example, the multiply task uses the `result.txt` file created by the add task.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "aa47c860", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph import WorkGraph, task\n", + "\n", + "def add(x, y):\n", + " z = x + y\n", + " with open(\"result.txt\", \"w\") as f:\n", + " f.write(str(z))\n", + "\n", + "def multiply(x, y):\n", + " with open(\"parent_folder/result.txt\", \"r\") as f:\n", + " z = int(f.read())\n", + " return x*y + z\n", + "\n", + "wg = WorkGraph(\"PythonJob_parent_folder\")\n", + "wg.add_task(\"PythonJob\", function=add, name=\"add\")\n", + "wg.add_task(\"PythonJob\", function=multiply, name=\"multiply\",\n", + " parent_folder=wg.tasks[\"add\"].outputs[\"remote_folder\"],\n", + " )\n", + "\n", + "wg.to_html()" + ] + }, + { + "cell_type": "markdown", + "id": "7c4650ac", + "metadata": {}, + "source": [ + "Submit the workgraph and print the result." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "6367b6eb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151156\n", + "\n", + "Result of multiply is uuid: 0849fb0a-d461-4a08-8e73-89fb4f848d59 (pk: 151178) value: 17 \n", + "\n", + "\n" + ] + } + ], + "source": [ + "wg.submit(inputs = {\"add\": {\"x\": 2, \"y\": 3, \"computer\": \"localhost\"},\n", + " \"multiply\": {\"x\": 3, \"y\": 4, \"computer\": \"localhost\"}},\n", + " wait=True)\n", + "print(\"\\nResult of multiply is {} \\n\\n\".format(wg.tasks[\"multiply\"].outputs['result'].value))" + ] + }, + { + "cell_type": "markdown", + "id": "1bc31c81", + "metadata": {}, + "source": [ + "## Upload files or folders to the remote computer\n", + "The `upload_files` parameter allows users to upload files or folders to the remote computer. The files will be uploaded to the working directory of the remote computer.\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "ba00e2ce", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151180\n", + "\n", + "Result of add is uuid: 14393adc-ddfd-4b2a-8754-d7d785a8c3e0 (pk: 151191) value: 5 \n", + "\n", + "\n" + ] + } + ], + "source": [ + "from aiida_workgraph import WorkGraph, task\n", + "\n", + "# create a temporary file \"input.txt\" in the current directory\n", + "with open(\"input.txt\", \"w\") as f:\n", + " f.write(\"2\")\n", + "\n", + "# create a temporary folder \"inputs_folder\" in the current directory\n", + "# and add a file \"another_input.txt\" in the folder\n", + "import os\n", + "os.makedirs(\"inputs_folder\", exist_ok=True)\n", + "with open(\"inputs_folder/another_input.txt\", \"w\") as f:\n", + " f.write(\"3\")\n", + "\n", + "def add():\n", + " with open(\"input.txt\", \"r\") as f:\n", + " a = int(f.read())\n", + " with open(\"inputs_folder/another_input.txt\", 
\"r\") as f:\n", + " b = int(f.read())\n", + " return a + b\n", + "\n", + "\n", + "wg = WorkGraph(\"PythonJob_upload_files\")\n", + "wg.add_task(\"PythonJob\", function=add, name=\"add\")\n", + "\n", + "#------------------------- Submit the calculation -------------------\n", + "# we need use full path to the file\n", + "input_file = os.path.abspath(\"input.txt\")\n", + "input_folder = os.path.abspath(\"inputs_folder\")\n", + "\n", + "wg.submit(inputs = {\"add\": {\n", + " \"computer\": \"localhost\",\n", + " \"upload_files\": {\"input.txt\": input_file,\n", + " \"inputs_folder\": input_folder,\n", + " },\n", + " },\n", + " },\n", + " wait=True)\n", + "print(\"\\nResult of add is {} \\n\\n\".format(wg.tasks[\"add\"].outputs['result'].value))" + ] + }, + { + "cell_type": "markdown", + "id": "2174a45e", + "metadata": {}, + "source": [ + "## First Real-world Workflow: atomization energy of molecule\n", + "\n", + "The atomization energy, $\\Delta E$, of a molecule can be expressed as:\n", + "\n", + "$$\n", + "\\Delta E = n_{\\text{atom}} \\times E_{\\text{atom}} - E_{\\text{molecule}}\n", + "$$\n", + "\n", + "Where:\n", + "- $\\Delta E$ is the atomization energy of the molecule.\n", + "- $n_{\\text{atom}}$ is the number of atoms.\n", + "- $E_{\\text{atom}}$ is the energy of an isolated atom.\n", + "- $E_{\\text{molecule}}$ is the energy of the molecule.\n", + "\n", + "\n", + "### Define a task to calculate the energy of the atoms using EMT potential" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "713da634", + "metadata": {}, + "outputs": [], + "source": [ + "from aiida_workgraph import task, WorkGraph\n", + "\n", + "def emt(atoms):\n", + " from ase.calculators.emt import EMT\n", + " atoms.calc = EMT()\n", + " energy = atoms.get_potential_energy()\n", + " return energy\n", + "\n", + "\n", + "def atomization_energy(mol, energy_molecule, energy_atom):\n", + " energy = energy_atom*len(mol) - energy_molecule\n", + " return energy\n" + ] + }, + { + 
"cell_type": "markdown", + "id": "00a7531e", + "metadata": {}, + "source": [ + "### Define a workgraph\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "a81fa9e0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "wg = WorkGraph(\"atomization_energy\")\n", + "pw_atom = wg.add_task(\"PythonJob\", function=emt, name=\"emt_atom\")\n", + "pw_mol = wg.add_task(\"PythonJob\", function=emt, name=\"emt_mol\")\n", + "wg.add_task(\"PythonJob\", function=atomization_energy, name=\"atomization_energy\",\n", + " energy_atom=pw_atom.outputs[\"result\"],\n", + " energy_molecule=pw_mol.outputs[\"result\"])\n", + "wg.to_html()" + ] + }, + { + "cell_type": "markdown", + "id": "b686f3ba", + "metadata": {}, + "source": [ + "### Prepare the inputs and submit the workflow" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "11e3bca1-dda6-44e9-9585-54feeda7e7db", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151193\n", + "Energy of a N atom: 5.100\n", + "Energy of an un-relaxed N2 molecule: 0.549\n", + "Atomization energy: 9.651 eV\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "N151193\n", + "\n", + "WorkGraph<atomization_energy> (151193)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151199\n", + "\n", + "PythonJob<emt_mol> (151199)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151193->N151199\n", + "\n", + "\n", + "CALL_CALC\n", + "emt_mol\n", + "\n", + "\n", + "\n", + "N151205\n", + "\n", + "PythonJob<emt_atom> (151205)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151193->N151205\n", + "\n", + "\n", + "CALL_CALC\n", + 
"emt_atom\n", + "\n", + "\n", + "\n", + "N151219\n", + "\n", + "PythonJob<atomization_energy> (151219)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151193->N151219\n", + "\n", + "\n", + "CALL_CALC\n", + "atomization_energy\n", + "\n", + "\n", + "\n", + "N151224\n", + "\n", + "Int (151224)\n", + "\n", + "\n", + "\n", + "N151193->N151224\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151208\n", + "\n", + "RemoteData (151208)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151199->N151208\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151210\n", + "\n", + "FolderData (151210)\n", + "\n", + "\n", + "\n", + "N151199->N151210\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151212\n", + "\n", + "GeneralData (151212)\n", + "\n", + "\n", + "\n", + "N151199->N151212\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151209\n", + "\n", + "RemoteData (151209)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151205->N151209\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151211\n", + "\n", + "FolderData (151211)\n", + "\n", + "\n", + "\n", + "N151205->N151211\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151213\n", + "\n", + "GeneralData (151213)\n", + "\n", + "\n", + "\n", + "N151205->N151213\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151212->N151219\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__energy_molecule\n", + "\n", + "\n", + "\n", + "N151213->N151219\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__energy_atom\n", + "\n", + "\n", + "\n", + "N151221\n", + "\n", + "RemoteData (151221)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151219->N151221\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151222\n", + "\n", + "FolderData (151222)\n", + "\n", + "\n", 
+ "\n", + "N151219->N151222\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151223\n", + "\n", + "GeneralData (151223)\n", + "\n", + "\n", + "\n", + "N151219->N151223\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from ase.build import molecule\n", + "from ase import Atoms\n", + "\n", + "load_profile()\n", + "\n", + "# create input structure\n", + "n_atom = Atoms(\"N\", pbc=True)\n", + "n_atom.center(vacuum=5.0)\n", + "n2_molecule = molecule(\"N2\", pbc=True)\n", + "n2_molecule.center(vacuum=5.0)\n", + "\n", + "\n", + "#------------------------- Set the inputs -------------------------\n", + "wg.tasks[\"emt_atom\"].set({\"atoms\": n_atom, \"computer\": \"localhost\"})\n", + "wg.tasks[\"emt_mol\"].set({\"atoms\": n2_molecule, \"computer\": \"localhost\"})\n", + "wg.tasks[\"atomization_energy\"].set({\"mol\": n2_molecule, \"computer\": \"localhost\"})\n", + "#------------------------- Submit the calculation -------------------\n", + "wg.submit(wait=True, timeout=200)\n", + "#------------------------- Print the output -------------------------\n", + "print('Energy of a N atom: {:0.3f}'.format(wg.tasks['emt_atom'].outputs[\"result\"].value.value))\n", + "print('Energy of an un-relaxed N2 molecule: {:0.3f}'.format(wg.tasks['emt_mol'].outputs[\"result\"].value.value))\n", + "print('Atomization energy: {:0.3f} eV'.format(wg.tasks['atomization_energy'].outputs[\"result\"].value.value))\n", + "#------------------------- Generate node graph -------------------\n", + "generate_node_graph(wg.pk)\n" + ] + }, + { + "cell_type": "markdown", + "id": "d25beb02-ee82-4a27-ae48-edc5c147904c", + "metadata": {}, + "source": [ + "## Call shell commands in the PythonJob task\n", + "\n", + "We want to calculate `(x+y)*z` in two steps using `echo` and `bc` commands.\n", + "\n", + "Step 1: Calculate 
(x+y) and store it as result\n", + "```\n", + "result=$(echo \"$x + $y\" | bc)\n", + "```\n", + "\n", + "Step 2: Multiply result by z and store the final result\n", + "```\n", + "result=$(echo \"$result * $z\" | bc)\n", + "```\n", + "\n", + "If one wanted to run this workflow in AiiDA, one would have to write plugins for `echo` and `bc` commands, and a WorkChain to handle the workflow. With aiida-workgraph and the `PythonJob` task, this can be run through AiiDA with the following workgraph:" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "d8471a01", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph import task, WorkGraph\n", + "\n", + "def add(x, y):\n", + " import os\n", + " os.system(\"echo '{} + {}' | bc > result.txt\".format(x, y))\n", + " with open(\"result.txt\", \"r\") as f:\n", + " return float(f.read())\n", + "\n", + "\n", + "def multiply(x, y):\n", + " import os\n", + " os.system(\"echo '{} * {}' | bc > result.txt\".format(x, y))\n", + " with open(\"result.txt\", \"r\") as f:\n", + " return float(f.read())\n", + "\n", + "\n", + "wg = WorkGraph(\"PythonJob_shell_command\")\n", + "wg.add_task(\"PythonJob\", function=add, name=\"add\")\n", + "wg.add_task(\"PythonJob\", function=multiply, name=\"multiply\", x=wg.tasks[\"add\"].outputs[0])\n", + "\n", + "# visualize the workgraph\n", + "wg.to_html()\n" + ] + }, + { + "cell_type": "markdown", + "id": "9cb86fa4", + "metadata": {}, + "source": [ + "submit the workgraph and print the result:" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "id": "df7976d2", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151225\n", + "\n", + "Result of multiply is uuid: deab3241-85b3-47d6-9551-9d8d14dc255d (pk: 
151246) value: 20.0 \n", + "\n", + "\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "N151225\n", + "\n", + "WorkGraph<PythonJob_shell_command> (151225)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151232\n", + "\n", + "PythonJob<add> (151232)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151225->N151232\n", + "\n", + "\n", + "CALL_CALC\n", + "add\n", + "\n", + "\n", + "\n", + "N151242\n", + "\n", + "PythonJob<multiply> (151242)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151225->N151242\n", + "\n", + "\n", + "CALL_CALC\n", + "multiply\n", + "\n", + "\n", + "\n", + "N151247\n", + "\n", + "Int (151247)\n", + "\n", + "\n", + "\n", + "N151225->N151247\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151234\n", + "\n", + "RemoteData (151234)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151232->N151234\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151235\n", + "\n", + "FolderData (151235)\n", + "\n", + "\n", + "\n", + "N151232->N151235\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151236\n", + "\n", + "Float (151236)\n", + "\n", + "\n", + "\n", + "N151232->N151236\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151236->N151242\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__x\n", + "\n", + "\n", + "\n", + "N151244\n", + "\n", + "RemoteData (151244)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151242->N151244\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151245\n", + "\n", + "FolderData (151245)\n", + "\n", + "\n", + "\n", + "N151242->N151245\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151246\n", + "\n", + "Float (151246)\n", + "\n", + "\n", + "\n", + "N151242->N151246\n", + "\n", + 
"\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "wg.submit(inputs = {\"add\": {\"x\": 2, \"y\": 3, \"computer\": \"localhost\"},\n", + " \"multiply\": {\"y\": 4, \"computer\": \"localhost\"}},\n", + " wait=True)\n", + "#------------------------- Print the output -------------------------\n", + "print(\"\\nResult of multiply is {} \\n\\n\".format(wg.tasks[\"multiply\"].outputs['result'].value))\n", + "#------------------------- Generate node graph -------------------\n", + "generate_node_graph(wg.pk)" + ] + }, + { + "cell_type": "markdown", + "id": "824a056d", + "metadata": {}, + "source": [ + "## Note\n", + "One can not run a `graph_builder` task in a `PythonJob` task. The `graph_builder` task is used to build the workgraph, and it should be run in the localhost by the daemon.\n", + "\n", + "However, one can run a `PythonJob` task in a `graph_builder` task. 
The `PythonJob` task will be executed on the remote computer.\n", + "\n", + "The following code will raise an error:\n", + "\n", + "```python\n", + "from aiida_workgraph import task, WorkGraph\n", + "\n", + "\n", + "@task.graph_builder()\n", + "def add_multiply():\n", + " wg = WorkGraph()\n", + " return wg\n", + "\n", + "wg = WorkGraph()\n", + "wg.add_task(\"PythonJob\", function=add_multiply, name=\"add_multiply\")\n", + "\n", + "---------------------------------------------------------------------------\n", + "ValueError Traceback (most recent call last)\n", + "/tmp/ipykernel_3498848/1351840398.py in ()\n", + " 8 \n", + " 9 wg = WorkGraph()\n", + "---> 10 wg.add_task(\"PythonJob\", function=add_multiply, name=\"add_multiply\")\n", + "\n", + "~/repos/superstar54/aiida-workgraph/aiida_workgraph/collection.py in new(self, identifier, name, uuid, run_remotely, **kwargs)\n", + " 35 return super().new(identifier, name, uuid, **kwargs)\n", + " 36 if isinstance(identifier, str) and identifier.upper() == \"PYTHONJOB\":\n", + "---> 37 identifier, _ = build_pythonjob_task(kwargs.pop(\"function\"))\n", + " 38 return super().new(identifier, name, uuid, **kwargs)\n", + " 39 if isinstance(identifier, str) and identifier.upper() == \"SHELLJOB\":\n", + "\n", + "~/repos/superstar54/aiida-workgraph/aiida_workgraph/decorator.py in build_pythonjob_task(func)\n", + " 262 \n", + " 263 if func.node.node_type.upper() == \"GRAPH_BUILDER\":\n", + "--> 264 raise ValueError(\n", + " 265 \"GraphBuilder task cannot be run remotely. Please remove 'PythonJob'.\"\n", + " 266 )\n", + "\n", + "ValueError: GraphBuilder task cannot be run remotely. 
Please remove 'PythonJob'.\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "c6eec323", + "metadata": {}, + "source": [ + "However, the following code will work:" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "3b226eb7", + "metadata": {}, + "outputs": [ + { + "data": { + "application/vnd.jupyter.widget-view+json": { + "model_id": "d30b54834fbc48c0908ae07d06594818", + "version_major": 2, + "version_minor": 1 + }, + "text/plain": [ + "NodeGraphWidget(settings={'minmap': False}, style={'width': '80%', 'height': '600px'}, value={'name': 'add_mul…" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph import task, WorkGraph\n", + "\n", + "\n", + "@task.graph_builder()\n", + "def add_multiply():\n", + " wg = WorkGraph()\n", + " wg.add_task(\"PythonJob\", function=add, name=\"add\")\n", + " return wg\n", + "\n", + "wg = WorkGraph()\n", + "wg.add_task(add_multiply, name=\"add_multiply\")" + ] + }, + { + "cell_type": "markdown", + "id": "68f96103", + "metadata": {}, + "source": [ + "### Using `parent_folder_name` for Data Continuity\n", + "\n", + "AiiDA runs each job in a separate folder. If one calculation requires data from previous calculations to be accessible in the current job's working directory. This has been managed with the `parent_folder` input, which specifies a source for copying necessary data. 
The new `parent_folder_name` input streamlines this process by allowing users to define a subfolder within the working directory to organize these files effectively.\n", + "\n", + "#### Example Usage: NSCF Calculation\n", + "In the context of an NSCF calculation, where data dependency exists on outputs from a SCF calculation, the workflow can be configured as follows:\n", + "\n", + "```python\n", + "nscf_task = wg.add_task(\"PythonJob\",\n", + " function=pw_calculator,\n", + " name=\"nscf\",\n", + " parent_folder=scf_task.outputs[\"remote_folder\"],\n", + " parent_output_folder=\"out\",\n", + " parent_folder_name=\"out\",\n", + ")\n", + "```\n", + "This setup will copy all content of the `out` folder from the SCF calculation's remote folder into an `out` folder within the working directory of the NSCF job.\n", + "\n", + "### Handling Multiple Data Sources with `copy_files`\n", + "The traditional `parent_folder` method is limited when calculations require inputs from multiple remote directories. For instance, Bader charge analysis with Quantum ESPRESSO may need both valence and all-electron density data from different calculations.\n", + "\n", + "The new `copy_files` input allows for flexible linkage to multiple remote folders. 
It facilitates copying necessary files from diverse sources into a single job's directory under dynamically generated subfolder names based on task and socket names.\n", + "\n", + "#### Example Usage: Bader Charge Analysis\n", + "For a Bader analysis requiring different charge density files:\n", + "\n", + "```python\n", + "bader_task = wg.add_task(\"PythonJob\",\n", + " function=bader_calculator,\n", + " name=\"bader\",\n", + " command=bader_command,\n", + " charge_density_folder=\"pp_valence_remote_folder\",\n", + " reference_charge_density_folder=\"pp_all_remote_folder\",\n", + ")\n", + "wg.add_link(pp_valence.outputs[\"remote_folder\"], bader_task.inputs[\"copy_files\"])\n", + "wg.add_link(pp_all.outputs[\"remote_folder\"], bader_task.inputs[\"copy_files\"])\n", + "```\n", + "\n", + "The `bader_calculator` function uses the specified charge density data:\n", + "\n", + "```python\n", + "def bader_calculator(\n", + " command: str = \"pw.x\",\n", + " charge_density_folder: str = \"./\",\n", + " charge_density_filename: str = \"charge_density.cube\",\n", + " reference_charge_density_folder: str = \"./\",\n", + " reference_charge_density_filename: str = \"charge_density.cube\",\n", + "):\n", + " \"\"\"Run Bader charge analysis.\"\"\"\n", + " command_str = f\"{command} {charge_density_folder}/{charge_density_filename}\"\n", + " if reference_charge_density_filename:\n", + " command_str += f\" -ref {reference_charge_density_folder}/{reference_charge_density_filename}\"\n", + " os.system(command_str)\n", + "\n", + " with open(\"ACF.dat\", \"r\") as f:\n", + " lines = f.readlines()\n", + " charges = [float(line.split()[4]) for line in lines[2:-4]]\n", + "\n", + " return charges\n", + "```" + ] + }, + { + "cell_type": "markdown", + "id": "a04617ca", + "metadata": {}, + "source": [ + "\n", + "## Namespace Output\n", + "\n", + "The `PythonJob` allows users to define namespace outputs. A namespace output is a dictionary with keys and values returned by a function. 
Each value in this dictionary will be serialized to AiiDA data, and the key-value pair will be stored in the database.\n", + "\n", + "### Why Use Namespace Outputs?\n", + "\n", + "- **Dynamic and Flexible**: The keys and values in the namespace output are not fixed and can change based on the task's execution.\n", + "- **Querying**: The data in the namespace output is stored as an AiiDA data node, allowing for easy querying and retrieval.\n", + "- **Data Provenance**: When the data is used as input for subsequent tasks, the origin of data is tracked.\n", + "\n", + "### Example Use Case\n", + "\n", + "Consider a molecule adsorption calculation where the namespace output stores the surface slabs of the molecule adsorbed on different surface sites. The number of surface slabs can vary depending on the surface. These output surface slabs can be utilized as input to the next task to calculate the energy.\n", + "\n", + "### Defining Namespace Outputs\n", + "\n", + "To declare a namespace output, set the `identifier` to `workgraph.namespace` in the `outputs` parameter of the `@task` decorator. For example:\n", + "\n", + "```python\n", + "@task(outputs=[{\"name\": \"structures\", \"identifier\": \"workgraph.namespace\"}])\n", + "def generate_surface_slabs():\n", + " # Function logic to generate surface slabs\n", + " return {\"slab1\": slab_data1, \"slab2\": slab_data2}\n", + "```\n", + "\n", + "\n", + "One can also define nested namespace outputs by specifying the `identifier` as `workgraph.namespace` for sub-dictionaries within the namespace output. 
For example, here we define `add_multiply.add` as a nested namespace output:\n", + "\n", + "```python\n", + "@task(\n", + " outputs=[{\"name\": \"add_multiply\", \"identifier\": \"workgraph.namespace\"},\n", + " {\"name\": \"add_multiply.add\", \"identifier\": \"workgraph.namespace\"},\n", + " {\"name\": \"minus\"},\n", + " ]\n", + ")\n", + "def myfunc(x, y):\n", + " add = {\"order1\": x + y, \"order2\": x * x + y * y}\n", + " return {\n", + " \"add_multiply\": {\"add\": add, \"multiply\": x * y},\n", + " \"minus\": x - y,\n", + " }\n", + "```\n", + "\n", + "\n", + "### Using Namespace Outputs as Inputs\n", + "\n", + "A namespace output can be passed directly as an input to another task. It will be passed as a dictionary to the task, preserving the structure and allowing for flexible data handling.\n", + "\n", + "If you want to pass the value of a key in the namespace output as an input to another task, you need to define an output for that key. For example, to pass the value of `add_multiply.add` as an input to another task, you need to define an output for `add_multiply.add`:\n", + "\n", + "```python\n", + "@task(\n", + " outputs=[\n", + " {\"identifier\": \"workgraph.namespace\", \"name\": \"add_multiply\"},\n", + " {\"name\": \"add_multiply.add\"},\n", + " {\"name\": \"add_multiply.multiply\"},\n", + " {\"name\": \"minus\"},\n", + " ]\n", + ")\n", + "def myfunc(x, y):\n", + " return {\n", + " \"add_multiply\": {\"add\": x + y, \"multiply\": x * y},\n", + " \"minus\": x - y,\n", + " }\n", + "```\n", + "\n", + "then you can pass the value of `add_multiply.add` as an input to another task:\n", + "\n", + "```python\n", + "wg.add_task(\"PythonJob\",\n", + " function=myfunc3,\n", + " name=\"myfunc3\",\n", + " x=wg.tasks[\"myfunc\"].outputs[\"add_multiply.add\"],\n", + " )\n", + "```\n", + "\n", + "\n", + "## Second Real-world Workflow: Equation of state (EOS) WorkGraph\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "id": "dd00841a", + 
"metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph import WorkGraph, task\n", + "from ase.build import bulk\n", + "from ase import Atoms\n", + "from aiida import load_profile\n", + "\n", + "load_profile()\n", + "\n", + "@task(outputs=[{\"name\": \"scaled_atoms\", \"identifier\": \"workgraph.namespace\"},\n", + " {\"name\": \"volumes\"}]\n", + ")\n", + "def generate_scaled_atoms(atoms: Atoms, scales: list) -> dict:\n", + " \"\"\"Scale the structure by the given scales.\"\"\"\n", + " volumes = {}\n", + " scaled_atoms = {}\n", + " for i in range(len(scales)):\n", + " atoms1 = atoms.copy()\n", + " atoms1.set_cell(atoms.cell * scales[i], scale_atoms=True)\n", + " scaled_atoms[f\"s_{i}\"] = atoms1\n", + " volumes[f\"s_{i}\"] = atoms1.get_volume()\n", + " return {\"scaled_atoms\": scaled_atoms, \"volumes\": volumes}\n", + "\n", + "@task()\n", + "def emt(atoms):\n", + " from ase.calculators.emt import EMT\n", + " atoms.calc = EMT()\n", + " energy = atoms.get_potential_energy()\n", + " return {\"energy\": energy}\n", + "\n", + "# Output result from context to the output socket\n", + "@task.graph_builder(outputs=[{\"name\": \"results\", \"from\": \"context.results\"}])\n", + "def calculate_enegies(scaled_atoms):\n", + " \"\"\"Run the scf calculation for each structure.\"\"\"\n", + " from aiida_workgraph import WorkGraph\n", + " wg = WorkGraph()\n", + " for key, atoms in scaled_atoms.items():\n", + " emt1 = wg.add_task(\"PythonJob\", function=emt, name=f\"emt1_{key}\", atoms=atoms)\n", + " emt1.set({\"computer\": \"localhost\"})\n", + " # save the output parameters to the context\n", + " emt1.set_context({\"result\": f\"results.{key}\"})\n", + " return wg\n", + "\n", + "\n", + "@task()\n", + "def fit_eos(volumes: dict, emt_results: dict) -> dict:\n", + " \"\"\"Fit the EOS of the 
data.\"\"\"\n", + " from ase.eos import EquationOfState\n", + " from ase.units import kJ\n", + "\n", + " volumes_list = []\n", + " energies = []\n", + " for key, data in emt_results.items():\n", + " energy = data[\"energy\"]\n", + " energies.append(energy)\n", + " volumes_list.append(volumes[key])\n", + " #\n", + " eos = EquationOfState(volumes_list, energies)\n", + " v0, e0, B = eos.fit()\n", + " # convert B to GPa\n", + " B = B / kJ * 1.0e24\n", + " eos = {\"energy unit\": \"eV\", \"v0\": v0, \"e0\": e0, \"B\": B}\n", + " return eos\n", + "\n", + "atoms = bulk(\"Au\", cubic=True)\n", + "\n", + "wg = WorkGraph(\"pythonjob_eos_emt\")\n", + "scale_atoms_task = wg.add_task(\"PythonJob\",\n", + " function=generate_scaled_atoms,\n", + " name=\"scale_atoms\",\n", + " atoms=atoms,\n", + " )\n", + " # -------- calculate_enegies -----------\n", + "calculate_enegies_task = wg.add_task(calculate_enegies,\n", + " name=\"calculate_enegies\",\n", + " scaled_atoms=scale_atoms_task.outputs[\"scaled_atoms\"],\n", + " )\n", + " # -------- fit_eos -----------\n", + "wg.add_task(\"PythonJob\",\n", + " function=fit_eos,\n", + " name=\"fit_eos\",\n", + " volumes=scale_atoms_task.outputs[\"volumes\"],\n", + " emt_results=calculate_enegies_task.outputs[\"results\"],\n", + " )\n", + "wg.to_html()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "3d8072ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151263\n", + "The fitted EOS parameters are:\n" + ] + }, + { + "data": { + "text/plain": [ + "{'B': 167.61300824791,\n", + " 'e0': 0.006458727465855,\n", + " 'v0': 67.197735262521,\n", + " 'energy unit': 'eV'}" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "\n", + "wg.submit(\n", + " inputs={\"scale_atoms\": {\"atoms\": atoms,\n", + " \"scales\": [0.95, 1.0, 1.05],\n", + " \"computer\": \"localhost\"},\n", + " 
\"fit_eos\": {\"computer\": \"localhost\"}},\n", + " wait=True,\n", + " )\n", + "\n", + "print(\"The fitted EOS parameters are:\")\n", + "wg.tasks[\"fit_eos\"].outputs[\"result\"].value.value\n" + ] + }, + { + "cell_type": "markdown", + "id": "8321bb88", + "metadata": {}, + "source": [ + "Generate the node graph and check the data provenance." + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "1f802430", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "N151263\n", + "\n", + "WorkGraph<pythonjob_eos_emt> (151263)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151270\n", + "\n", + "PythonJob<scale_atoms> (151270)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151263->N151270\n", + "\n", + "\n", + "CALL_CALC\n", + "scale_atoms\n", + "\n", + "\n", + "\n", + "N151278\n", + "\n", + "WorkGraph<calculate_enegies> (151278)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151263->N151278\n", + "\n", + "\n", + "CALL_WORK\n", + "calculate_enegies\n", + "\n", + "\n", + "\n", + "N151311\n", + "\n", + "PythonJob<fit_eos> (151311)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151263->N151311\n", + "\n", + "\n", + "CALL_CALC\n", + "fit_eos\n", + "\n", + "\n", + "\n", + "N151316\n", + "\n", + "Int (151316)\n", + "\n", + "\n", + "\n", + "N151263->N151316\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151272\n", + "\n", + "RemoteData (151272)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151270->N151272\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151273\n", + "\n", + "FolderData (151273)\n", + "\n", + "\n", + "\n", + "N151270->N151273\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151274\n", + "\n", + "AtomsData (151274)\n", + "\n", + "\n", + "\n", + 
"N151270->N151274\n", + "\n", + "\n", + "CREATE\n", + "scaled_atoms__s_0\n", + "\n", + "\n", + "\n", + "N151275\n", + "\n", + "AtomsData (151275)\n", + "\n", + "\n", + "\n", + "N151270->N151275\n", + "\n", + "\n", + "CREATE\n", + "scaled_atoms__s_1\n", + "\n", + "\n", + "\n", + "N151276\n", + "\n", + "AtomsData (151276)\n", + "\n", + "\n", + "\n", + "N151270->N151276\n", + "\n", + "\n", + "CREATE\n", + "scaled_atoms__s_2\n", + "\n", + "\n", + "\n", + "N151277\n", + "\n", + "Dict (151277)\n", + "\n", + "\n", + "\n", + "N151270->N151277\n", + "\n", + "\n", + "CREATE\n", + "volumes\n", + "\n", + "\n", + "\n", + "N151274->N151278\n", + "\n", + "\n", + "INPUT_WORK\n", + "wg__tasks__emt1_s_0__inputs__atoms__property__value\n", + "\n", + "\n", + "\n", + "N151283\n", + "\n", + "PythonJob<emt1_s_0> (151283)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151274->N151283\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__atoms\n", + "\n", + "\n", + "\n", + "N151275->N151278\n", + "\n", + "\n", + "INPUT_WORK\n", + "wg__tasks__emt1_s_1__inputs__atoms__property__value\n", + "\n", + "\n", + "\n", + "N151288\n", + "\n", + "PythonJob<emt1_s_1> (151288)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151275->N151288\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__atoms\n", + "\n", + "\n", + "\n", + "N151276->N151278\n", + "\n", + "\n", + "INPUT_WORK\n", + "wg__tasks__emt1_s_2__inputs__atoms__property__value\n", + "\n", + "\n", + "\n", + "N151293\n", + "\n", + "PythonJob<emt1_s_2> (151293)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151276->N151293\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__atoms\n", + "\n", + "\n", + "\n", + "N151277->N151311\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__volumes\n", + "\n", + "\n", + "\n", + "N151278->N151283\n", + "\n", + "\n", + "CALL_CALC\n", + "emt1_s_0\n", + "\n", + "\n", + "\n", + "N151278->N151288\n", + "\n", + 
"\n", + "CALL_CALC\n", + "emt1_s_1\n", + "\n", + "\n", + "\n", + "N151278->N151293\n", + "\n", + "\n", + "CALL_CALC\n", + "emt1_s_2\n", + "\n", + "\n", + "\n", + "N151303\n", + "\n", + "Dict (151303)\n", + "\n", + "\n", + "\n", + "N151278->N151303\n", + "\n", + "\n", + "RETURN\n", + "results__s_0\n", + "\n", + "\n", + "\n", + "N151304\n", + "\n", + "Dict (151304)\n", + "\n", + "\n", + "\n", + "N151278->N151304\n", + "\n", + "\n", + "RETURN\n", + "results__s_1\n", + "\n", + "\n", + "\n", + "N151305\n", + "\n", + "Dict (151305)\n", + "\n", + "\n", + "\n", + "N151278->N151305\n", + "\n", + "\n", + "RETURN\n", + "results__s_2\n", + "\n", + "\n", + "\n", + "N151306\n", + "\n", + "Int (151306)\n", + "\n", + "\n", + "\n", + "N151278->N151306\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151297\n", + "\n", + "RemoteData (151297)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151283->N151297\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151300\n", + "\n", + "FolderData (151300)\n", + "\n", + "\n", + "\n", + "N151283->N151300\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151283->N151303\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151298\n", + "\n", + "RemoteData (151298)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151288->N151298\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151301\n", + "\n", + "FolderData (151301)\n", + "\n", + "\n", + "\n", + "N151288->N151301\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151288->N151304\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151299\n", + "\n", + "RemoteData (151299)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151293->N151299\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151302\n", + "\n", + "FolderData (151302)\n", + "\n", + "\n", + "\n", + 
"N151293->N151302\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151293->N151305\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151303->N151311\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__emt_results__s_0\n", + "\n", + "\n", + "\n", + "N151304->N151311\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__emt_results__s_1\n", + "\n", + "\n", + "\n", + "N151305->N151311\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__emt_results__s_2\n", + "\n", + "\n", + "\n", + "N151313\n", + "\n", + "RemoteData (151313)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151311->N151313\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151314\n", + "\n", + "FolderData (151314)\n", + "\n", + "\n", + "\n", + "N151311->N151314\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151315\n", + "\n", + "Dict (151315)\n", + "\n", + "\n", + "\n", + "N151311->N151315\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph.utils import generate_node_graph\n", + "\n", + "#------------------------- Generate node graph -------------------\n", + "generate_node_graph(wg.pk)" + ] + }, + { + "cell_type": "markdown", + "id": "a1cbc140", + "metadata": {}, + "source": [ + "## Retrieve additional files from the remote computer\n", + "Sometimes, one may want to retrieve additional files from the remote computer after the job has finished. 
For example, one may want to retrieve the output files generated by the `pw.x` calculation in Quantum ESPRESSO.\n", + "\n", + "One can use the `additional_retrieve_list` parameter to specify which files should be retrieved from the working directory and stored in the local repository after the job has finished" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "id": "e698190c", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151317\n", + "File in the local repository: ['_scheduler-stderr.txt', '_scheduler-stdout.txt', 'aiida.out', 'result.txt', 'results.pickle']\n" + ] + } + ], + "source": [ + "from aiida_workgraph import WorkGraph\n", + "\n", + "def add(x, y):\n", + " z = x + y\n", + " with open(\"result.txt\", \"w\") as f:\n", + " f.write(str(z))\n", + " return x + y\n", + "\n", + "wg = WorkGraph(\"test_PythonJob_retrieve_files\")\n", + "wg.add_task(\"PythonJob\", function=add, name=\"add\")\n", + "# ------------------------- Submit the calculation -------------------\n", + "wg.submit(\n", + " inputs={\n", + " \"add\": {\n", + " \"x\": 2,\n", + " \"y\": 3,\n", + " \"computer\": \"localhost\",\n", + " \"metadata\": {\n", + " \"options\": {\n", + " \"additional_retrieve_list\": [\"result.txt\"],\n", + " }\n", + " }\n", + " },\n", + " },\n", + " wait=True,\n", + ")\n", + "\n", + "# ------------------------- Print the output -------------------------\n", + "filenames = wg.tasks['add'].outputs['retrieved'].value.list_object_names()\n", + "print(\"File in the local repository: \", filenames)" + ] + }, + { + "cell_type": "markdown", + "id": "fe376995", + "metadata": {}, + "source": [ + "We can see that the `result.txt` file is retrieved from the remote computer and stored in the local repository.\n", + "\n", + "## Exit Code\n", + "\n", + "The `PythonJob` task includes a built-in output socket, `exit_code`, which serves as a mechanism for error handling and status reporting 
during task execution. This `exit_code` is an integer value where `0` indicates a successful completion, and any non-zero value signals that an error occurred.\n", + "\n", + "### How it Works:\n", + "When the function returns a dictionary with an `exit_code` key, the system automatically parses and uses this code to indicate the task's status. In the case of an error, the non-zero `exit_code` value helps identify the specific problem.\n", + "\n", + "\n", + "### Benefits of `exit_code`:\n", + "\n", + "1. **Error Reporting:** \n", + " If the task encounters an error, the `exit_code` can communicate the reason. This is helpful during process inspection to determine why a task failed.\n", + "\n", + "2. **Error Handling and Recovery:** \n", + " You can utilize `exit_code` to add specific error handlers for particular exit codes. This allows you to modify the task's parameters and restart it.\n", + "\n", + "\n", + "Below is an example Python function that uses `exit_code` to handle potential errors:" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "id": "a96cbbcb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151330\n", + "exit status: 410\n", + "exit message: Sum is negative\n" + ] + } + ], + "source": [ + "from aiida_workgraph import WorkGraph, task\n", + "\n", + "@task.pythonjob(outputs=[{\"name\": \"sum\"}])\n", + "def add(x: int, y: int) -> int:\n", + " sum = x + y\n", + " if sum < 0:\n", + " exit_code = {\"status\": 410, \"message\": \"Sum is negative\"}\n", + " return {\"sum\": sum, \"exit_code\": exit_code}\n", + " return {\"sum\": sum}\n", + "\n", + "wg = WorkGraph(\"test_PythonJob\")\n", + "wg.add_task(add, name=\"add\", x=1, y=-2)\n", + "wg.submit(wait=True)\n", + "\n", + "print(\"exit status: \", wg.tasks[\"add\"].node.exit_status)\n", + "print(\"exit message: \", wg.tasks[\"add\"].node.exit_message)" + ] + }, + { + "cell_type": "markdown", + "id": "8d4d935b", 
+ "metadata": {}, + "source": [ + "In this example, the task failed with `exit_code = 410` due to the condition `Sum is negative`, which is also reflected in the state message.\n", + "\n", + "## Error-handling with `exit_code`\n", + "\n", + "One can register error handlers for specific exit codes to handle errors gracefully. This allows for customized error recovery strategies based on the specific error encountered.\n", + "\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "id": "d9ab42a3", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151342\n", + "exit status: 0\n", + "exit message: None\n" + ] + } + ], + "source": [ + "\n", + "def handle_negative_sum(task) -> str:\n", + " \"\"\"Handle the failure code 410 of the `add`.\n", + " Simply make the inputs positive by taking the absolute value.\n", + " \"\"\"\n", + " # modify task inputs\n", + " task.set({\"x\": abs(task.inputs[\"x\"].value),\n", + " \"y\": abs(task.inputs[\"y\"].value)})\n", + " \n", + " msg = \"Run error handler: handle_negative_sum.\"\n", + " return msg\n", + "\n", + "\n", + "@task.pythonjob(outputs=[{\"name\": \"sum\"}],\n", + " error_handlers=[{\"handler\": handle_negative_sum,\n", + " \"exit_codes\": [410],\n", + " \"max_retries\": 5}])\n", + "@task.pythonjob(outputs=[{\"name\": \"sum\"}])\n", + "def add(x: int, y: int) -> int:\n", + " sum = x + y\n", + " if sum < 0:\n", + " exit_code = {\"status\": 410, \"message\": \"Sum is negative\"}\n", + " return {\"sum\": sum, \"exit_code\": exit_code}\n", + " return {\"sum\": sum}\n", + "\n", + "wg = WorkGraph(\"test_PythonJob\")\n", + "wg.add_task(add, name=\"add1\", x=1, y=-2, computer=\"localhost\")\n", + "wg.submit(wait=True)\n", + "print(\"exit status: \", wg.tasks[\"add1\"].node.exit_status)\n", + "print(\"exit message: \", wg.tasks[\"add1\"].node.exit_message)" + ] + }, + { + "cell_type": "markdown", + "id": "0c100bb7", + "metadata": {}, + 
"source": [ + "We can confirm that the task first fails again with a 410. Then the WorkGraph restarts the task with the new inputs, and it finishes successfully. " + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "id": "e06bf489", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[22mWorkGraph<151342> Finished [0]\n", + " ├── PythonJob<151349> Finished [410]\n", + " └── PythonJob<151360> Finished [0]\u001b[0m\n" + ] + } + ], + "source": [ + "%verdi process status {wg.pk}" + ] + }, + { + "cell_type": "markdown", + "id": "682fec82", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Define your data serializer\n", + "Workgraph search data serializer from the `aiida.data` entry point by the module name and class name (e.g., `ase.atoms.Atoms`). \n", + "\n", + "In order to let the workgraph find the serializer, you must register the AiiDA data with the following format:\n", + "```\n", + "[project.entry-points.\"aiida.data\"]\n", + "abc.ase.atoms.Atoms = \"abc.xyz:MyAtomsData\"\n", + "```\n", + "This will register a data serializer for `ase.atoms.Atoms` data. `abc` is the plugin name, module name is `xyz`, and the AiiDA data class name is `AtomsData`. Learn how to create a AiiDA data [here](https://aiida.readthedocs.io/projects/aiida-core/en/stable/topics/data_types.html#adding-support-for-custom-data-types).\n", + "\n", + "\n", + "### Avoid duplicate data serializer\n", + "If you have multiple plugins that register the same data serializer, the workgraph will raise an error. 
You can avoid this by selecting the plugin that you want to use in the configuration file.\n", + "\n", + "```json\n", + "{\n", + " \"serializers\": {\n", + " \"ase.atoms.Atoms\": \"abc.ase.atoms.Atoms\"\n", + " },\n", + "}\n", + "```\n", + "\n", + "Save the configuration file as `workgraph.json` in the aiida configuration directory (by default, `~/.aiida` directory).\n", + "\n", + "\n", + "## Use PythonJob outside WorkGraph\n", + "One can use the `PythonJob` task outside the WorkGraph to run a Python function on a remote computer. For example, in a `WorkChain` or run a single `CalcJob` calculation.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "9a1fa5e6", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Result: 3\n" + ] + } + ], + "source": [ + "from aiida import orm, load_profile\n", + "from aiida.engine import run_get_node\n", + "from aiida_workgraph.calculations.python import PythonJob\n", + "\n", + "load_profile()\n", + "\n", + "python_code = orm.load_code(\"python3@localhost\")\n", + "\n", + "def add(x, y):\n", + " return x + y\n", + "\n", + "result, node = run_get_node(PythonJob, code=python_code,\n", + " function=add,\n", + " function_kwargs = {\"x\": orm.Int(1), \"y\": orm.Int(2)},\n", + " function_outputs=[{\"name\": \"add\"}])\n", + "\n", + "print(\"Result: \", result[\"add\"].value)\n" + ] + }, + { + "cell_type": "markdown", + "id": "4fb22545", + "metadata": {}, + "source": [ + "You can see more details on any process, including its inputs and outputs, using the verdi command:" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "86e74979", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[22mProperty Value\n", + "----------- ------------------------------------\n", + "type PythonJob\n", + "state Finished [0]\n", + "pk 151415\n", + "uuid ff25998c-98d9-4d56-995a-fe9ecd66468a\n", + "label PythonJob\n", + 
"description\n", + "ctime 2024-09-13 10:46:05.231456+02:00\n", + "mtime 2024-09-13 10:46:08.263554+02:00\n", + "computer [1] localhost\n", + "\n", + "Inputs PK Type\n", + "---------------- ------ ---------------\n", + "function_kwargs\n", + " x 151412 Int\n", + " y 151413 Int\n", + "code 42316 InstalledCode\n", + "function 151411 PickledFunction\n", + "function_outputs 151414 List\n", + "\n", + "Outputs PK Type\n", + "------------- ------ ----------\n", + "add 151419 Int\n", + "remote_folder 151417 RemoteData\n", + "retrieved 151418 FolderData\u001b[0m\n" + ] + } + ], + "source": [ + "%verdi process show {node.pk}" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3.10.4 ('scinode')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + }, + "vscode": { + "interpreter": { + "hash": "2f450c1ff08798c4974437dd057310afef0de414c25d1fd960ad375311c3f6ff" + } + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/source/sg_execution_times.rst b/docs/source/sg_execution_times.rst new file mode 100644 index 0000000..276b6e4 --- /dev/null +++ b/docs/source/sg_execution_times.rst @@ -0,0 +1,37 @@ + +:orphan: + +.. _sphx_glr_sg_execution_times: + + +Computation times +================= +**00:22.087** total execution time for 1 file **from all galleries**: + +.. container:: + + .. raw:: html + + + + + + + + .. 
list-table:: + :header-rows: 1 + :class: table table-striped sg-datatable + + * - Example + - Time + - Mem (MB) + * - :ref:`sphx_glr_autogen_how_to.py` (``../gallery/autogen/how_to.py``) + - 00:22.087 + - 0.0 diff --git a/docs/source/tutorial/dft.ipynb b/docs/source/tutorial/dft.ipynb new file mode 100644 index 0000000..f462dd9 --- /dev/null +++ b/docs/source/tutorial/dft.ipynb @@ -0,0 +1,1128 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "id": "22d177dc-6cfb-4de2-9509-f1eb45e10cf2", + "metadata": {}, + "source": [ + "# DFT Calculation with ASE Calculator\n", + "## Introduction\n", + "\n", + "The `PythonJob` is a built-in task that allows users to run Python functions on a remote computer. For instance, users can use ASE's calculator to run a DFT calculation on a remote computer directly. Users only need to write normal Python code, and the WorkGraph will handle the data transformation to AiiDA data.\n", + "\n", + "The following examples are running with [AiiDA-WorkGraph](https://aiida-workgraph.readthedocs.io/en/latest/)." 
+ ] + }, + { + "cell_type": "markdown", + "id": "2174a45e", + "metadata": {}, + "source": [ + "## First Real-world Workflow: atomization energy of molecule\n", + "\n", + "The atomization energy, $\\Delta E$, of a molecule can be expressed as:\n", + "\n", + "$$\n", + "\\Delta E = n_{\\text{atom}} \\times E_{\\text{atom}} - E_{\\text{molecule}}\n", + "$$\n", + "\n", + "Where:\n", + "- $\\Delta E$ is the atomization energy of the molecule.\n", + "- $n_{\\text{atom}}$ is the number of atoms.\n", + "- $E_{\\text{atom}}$ is the energy of an isolated atom.\n", + "- $E_{\\text{molecule}}$ is the energy of the molecule.\n", + "\n", + "\n", + "### Define a task to calculate the energy of the atoms using EMT potential" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "713da634", + "metadata": {}, + "outputs": [], + "source": [ + "from aiida_workgraph import task, WorkGraph\n", + "\n", + "def emt(atoms):\n", + " from ase.calculators.emt import EMT\n", + " atoms.calc = EMT()\n", + " energy = atoms.get_potential_energy()\n", + " return energy\n", + "\n", + "\n", + "def atomization_energy(mol, energy_molecule, energy_atom):\n", + " energy = energy_atom*len(mol) - energy_molecule\n", + " return energy\n" + ] + }, + { + "cell_type": "markdown", + "id": "00a7531e", + "metadata": {}, + "source": [ + "### Define a workgraph\n" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "a81fa9e0", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 2, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "wg = WorkGraph(\"atomization_energy\")\n", + "pw_atom = wg.add_task(\"PythonJob\", function=emt, name=\"emt_atom\")\n", + "pw_mol = wg.add_task(\"PythonJob\", function=emt, name=\"emt_mol\")\n", + "wg.add_task(\"PythonJob\", function=atomization_energy, name=\"atomization_energy\",\n", + " energy_atom=pw_atom.outputs[\"result\"],\n", + " 
energy_molecule=pw_mol.outputs[\"result\"])\n", + "wg.to_html()" + ] + }, + { + "cell_type": "markdown", + "id": "b686f3ba", + "metadata": {}, + "source": [ + "### Prepare the inputs and submit the workflow" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "11e3bca1-dda6-44e9-9585-54feeda7e7db", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151193\n", + "Energy of a N atom: 5.100\n", + "Energy of an un-relaxed N2 molecule: 0.549\n", + "Atomization energy: 9.651 eV\n" + ] + }, + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "N151193\n", + "\n", + "WorkGraph<atomization_energy> (151193)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151199\n", + "\n", + "PythonJob<emt_mol> (151199)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151193->N151199\n", + "\n", + "\n", + "CALL_CALC\n", + "emt_mol\n", + "\n", + "\n", + "\n", + "N151205\n", + "\n", + "PythonJob<emt_atom> (151205)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151193->N151205\n", + "\n", + "\n", + "CALL_CALC\n", + "emt_atom\n", + "\n", + "\n", + "\n", + "N151219\n", + "\n", + "PythonJob<atomization_energy> (151219)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151193->N151219\n", + "\n", + "\n", + "CALL_CALC\n", + "atomization_energy\n", + "\n", + "\n", + "\n", + "N151224\n", + "\n", + "Int (151224)\n", + "\n", + "\n", + "\n", + "N151193->N151224\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151208\n", + "\n", + "RemoteData (151208)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151199->N151208\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151210\n", + "\n", + "FolderData (151210)\n", + "\n", + "\n", + "\n", + "N151199->N151210\n", + "\n", + "\n", + 
"CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151212\n", + "\n", + "GeneralData (151212)\n", + "\n", + "\n", + "\n", + "N151199->N151212\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151209\n", + "\n", + "RemoteData (151209)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151205->N151209\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151211\n", + "\n", + "FolderData (151211)\n", + "\n", + "\n", + "\n", + "N151205->N151211\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151213\n", + "\n", + "GeneralData (151213)\n", + "\n", + "\n", + "\n", + "N151205->N151213\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151212->N151219\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__energy_molecule\n", + "\n", + "\n", + "\n", + "N151213->N151219\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__energy_atom\n", + "\n", + "\n", + "\n", + "N151221\n", + "\n", + "RemoteData (151221)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151219->N151221\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151222\n", + "\n", + "FolderData (151222)\n", + "\n", + "\n", + "\n", + "N151219->N151222\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151223\n", + "\n", + "GeneralData (151223)\n", + "\n", + "\n", + "\n", + "N151219->N151223\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from ase.build import molecule\n", + "from ase import Atoms\n", + "\n", + "load_profile()\n", + "\n", + "# create input structure\n", + "n_atom = Atoms(\"N\", pbc=True)\n", + "n_atom.center(vacuum=5.0)\n", + "n2_molecule = molecule(\"N2\", pbc=True)\n", + "n2_molecule.center(vacuum=5.0)\n", + "\n", + "\n", + "#------------------------- Set the 
inputs -------------------------\n", + "wg.tasks[\"emt_atom\"].set({\"atoms\": n_atom, \"computer\": \"localhost\"})\n", + "wg.tasks[\"emt_mol\"].set({\"atoms\": n2_molecule, \"computer\": \"localhost\"})\n", + "wg.tasks[\"atomization_energy\"].set({\"mol\": n2_molecule, \"computer\": \"localhost\"})\n", + "#------------------------- Submit the calculation -------------------\n", + "wg.submit(wait=True, timeout=200)\n", + "#------------------------- Print the output -------------------------\n", + "print('Energy of a N atom: {:0.3f}'.format(wg.tasks['emt_atom'].outputs[\"result\"].value.value))\n", + "print('Energy of an un-relaxed N2 molecule: {:0.3f}'.format(wg.tasks['emt_mol'].outputs[\"result\"].value.value))\n", + "print('Atomization energy: {:0.3f} eV'.format(wg.tasks['atomization_energy'].outputs[\"result\"].value.value))\n", + "#------------------------- Generate node graph -------------------\n", + "generate_node_graph(wg.pk)\n" + ] + }, + { + "cell_type": "markdown", + "id": "a04617ca", + "metadata": {}, + "source": [ + "\n", + "\n", + "## Second Real-world Workflow: Equation of state (EOS) WorkGraph\n", + "\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "dd00841a", + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + " \n", + " " + ], + "text/plain": [ + "" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph import WorkGraph, task\n", + "from ase.build import bulk\n", + "from ase import Atoms\n", + "from aiida import load_profile\n", + "\n", + "load_profile()\n", + "\n", + "@task(outputs=[{\"name\": \"scaled_atoms\", \"identifier\": \"workgraph.namespace\"},\n", + " {\"name\": \"volumes\"}]\n", + ")\n", + "def generate_scaled_atoms(atoms: Atoms, scales: list) -> dict:\n", + " \"\"\"Scale the structure by the given scales.\"\"\"\n", + " volumes = {}\n", + " scaled_atoms = {}\n", + " for i in range(len(scales)):\n", + " atoms1 = 
atoms.copy()\n", + " atoms1.set_cell(atoms.cell * scales[i], scale_atoms=True)\n", + " scaled_atoms[f\"s_{i}\"] = atoms1\n", + " volumes[f\"s_{i}\"] = atoms1.get_volume()\n", + " return {\"scaled_atoms\": scaled_atoms, \"volumes\": volumes}\n", + "\n", + "@task()\n", + "def emt(atoms):\n", + " from ase.calculators.emt import EMT\n", + " atoms.calc = EMT()\n", + " energy = atoms.get_potential_energy()\n", + " return {\"energy\": energy}\n", + "\n", + "# Output result from context to the output socket\n", + "@task.graph_builder(outputs=[{\"name\": \"results\", \"from\": \"context.results\"}])\n", + "def calculate_enegies(scaled_atoms):\n", + " \"\"\"Run the scf calculation for each structure.\"\"\"\n", + " from aiida_workgraph import WorkGraph\n", + " wg = WorkGraph()\n", + " for key, atoms in scaled_atoms.items():\n", + " emt1 = wg.add_task(\"PythonJob\", function=emt, name=f\"emt1_{key}\", atoms=atoms)\n", + " emt1.set({\"computer\": \"localhost\"})\n", + " # save the output parameters to the context\n", + " emt1.set_context({\"result\": f\"results.{key}\"})\n", + " return wg\n", + "\n", + "\n", + "@task()\n", + "def fit_eos(volumes: dict, emt_results: dict) -> dict:\n", + " \"\"\"Fit the EOS of the data.\"\"\"\n", + " from ase.eos import EquationOfState\n", + " from ase.units import kJ\n", + "\n", + " volumes_list = []\n", + " energies = []\n", + " for key, data in emt_results.items():\n", + " energy = data[\"energy\"]\n", + " energies.append(energy)\n", + " volumes_list.append(volumes[key])\n", + " #\n", + " eos = EquationOfState(volumes_list, energies)\n", + " v0, e0, B = eos.fit()\n", + " # convert B to GPa\n", + " B = B / kJ * 1.0e24\n", + " eos = {\"energy unit\": \"eV\", \"v0\": v0, \"e0\": e0, \"B\": B}\n", + " return eos\n", + "\n", + "atoms = bulk(\"Au\", cubic=True)\n", + "\n", + "wg = WorkGraph(\"pythonjob_eos_emt\")\n", + "scale_atoms_task = wg.add_task(\"PythonJob\",\n", + " function=generate_scaled_atoms,\n", + " name=\"scale_atoms\",\n", + " 
atoms=atoms,\n", + " )\n", + " # -------- calculate_enegies -----------\n", + "calculate_enegies_task = wg.add_task(calculate_enegies,\n", + " name=\"calculate_enegies\",\n", + " scaled_atoms=scale_atoms_task.outputs[\"scaled_atoms\"],\n", + " )\n", + " # -------- fit_eos -----------\n", + "wg.add_task(\"PythonJob\",\n", + " function=fit_eos,\n", + " name=\"fit_eos\",\n", + " volumes=scale_atoms_task.outputs[\"volumes\"],\n", + " emt_results=calculate_enegies_task.outputs[\"results\"],\n", + " )\n", + "wg.to_html()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "id": "3d8072ac", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "WorkGraph process created, PK: 151263\n", + "The fitted EOS parameters are:\n" + ] + }, + { + "data": { + "text/plain": [ + "{'B': 167.61300824791,\n", + " 'e0': 0.006458727465855,\n", + " 'v0': 67.197735262521,\n", + " 'energy unit': 'eV'}" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "\n", + "\n", + "wg.submit(\n", + " inputs={\"scale_atoms\": {\"atoms\": atoms,\n", + " \"scales\": [0.95, 1.0, 1.05],\n", + " \"computer\": \"localhost\"},\n", + " \"fit_eos\": {\"computer\": \"localhost\"}},\n", + " wait=True,\n", + " )\n", + "\n", + "print(\"The fitted EOS parameters are:\")\n", + "wg.tasks[\"fit_eos\"].outputs[\"result\"].value.value\n" + ] + }, + { + "cell_type": "markdown", + "id": "8321bb88", + "metadata": {}, + "source": [ + "Generate the node graph and check the data provenance." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 17, + "id": "1f802430", + "metadata": {}, + "outputs": [ + { + "data": { + "image/svg+xml": [ + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "\n", + "N151263\n", + "\n", + "WorkGraph<pythonjob_eos_emt> (151263)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151270\n", + "\n", + "PythonJob<scale_atoms> (151270)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151263->N151270\n", + "\n", + "\n", + "CALL_CALC\n", + "scale_atoms\n", + "\n", + "\n", + "\n", + "N151278\n", + "\n", + "WorkGraph<calculate_enegies> (151278)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151263->N151278\n", + "\n", + "\n", + "CALL_WORK\n", + "calculate_enegies\n", + "\n", + "\n", + "\n", + "N151311\n", + "\n", + "PythonJob<fit_eos> (151311)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151263->N151311\n", + "\n", + "\n", + "CALL_CALC\n", + "fit_eos\n", + "\n", + "\n", + "\n", + "N151316\n", + "\n", + "Int (151316)\n", + "\n", + "\n", + "\n", + "N151263->N151316\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151272\n", + "\n", + "RemoteData (151272)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151270->N151272\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151273\n", + "\n", + "FolderData (151273)\n", + "\n", + "\n", + "\n", + "N151270->N151273\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151274\n", + "\n", + "AtomsData (151274)\n", + "\n", + "\n", + "\n", + "N151270->N151274\n", + "\n", + "\n", + "CREATE\n", + "scaled_atoms__s_0\n", + "\n", + "\n", + "\n", + "N151275\n", + "\n", + "AtomsData (151275)\n", + "\n", + "\n", + "\n", + "N151270->N151275\n", + "\n", + "\n", + "CREATE\n", + "scaled_atoms__s_1\n", + "\n", + "\n", + "\n", + "N151276\n", + "\n", + "AtomsData (151276)\n", + "\n", + "\n", + 
"\n", + "N151270->N151276\n", + "\n", + "\n", + "CREATE\n", + "scaled_atoms__s_2\n", + "\n", + "\n", + "\n", + "N151277\n", + "\n", + "Dict (151277)\n", + "\n", + "\n", + "\n", + "N151270->N151277\n", + "\n", + "\n", + "CREATE\n", + "volumes\n", + "\n", + "\n", + "\n", + "N151274->N151278\n", + "\n", + "\n", + "INPUT_WORK\n", + "wg__tasks__emt1_s_0__inputs__atoms__property__value\n", + "\n", + "\n", + "\n", + "N151283\n", + "\n", + "PythonJob<emt1_s_0> (151283)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151274->N151283\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__atoms\n", + "\n", + "\n", + "\n", + "N151275->N151278\n", + "\n", + "\n", + "INPUT_WORK\n", + "wg__tasks__emt1_s_1__inputs__atoms__property__value\n", + "\n", + "\n", + "\n", + "N151288\n", + "\n", + "PythonJob<emt1_s_1> (151288)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151275->N151288\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__atoms\n", + "\n", + "\n", + "\n", + "N151276->N151278\n", + "\n", + "\n", + "INPUT_WORK\n", + "wg__tasks__emt1_s_2__inputs__atoms__property__value\n", + "\n", + "\n", + "\n", + "N151293\n", + "\n", + "PythonJob<emt1_s_2> (151293)\n", + "State: finished\n", + "Exit Code: 0\n", + "\n", + "\n", + "\n", + "N151276->N151293\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__atoms\n", + "\n", + "\n", + "\n", + "N151277->N151311\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__volumes\n", + "\n", + "\n", + "\n", + "N151278->N151283\n", + "\n", + "\n", + "CALL_CALC\n", + "emt1_s_0\n", + "\n", + "\n", + "\n", + "N151278->N151288\n", + "\n", + "\n", + "CALL_CALC\n", + "emt1_s_1\n", + "\n", + "\n", + "\n", + "N151278->N151293\n", + "\n", + "\n", + "CALL_CALC\n", + "emt1_s_2\n", + "\n", + "\n", + "\n", + "N151303\n", + "\n", + "Dict (151303)\n", + "\n", + "\n", + "\n", + "N151278->N151303\n", + "\n", + "\n", + "RETURN\n", + "results__s_0\n", + "\n", + "\n", + "\n", + "N151304\n", + 
"\n", + "Dict (151304)\n", + "\n", + "\n", + "\n", + "N151278->N151304\n", + "\n", + "\n", + "RETURN\n", + "results__s_1\n", + "\n", + "\n", + "\n", + "N151305\n", + "\n", + "Dict (151305)\n", + "\n", + "\n", + "\n", + "N151278->N151305\n", + "\n", + "\n", + "RETURN\n", + "results__s_2\n", + "\n", + "\n", + "\n", + "N151306\n", + "\n", + "Int (151306)\n", + "\n", + "\n", + "\n", + "N151278->N151306\n", + "\n", + "\n", + "RETURN\n", + "execution_count\n", + "\n", + "\n", + "\n", + "N151297\n", + "\n", + "RemoteData (151297)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151283->N151297\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151300\n", + "\n", + "FolderData (151300)\n", + "\n", + "\n", + "\n", + "N151283->N151300\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151283->N151303\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151298\n", + "\n", + "RemoteData (151298)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151288->N151298\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151301\n", + "\n", + "FolderData (151301)\n", + "\n", + "\n", + "\n", + "N151288->N151301\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151288->N151304\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151299\n", + "\n", + "RemoteData (151299)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151293->N151299\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151302\n", + "\n", + "FolderData (151302)\n", + "\n", + "\n", + "\n", + "N151293->N151302\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151293->N151305\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n", + "N151303->N151311\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__emt_results__s_0\n", + "\n", + "\n", + "\n", + "N151304->N151311\n", + "\n", + 
"\n", + "INPUT_CALC\n", + "function_kwargs__emt_results__s_1\n", + "\n", + "\n", + "\n", + "N151305->N151311\n", + "\n", + "\n", + "INPUT_CALC\n", + "function_kwargs__emt_results__s_2\n", + "\n", + "\n", + "\n", + "N151313\n", + "\n", + "RemoteData (151313)\n", + "@localhost\n", + "\n", + "\n", + "\n", + "N151311->N151313\n", + "\n", + "\n", + "CREATE\n", + "remote_folder\n", + "\n", + "\n", + "\n", + "N151314\n", + "\n", + "FolderData (151314)\n", + "\n", + "\n", + "\n", + "N151311->N151314\n", + "\n", + "\n", + "CREATE\n", + "retrieved\n", + "\n", + "\n", + "\n", + "N151315\n", + "\n", + "Dict (151315)\n", + "\n", + "\n", + "\n", + "N151311->N151315\n", + "\n", + "\n", + "CREATE\n", + "result\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from aiida_workgraph.utils import generate_node_graph\n", + "\n", + "#------------------------- Generate node graph -------------------\n", + "generate_node_graph(wg.pk)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "aiida", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.0" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/docs/source/tutorial/html/atomization_energy.html b/docs/source/tutorial/html/atomization_energy.html new file mode 100644 index 0000000..6f0bd6a --- /dev/null +++ b/docs/source/tutorial/html/atomization_energy.html @@ -0,0 +1,290 @@ + + + + + + + Rete.js with React in Vanilla JS + + + + + + + + + + + + + + + + + + + + + +
+ + + diff --git a/docs/source/tutorial/html/pythonjob_eos_emt.html b/docs/source/tutorial/html/pythonjob_eos_emt.html new file mode 100644 index 0000000..6a4a448 --- /dev/null +++ b/docs/source/tutorial/html/pythonjob_eos_emt.html @@ -0,0 +1,290 @@ + + + + + + + Rete.js with React in Vanilla JS + + + + + + + + + + + + + + + + + + + + + +
+ + + diff --git a/docs/source/tutorial/index.rst b/docs/source/tutorial/index.rst new file mode 100644 index 0000000..be42331 --- /dev/null +++ b/docs/source/tutorial/index.rst @@ -0,0 +1,11 @@ + +Tutorials +=========================================== + +In this tutorials, you will see severl examples in real applications. + +.. toctree:: + :maxdepth: 2 + :caption: Contents: + + dft diff --git a/examples/test_add.py b/examples/test_add.py new file mode 100644 index 0000000..314cc13 --- /dev/null +++ b/examples/test_add.py @@ -0,0 +1,15 @@ +from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob +from aiida.engine import run +from aiida import load_profile + +load_profile() + + +def add(x, y): + return x + y + + +inputs = prepare_pythonjob_inputs(add, function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "add"}], + computer="localhost") +run(PythonJob, inputs=inputs) \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..7fa459c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,145 @@ +[build-system] +requires = ["hatchling"] +build-backend = "hatchling.build" + +[project] +# See https://www.python.org/dev/peps/pep-0621/ +name = "aiida-pythonjob" +dynamic = ["version"] # read from aiida_pythonjob/src/__init__.py +description = "Run Python functions on a remote computer." 
+authors = [{name = "Xing Wang", email = "xingwang1991@gmail.com"}] +readme = "README.md" +license = {file = "LICENSE"} +classifiers = [ + "Programming Language :: Python", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: MIT License", + "Natural Language :: English", + "Development Status :: 3 - Alpha", + "Framework :: AiiDA" +] +keywords = ["aiida", "plugin"] +requires-python = ">=3.9" +dependencies = [ + "aiida-core>=2.3,<3", + "voluptuous" +] + +[project.optional-dependencies] +pre-commit = [ + 'pre-commit~=3.5', +] +docs = [ + "sphinx", + "sphinxcontrib-contentui", + "sphinxcontrib-details-directive", + "furo", + "markupsafe<2.1" +] + +[project.urls] +Source = "https://github.com/aiidateam/aiida-pythonjob" + +[project.entry-points."aiida.data"] +"pythonjob.pickled_data" = "aiida_pythonjob.data.pickled_data:PickledData" +"pythonjob.pickled_function" = "aiida_pythonjob.data.pickled_function:PickledFunction" + +[project.entry-points."aiida.calculations"] +"pythonjob.pythonjob" = "aiida_pythonjob.calculations.pythonjob:PythonJob" + +[project.entry-points."aiida.parsers"] +"pythonjob.pythonjob" = "aiida_pythonjob.parsers.pythonjob:PythonJobParser" + + +[tool.pytest.ini_options] +# Configuration for [pytest](https://docs.pytest.org) +python_files = "test_*.py example_*.py" +addopts = "--pdbcls=IPython.terminal.debugger:TerminalPdb" +filterwarnings = [ + "ignore::DeprecationWarning:aiida:", + "ignore:Creating AiiDA configuration folder:", + "ignore::DeprecationWarning:plumpy:", + "ignore::DeprecationWarning:yaml:", +] + + +[tool.coverage.run] +# Configuration of [coverage.py](https://coverage.readthedocs.io) +# reporting which lines of your plugin are covered by tests +source = ["src/aiida_pythonjob"] + +[tool.ruff] +line-length = 120 + +[tool.ruff.lint] +ignore = [ + 'F403', # Star imports unable to detect undefined names + 'F405', # Import may be undefined or defined from star imports + 'PLR0911', # Too many return statements + 'PLR0912', # 
Too many branches + 'PLR0913', # Too many arguments in function definition + 'PLR0915', # Too many statements + 'PLR2004', # Magic value used in comparison + 'RUF005', # Consider iterable unpacking instead of concatenation + 'RUF012' # Mutable class attributes should be annotated with `typing.ClassVar` +] +select = [ + 'E', # pydocstyle + 'W', # pydocstyle + 'F', # pyflakes + 'I', # isort + 'N', # pep8-naming + 'PLC', # pylint-convention + 'PLE', # pylint-error + 'PLR', # pylint-refactor + 'PLW', # pylint-warning + 'RUF' # ruff +] + +## Hatch configurations + +[tool.hatch.version] +path = "src/aiida_pythonjob/__init__.py" + +[tool.hatch.envs.hatch-test] +dependencies = [ + 'pgtest~=1.3,>=1.3.1', + 'coverage~=7.0', + 'pytest~=7.0', + "pytest-cov~=4.1", + "ipdb" +] + +[tool.hatch.envs.hatch-test.scripts] +# These are the efault scripts provided by hatch. +# The have been copied to make the execution more transparent + +# This command is run with the command `hatch test` +run = "pytest{env:HATCH_TEST_ARGS:} {args}" +# The three commands below are run with the command `hatch test --coverage` +run-cov = "coverage run -m pytest{env:HATCH_TEST_ARGS:} {args}" +cov-combine = "coverage combine" +cov-report = "coverage report" + +[[tool.hatch.envs.hatch-test.matrix]] +python = ["3.9", "3.10", "3.11", "3.12"] + +[tool.hatch.envs.hatch-static-analysis] +dependencies = ["ruff==0.4.3"] + +[tool.hatch.envs.hatch-static-analysis.scripts] +# Fixes are executed with `hatch fmt`. +# Checks are executed with `hatch fmt --check`. 
+ +format-check = "ruff format --check --config pyproject.toml {args:.}" +format-fix = "ruff format --config pyproject.toml {args:.}" +lint-check = "ruff check --config pyproject.toml {args:.}" +lint-fix = "ruff check --config pyproject.toml --fix --exit-non-zero-on-fix --show-fixes {args:.}" + +[tool.hatch.envs.docs] +features = ["docs"] + +[tool.hatch.envs.docs.scripts] +build = [ + "make -C docs" +] diff --git a/src/aiida_pythonjob/__init__.py b/src/aiida_pythonjob/__init__.py new file mode 100644 index 0000000..b0bedf5 --- /dev/null +++ b/src/aiida_pythonjob/__init__.py @@ -0,0 +1,15 @@ +"""AiiDA plugin that run Python function on remote computers.""" +__version__ = '0.1.0' + +from .calculations import PythonJob +from .data import PickledData, PickledFunction +from .parsers import PythonJobParser +from .launch import prepare_pythonjob_inputs + +__all__ = ( + 'PythonJob', + 'PickledData', + 'PickledFunction', + 'prepare_pythonjob_inputs', + 'PythonJobParser', +) diff --git a/src/aiida_pythonjob/calculations/__init__.py b/src/aiida_pythonjob/calculations/__init__.py new file mode 100644 index 0000000..f828963 --- /dev/null +++ b/src/aiida_pythonjob/calculations/__init__.py @@ -0,0 +1,3 @@ +from .pythonjob import PythonJob + +__all__ = ('PythonJob',) diff --git a/src/aiida_pythonjob/calculations/pythonjob.py b/src/aiida_pythonjob/calculations/pythonjob.py new file mode 100644 index 0000000..f896ec6 --- /dev/null +++ b/src/aiida_pythonjob/calculations/pythonjob.py @@ -0,0 +1,322 @@ +"""Calcjob to run a Python function on a remote computer.""" +from __future__ import annotations + +import pathlib +import typing as t + +from aiida.common.datastructures import CalcInfo, CodeInfo +from aiida.common.folders import Folder +from aiida.common.extendeddicts import AttributeDict +from aiida.engine import CalcJob, CalcJobProcessSpec +from aiida.orm import ( + Data, + SinglefileData, + Str, + List, + FolderData, + RemoteData, + to_aiida_type, +) +from 
class PythonJob(CalcJob):
    """Calcjob to run a Python function on a remote computer.

    The function's source code is rendered into a standalone ``script.py``,
    its keyword arguments are pickled into ``inputs.pickle`` and uploaded
    alongside it; the script writes ``results.pickle`` on the remote machine,
    which is retrieved and handed to the parser.
    """

    _internal_retrieve_list = []
    _retrieve_singlefile_list = []
    _retrieve_temporary_list = []

    _DEFAULT_INPUT_FILE = "script.py"
    _DEFAULT_OUTPUT_FILE = "aiida.out"
    _DEFAULT_PARENT_FOLDER_NAME = "./parent_folder/"

    @classmethod
    def define(cls, spec: CalcJobProcessSpec) -> None:  # type: ignore[override]
        """Define the process specification, including its inputs, outputs and known exit codes.

        :param spec: the calculation job process spec to define.
        """
        super().define(spec)
        spec.input(
            "function",
            valid_type=PickledFunction,
            serializer=to_pickled_function,
            required=False,
        )
        spec.input(
            "function_source_code",
            valid_type=Str,
            serializer=to_aiida_type,
            required=False,
        )
        spec.input("function_name", valid_type=Str, serializer=to_aiida_type, required=False)
        spec.input("process_label", valid_type=Str, serializer=to_aiida_type, required=False)
        spec.input_namespace(
            "function_inputs", valid_type=Data, required=False
        )  # , serializer=serialize_to_aiida_nodes)
        spec.input(
            "function_outputs",
            valid_type=List,
            default=lambda: List(),
            required=False,
            serializer=to_aiida_type,
            help="The information of the output ports",
        )
        spec.input(
            "parent_folder",
            valid_type=(RemoteData, FolderData, SinglefileData),
            required=False,
            help="Use a local or remote folder as parent folder (for restarts and similar)",
        )
        spec.input(
            "parent_folder_name",
            valid_type=Str,
            required=False,
            serializer=to_aiida_type,
            help="""Default name of the subfolder that you want to create in the working directory,
            in which you want to place the files taken from parent_folder""",
        )
        spec.input(
            "parent_output_folder",
            valid_type=Str,
            default=None,
            required=False,
            serializer=to_aiida_type,
            help="Name of the subfolder inside 'parent_folder' from which you want to copy the files",
        )
        spec.input_namespace(
            "upload_files",
            valid_type=(FolderData, SinglefileData),
            required=False,
            help="The folder/files to upload",
        )
        spec.input_namespace(
            "copy_files",
            valid_type=(RemoteData,),
            required=False,
            help="The folder/files to copy from the remote computer",
        )
        spec.input(
            "additional_retrieve_list",
            valid_type=List,
            default=None,
            required=False,
            serializer=to_aiida_type,
            help="The names of the files to retrieve",
        )
        spec.outputs.dynamic = True
        # set default options (optional)
        spec.inputs["metadata"]["options"]["parser_name"].default = "pythonjob.pythonjob"
        spec.inputs["metadata"]["options"]["input_filename"].default = "script.py"
        spec.inputs["metadata"]["options"]["output_filename"].default = "aiida.out"
        spec.inputs["metadata"]["options"]["resources"].default = {
            "num_machines": 1,
            "num_mpiprocs_per_machine": 1,
        }
        # start exit codes - marker for docs
        spec.exit_code(
            310,
            "ERROR_READING_OUTPUT_FILE",
            invalidates_cache=True,
            message="The output file could not be read.",
        )
        spec.exit_code(
            320,
            "ERROR_INVALID_OUTPUT",
            invalidates_cache=True,
            message="The output file contains invalid output.",
        )
        spec.exit_code(
            321,
            "ERROR_RESULT_OUTPUT_MISMATCH",
            invalidates_cache=True,
            message="The number of results does not match the number of outputs.",
        )

    def _build_process_label(self) -> str:
        """Use the function name as the process label.

        :returns: The process label to use for ``ProcessNode`` instances.
        """
        if "process_label" in self.inputs:
            return self.inputs.process_label.value
        data = self.get_function_data()
        return f"PythonJob<{data['name']}>"

    def on_create(self) -> None:
        """Called when a Process is created."""
        super().on_create()
        self.node.label = self._build_process_label()

    def get_function_data(self) -> dict[str, t.Any]:
        """Get the function data.

        :returns: dict containing at least ``source_code`` and ``name``.
        """
        if "function" in self.inputs:
            metadata = self.inputs.function.metadata
            # Re-assemble a runnable source: required imports first, then the
            # function body stripped of any decorator.
            metadata["source_code"] = (
                metadata["import_statements"] + "\n" + metadata["source_code_without_decorator"]
            )
            return metadata
        return {
            "source_code": self.inputs.function_source_code.value,
            "name": self.inputs.function_name.value,
        }

    def prepare_for_submission(self, folder: Folder) -> CalcInfo:
        """Prepare the calculation for submission.

        1) Write the python script to the folder.
        2) Write the inputs to a pickle file and save it to the folder.

        :param folder: A temporary folder on the local file system.
        :returns: A :class:`aiida.common.datastructures.CalcInfo` instance.
        """
        import cloudpickle as pickle

        inputs: dict[str, t.Any]
        # ``function_inputs`` is an optional namespace: guard against it being absent.
        if "function_inputs" in self.inputs and self.inputs.function_inputs:
            inputs = dict(self.inputs.function_inputs)
        else:
            inputs = {}
        if "parent_folder_name" in self.inputs:
            parent_folder_name = self.inputs.parent_folder_name.value
        else:
            parent_folder_name = self._DEFAULT_PARENT_FOLDER_NAME
        function_data = self.get_function_data()
        # create python script to run the function
        script = f"""
import pickle

# define the function
{function_data["source_code"]}

# load the inputs from the pickle file
with open('inputs.pickle', 'rb') as handle:
    inputs = pickle.load(handle)

# run the function
result = {function_data["name"]}(**inputs)
# save the result as a pickle file
with open('results.pickle', 'wb') as handle:
    pickle.dump(result, handle)
"""
        # write the script to the folder
        with folder.open(self.options.input_filename, "w", encoding="utf8") as handle:
            handle.write(script)
        # symlink = settings.pop('PARENT_FOLDER_SYMLINK', False)
        symlink = True

        remote_copy_list = []
        local_copy_list = []
        remote_symlink_list = []
        remote_list = remote_symlink_list if symlink else remote_copy_list

        source = self.inputs.get("parent_folder", None)

        if source is not None:
            if isinstance(source, RemoteData):
                # use a dedicated name instead of shadowing the local sandbox path
                parent_path = pathlib.Path(source.get_remote_path())
                if self.inputs.parent_output_folder is not None:
                    parent_path = parent_path / self.inputs.parent_output_folder.value
                remote_list.append(
                    (
                        source.computer.uuid,
                        str(parent_path),
                        parent_folder_name,
                    )
                )
            elif isinstance(source, FolderData):
                dirname = (
                    self.inputs.parent_output_folder.value
                    if self.inputs.parent_output_folder is not None
                    else ""
                )
                local_copy_list.append((source.uuid, dirname, parent_folder_name))
            elif isinstance(source, SinglefileData):
                local_copy_list.append((source.uuid, source.filename, source.filename))
        if "upload_files" in self.inputs:
            for key, upload_source in self.inputs.upload_files.items():
                # namespace keys cannot contain ".", so it was encoded as "_dot_"
                key = key.replace("_dot_", ".")
                if isinstance(upload_source, FolderData):
                    local_copy_list.append((upload_source.uuid, "", key))
                elif isinstance(upload_source, SinglefileData):
                    local_copy_list.append(
                        (upload_source.uuid, upload_source.filename, upload_source.filename)
                    )
                else:
                    raise ValueError(
                        f"""Input folder/file: {upload_source} is not supported.
Only AiiDA SinglefileData and FolderData are allowed."""
                    )
        if "copy_files" in self.inputs:
            for key, copy_source in self.inputs.copy_files.items():
                # replace "_dot_" with "." in the key
                key = key.replace("_dot_", ".")
                remote_list.append(
                    (copy_source.computer.uuid, str(pathlib.Path(copy_source.get_remote_path())), key)
                )
        # create pickle file for the inputs
        input_values = {}
        for key, value in inputs.items():
            if isinstance(value, Data) and hasattr(value, "value"):
                # get the value of the pickled data
                input_values[key] = value.value
            # TODO: should check this recursively
            elif isinstance(value, (AttributeDict, dict)):
                # if the value is an AttributeDict, use recursively
                input_values[key] = {k: v.value for k, v in value.items()}
            else:
                raise ValueError(
                    f"Input data {value} is not supported. Only AiiDA data Node with a value attribute is allowed. "
                )
        # save the value as a pickle file, the path is absolute
        filename = "inputs.pickle"
        dirpath = pathlib.Path(folder._abspath)
        with folder.open(filename, "wb") as handle:
            pickle.dump(input_values, handle)
        # create a singlefiledata object for the pickled data
        # BUGFIX: point at the file that was just written; the previous literal
        # path did not exist, so SinglefileData creation would fail.
        file_data = SinglefileData(file=f"{dirpath}/{filename}")
        file_data.store()
        local_copy_list.append((file_data.uuid, file_data.filename, filename))

        codeinfo = CodeInfo()
        codeinfo.stdin_name = self.options.input_filename
        codeinfo.stdout_name = self.options.output_filename
        codeinfo.code_uuid = self.inputs.code.uuid

        calcinfo = CalcInfo()
        calcinfo.codes_info = [codeinfo]
        calcinfo.local_copy_list = local_copy_list
        calcinfo.remote_copy_list = remote_copy_list
        calcinfo.remote_symlink_list = remote_symlink_list
        calcinfo.retrieve_list = ["results.pickle", self.options.output_filename]
        if self.inputs.additional_retrieve_list is not None:
            calcinfo.retrieve_list += self.inputs.additional_retrieve_list.get_list()
        calcinfo.retrieve_list += self._internal_retrieve_list

        calcinfo.retrieve_temporary_list = self._retrieve_temporary_list
        calcinfo.retrieve_singlefile_list = self._retrieve_singlefile_list

        return calcinfo
"""`Data` sub class to represent any data using pickle."""

import sys
from pickle import UnpicklingError

import cloudpickle
from aiida import orm


class Dict(orm.Dict):
    """``orm.Dict`` variant that exposes its content through a ``value`` property."""

    @property
    def value(self):
        return self.get_dict()


class List(orm.List):
    """``orm.List`` variant that exposes its content through a ``value`` property."""

    @property
    def value(self):
        return self.get_list()


class PickledData(orm.Data):
    """Data to represent a pickled value using cloudpickle."""

    # Name of the repository object that holds the pickled payload.
    FILENAME = "value.pkl"

    def __init__(self, value=None, **kwargs):
        """Initialize a `PickledData` node instance.

        :param value: raw Python value to initialize the `PickledData` node from.
        """
        super().__init__(**kwargs)
        self.set_value(value)

    def __str__(self):
        return f"{super().__str__()} : {self.get_value()}"

    @property
    def value(self):
        """The unpickled Python object stored on this node."""
        return self.get_value()

    @value.setter
    def value(self, value):
        self.set_value(value)

    def get_value(self):
        """Return the contents of this node, unpickling the stored value.

        :return: The unpickled value.
        """
        return self._get_value_from_file()

    def _get_value_from_file(self):
        """Read the pickled payload from the repository and deserialize it."""
        try:
            with self.base.repository.open(self.FILENAME, mode="rb") as stream:
                payload = stream.read()
            return cloudpickle.loads(payload)  # Deserialize the value
        except (UnpicklingError, ValueError) as exc:
            raise ImportError(
                "Failed to load the pickled value. This may be due to an incompatible pickle protocol. "
                "Please ensure that the correct environment and cloudpickle version are being used."
            ) from exc
        except ModuleNotFoundError as exc:
            raise ImportError(
                "Failed to load the pickled value. This may be due to a missing module. "
                "Please ensure that the correct environment and cloudpickle version are being used."
            ) from exc

    def set_value(self, value):
        """Set the contents of this node by pickling the provided value.

        :param value: The Python value to pickle and store.
        """
        # Serialize the value and store it in the repository.
        self.base.repository.put_object_from_bytes(cloudpickle.dumps(value), self.FILENAME)

        # Record the serialization environment so incompatibilities can be
        # diagnosed when the node is later unpickled elsewhere.
        attributes = self.base.attributes
        attributes.set("python_version", f"{sys.version_info.major}.{sys.version_info.minor}")
        attributes.set("serializer_module", cloudpickle.__name__)
        attributes.set("serializer_version", cloudpickle.__version__)
        attributes.set("pickle_protocol", cloudpickle.DEFAULT_PROTOCOL)
class PickledFunction(PickledData):
    """Data class to represent a pickled Python function."""

    def __init__(self, value=None, **kwargs):
        """Initialize a PickledFunction node instance.

        :param value: a Python function
        :raises ValueError: if ``value`` is not callable.
        """
        super().__init__(**kwargs)
        if not callable(value):
            raise ValueError("value must be a callable Python function")
        self.set_value(value)
        self.set_attribute(value)

    def __str__(self):
        return (
            f"PickledFunction<{self.base.attributes.get('function_name')}> pk={self.pk}"
        )

    @property
    def metadata(self):
        """Return a dictionary of metadata."""
        return {
            "name": self.base.attributes.get("name"),
            "import_statements": self.base.attributes.get("import_statements"),
            "source_code": self.base.attributes.get("source_code"),
            "source_code_without_decorator": self.base.attributes.get(
                "source_code_without_decorator"
            ),
            "type": "function",
            "is_pickle": True,
        }

    @classmethod
    def build_callable(cls, func):
        """Return the executor for this node."""
        import cloudpickle as pickle

        executor = {
            "executor": pickle.dumps(func),
            "type": "function",
            "is_pickle": True,
        }
        executor.update(cls.inspect_function(func))
        return executor

    def set_attribute(self, value):
        """Store the inspected function metadata as node attributes.

        :param value: The Python function to inspect and describe.
        """
        serialized_data = self.inspect_function(value)
        self.base.attributes.set("name", serialized_data["name"])
        self.base.attributes.set("import_statements", serialized_data["import_statements"])
        self.base.attributes.set("source_code", serialized_data["source_code"])
        self.base.attributes.set(
            "source_code_without_decorator",
            serialized_data["source_code_without_decorator"],
        )

    @classmethod
    def inspect_function(cls, func: Callable) -> Dict[str, Any]:
        """Serialize a function for storage or transmission.

        :returns: dict with the function name, its full source, the source with
            any decorator lines stripped, and the import statements needed for
            the types used in its type hints.
        """
        try:
            # we need to save the source code explicitly, because in the case of a
            # jupyter notebook the source code is not saved in the pickle file
            function_source_code = inspect.getsource(func)
            source_code_lines = function_source_code.split("\n")
            # Strip decorator lines: the body starts at the first "def " line.
            # BUGFIX: if no "def " line is found (e.g. a lambda), keep the full
            # source instead of silently truncating to the last line.
            start = 0
            for i, line in enumerate(source_code_lines):
                if line.strip().startswith("def "):
                    start = i
                    break
            function_source_code_without_decorator = textwrap.dedent(
                "\n".join(source_code_lines[start:])
            )
            # we also need to include the necessary imports for the types used in the type hints.
            try:
                required_imports = cls.get_required_imports(func)
            except Exception as e:
                required_imports = {}
                print(
                    f"Failed to get required imports for function {func.__name__}: {e}"
                )
            # Generate import statements; sort the type names so the generated
            # source is deterministic (sets have no stable iteration order).
            import_statements = "\n".join(
                f"from {module} import {', '.join(sorted(types))}"
                for module, types in required_imports.items()
            )
        except Exception as e:
            print(f"Failed to inspect function {func.__name__}: {e}")
            function_source_code = ""
            function_source_code_without_decorator = ""
            import_statements = ""
        return {
            "name": func.__name__,
            "source_code": function_source_code,
            "source_code_without_decorator": function_source_code_without_decorator,
            "import_statements": import_statements,
        }

    @classmethod
    def get_required_imports(cls, func: Callable) -> Dict[str, set]:
        """Retrieve type hints and the corresponding modules."""
        type_hints = get_type_hints(func)
        imports = {}

        def add_imports(type_hint):
            if isinstance(
                type_hint, _SpecialForm
            ):  # Handle special forms like Any, Union, Optional
                module_name = "typing"
                type_name = type_hint._name or str(type_hint)
            elif hasattr(
                type_hint, "__origin__"
            ):  # This checks for higher-order types like List, Dict
                module_name = type_hint.__module__
                type_name = getattr(type_hint, "_name", None) or getattr(
                    type_hint.__origin__, "__name__", None
                )
                for arg in getattr(type_hint, "__args__", []):
                    if arg is type(None):  # noqa: E721
                        continue
                    add_imports(arg)  # Recursively add imports for each argument
            elif hasattr(type_hint, "__module__"):
                module_name = type_hint.__module__
                type_name = type_hint.__name__
            else:
                return  # If no module or origin, we can't import it, e.g., for literals

            if type_name is not None:
                imports.setdefault(module_name, set()).add(type_name)

        for _, type_hint in type_hints.items():
            add_imports(type_hint)

        return imports


def to_pickled_function(value):
    """Convert a Python function to a `PickledFunction` instance."""
    return PickledFunction(value)


class PickledLocalFunction(PickledFunction):
    """PickledFunction subclass for local functions."""
"""Serialization helpers that turn raw Python values into AiiDA data nodes."""

import sys
from importlib.metadata import entry_points
from typing import Any

from aiida import common, orm

from aiida_pythonjob.config import load_config

from .pickled_data import PickledData


def get_serializer_from_entry_points() -> dict:
    """Retrieve the serializer from the entry points.

    :returns: mapping ``"<module>.<ClassName>" -> [entry point]`` for every
        ``aiida.data`` entry point whose name encodes a Python type.
    :raises ValueError: if duplicate entry points exist for a key and the
        configuration does not disambiguate them, or if the configured
        serializer is not among the registered entry points.
    """
    configs = load_config()
    serializers = configs.get("serializers", {})
    excludes = serializers.get("excludes", [])
    # Retrieve the entry points for 'aiida.data' and store them in a dictionary
    all_eps = entry_points()
    if sys.version_info >= (3, 10):
        group = all_eps.select(group="aiida.data")
    else:
        group = all_eps.get("aiida.data", [])
    eps = {}
    for ep in group:
        # split the entry point name by first ".", and check the last part
        key = ep.name.split(".", 1)[-1]
        # skip key without "." because it is not a module name for a data type
        if "." not in key or key in excludes:
            continue
        eps.setdefault(key, []).append(ep)

    # Resolve duplicates: the user configuration may pin one serializer per key.
    for key, value in eps.items():
        if len(value) > 1:
            if key in serializers:
                # BUGFIX: the filtered list was previously computed once and
                # discarded before being recomputed; filter exactly once.
                eps[key] = [ep for ep in value if ep.name == serializers[key]]
                if not eps[key]:
                    raise ValueError(f"Entry point {serializers[key]} not found for {key}")
            else:
                msg = f"Duplicate entry points for {key}: {[ep.name for ep in value]}"
                raise ValueError(msg)
    return eps


eps = get_serializer_from_entry_points()


def serialize_to_aiida_nodes(inputs: dict = None) -> dict:
    """Serialize the inputs to a dictionary of AiiDA data nodes.

    Args:
        inputs (dict): The inputs to be serialized. ``None`` is treated as empty.

    Returns:
        dict: The serialized inputs.
    """
    # BUGFIX: the default ``None`` used to crash on ``inputs.items()``.
    new_inputs = {}
    # save all kwargs to inputs port
    for key, data in (inputs or {}).items():
        new_inputs[key] = general_serializer(data)
    return new_inputs


def clean_dict_key(data):
    """Replace "." with "__dot__" in the keys of a dictionary (recursively)."""
    if isinstance(data, dict):
        return {k.replace(".", "__dot__"): clean_dict_key(v) for k, v in data.items()}
    return data


def general_serializer(data: Any, check_value=True) -> orm.Node:
    """Serialize the data to an AiiDA data node.

    Strategy: pass through AiiDA nodes, AttributeDicts and ``{{...}}`` context
    placeholders unchanged; otherwise try the entry-point serializer registered
    for the data's type, and fall back to :class:`PickledData`.

    :raises ValueError: if the data cannot be serialized at all, or if an AiiDA
        node without a ``value`` attribute is passed while ``check_value`` is set.
    """
    if isinstance(data, orm.Data):
        if check_value and not hasattr(data, "value"):
            raise ValueError("Only AiiDA data Node with a value attribute is allowed.")
        return data
    if isinstance(data, common.extendeddicts.AttributeDict):
        # if the data is an AttributeDict, use it directly
        return data
    # if is string with syntax {{}}, this is a port will read data from ctx
    if isinstance(data, str) and data.startswith("{{") and data.endswith("}}"):
        return data
    data = clean_dict_key(data)
    # try to get the serializer from the entry points; the key is built from the
    # data's type, e.g. an ase Atoms maps to "ase.atoms.Atoms"
    data_type = type(data)
    ep_key = f"{data_type.__module__}.{data_type.__name__}"
    if ep_key in eps:
        # BUGFIX: the original used ``raise`` in ``except`` plus ``return``
        # inside ``finally``, which both swallowed the exception and read a
        # possibly-unbound variable. The effective behavior — fall back to
        # pickling on any failure — is now written out explicitly.
        try:
            new_node = eps[ep_key][0].load()(data)
            new_node.store()
            return new_node
        except Exception:
            pass  # fall through to the PickledData fallback below
    try:
        new_node = PickledData(data)
        new_node.store()
        return new_node
    except Exception as e:
        raise ValueError(f"Error in serializing {ep_key}: {e}") from e
AbstractCode, Computer, List, ProcessNode, SinglefileData, FolderData, Str +from typing import Any, Callable +from .data.pickled_function import PickledFunction +from aiida.engine import launch +from .calculations import PythonJob +from .utils import get_or_create_code +from .data.serializer import serialize_to_aiida_nodes + + +def prepare_pythonjob_inputs( + function: Callable[..., Any], + function_inputs: dict[str, Any] = None, + function_outputs: dict[str, Any] = None, + code: AbstractCode = None, + command_info: dict[str, str] = None, + computer: str | Computer = "localhost", + metadata: dict[str, Any] | None = None, + upload_files: dict[str, str] = {}, + **kwargs: Any, + ) -> dict[str, Any]: + + """Prepare the inputs for PythonJob""" + import os + + # get the names kwargs for the PythonJob, which are the inputs before _wait + executor = PickledFunction.build_callable(function) + new_upload_files = {} + # change the string in the upload files to SingleFileData, or FolderData + for key, source in upload_files.items(): + # only alphanumeric and underscores are allowed in the key + # replace all "." 
with "_dot_" + new_key = key.replace(".", "_dot_") + if isinstance(source, str): + if os.path.isfile(source): + new_upload_files[new_key] = SinglefileData(file=source) + elif os.path.isdir(source): + new_upload_files[new_key] = FolderData(tree=source) + elif isinstance(source, (SinglefileData, FolderData)): + new_upload_files[new_key] = source + else: + raise ValueError(f"Invalid upload file type: {type(source)}, {source}") + # + if code is None: + command_info = command_info or {} + code = get_or_create_code(computer=computer, + **command_info) + # get the source code of the function + function_name = executor["name"] + if executor.get("is_pickle", False): + function_source_code = ( + executor["import_statements"] + + "\n" + + executor["source_code_without_decorator"] + ) + else: + function_source_code = ( + f"from {executor['module']} import {function_name}" + ) + + # serialize the kwargs into AiiDA Data + function_inputs = function_inputs or {} + function_inputs = serialize_to_aiida_nodes(function_inputs) + # transfer the args to kwargs + inputs = { + "process_label": "PythonJob<{}>".format(function_name), + "function_source_code": Str(function_source_code), + "function_name": Str(function_name), + "code": code, + "function_inputs": function_inputs, + "upload_files": new_upload_files, + "function_outputs": List(function_outputs), + "metadata": metadata or {}, + **kwargs, + } + return inputs + diff --git a/src/aiida_pythonjob/parsers/__init__.py b/src/aiida_pythonjob/parsers/__init__.py new file mode 100644 index 0000000..b518d8a --- /dev/null +++ b/src/aiida_pythonjob/parsers/__init__.py @@ -0,0 +1,3 @@ +from .pythonjob import PythonJobParser + +__all__ = ('PythonJobParser',) diff --git a/src/aiida_pythonjob/parsers/pythonjob.py b/src/aiida_pythonjob/parsers/pythonjob.py new file mode 100644 index 0000000..992167d --- /dev/null +++ b/src/aiida_pythonjob/parsers/pythonjob.py @@ -0,0 +1,124 @@ +"""Parser for an `PythonJob` job.""" +from aiida.parsers.parser import 
Parser +from aiida_pythonjob.data.serializer import general_serializer +from aiida.engine import ExitCode + + +class PythonJobParser(Parser): + """Parser for an `PythonJob` job.""" + + def parse(self, **kwargs): + """Parse the contents of the output files stored in the `retrieved` output node. + + The function_outputs could be a namespce, e.g., + function_outputs=[ + {"identifier": "namespace", "name": "add_multiply"}, + {"name": "add_multiply.add"}, + {"name": "add_multiply.multiply"}, + {"name": "minus"}, + ] + """ + import pickle + + function_outputs = self.node.inputs.function_outputs.get_list() + if len(function_outputs) == 0: + function_outputs = [{"name": "result"}] + self.output_list = function_outputs + # first we remove nested outputs, e.g., "add_multiply.add" + top_level_output_list = [ + output for output in self.output_list if "." not in output["name"] + ] + exit_code = 0 + try: + with self.retrieved.base.repository.open("results.pickle", "rb") as handle: + results = pickle.load(handle) + if isinstance(results, tuple): + if len(top_level_output_list) != len(results): + self.exit_codes.ERROR_RESULT_OUTPUT_MISMATCH + for i in range(len(top_level_output_list)): + top_level_output_list[i]["value"] = self.serialize_output( + results[i], top_level_output_list[i] + ) + elif isinstance(results, dict) and len(top_level_output_list) > 1: + # pop the exit code if it exists + exit_code = results.pop("exit_code", 0) + for output in top_level_output_list: + if output.get("required", False): + if output["name"] not in results: + self.exit_codes.ERROR_MISSING_OUTPUT + output["value"] = self.serialize_output( + results.pop(output["name"]), output + ) + # if there are any remaining results, raise an warning + if results: + self.logger.warning( + f"Found extra results that are not included in the output: {results.keys()}" + ) + elif isinstance(results, dict) and len(top_level_output_list) == 1: + exit_code = results.pop("exit_code", 0) + # if output name in results, use 
it + if top_level_output_list[0]["name"] in results: + top_level_output_list[0]["value"] = self.serialize_output( + results[top_level_output_list[0]["name"]], + top_level_output_list[0], + ) + # otherwise, we assume the results is the output + else: + top_level_output_list[0]["value"] = self.serialize_output( + results, top_level_output_list[0] + ) + elif len(top_level_output_list) == 1: + # otherwise, we assume the results is the output + top_level_output_list[0]["value"] = self.serialize_output( + results, top_level_output_list[0] + ) + else: + raise ValueError( + "The number of results does not match the number of outputs." + ) + for output in top_level_output_list: + self.out(output["name"], output["value"]) + if exit_code: + if isinstance(exit_code, dict): + exit_code = ExitCode(exit_code["status"], exit_code["message"]) + elif isinstance(exit_code, int): + exit_code = ExitCode(exit_code) + return exit_code + except OSError: + return self.exit_codes.ERROR_READING_OUTPUT_FILE + except ValueError as exception: + self.logger.error(exception) + return self.exit_codes.ERROR_INVALID_OUTPUT + + def find_output(self, name): + """Find the output with the given name.""" + for output in self.output_list: + if output["name"] == name: + return output + return None + + def serialize_output(self, result, output): + """Serialize outputs.""" + + name = output["name"] + if output.get("identifier", "Any").upper() in ["NAMESPACE", "WORKGRAPH.NAMESPACE"]: + if isinstance(result, dict): + serialized_result = {} + for key, value in result.items(): + full_name = f"{name}.{key}" + full_name_output = self.find_output(full_name) + if ( + full_name_output + and full_name_output.get("identifier", "Any").upper() + in ["NAMESPACE", "WORKGRAPH.NAMESPACE"] + ): + serialized_result[key] = self.serialize_output( + value, full_name_output + ) + else: + serialized_result[key] = general_serializer(value) + return serialized_result + else: + self.exit_codes.ERROR_INVALID_OUTPUT + else: + return 
general_serializer(result) diff --git a/src/aiida_pythonjob/utils.py b/src/aiida_pythonjob/utils.py new file mode 100644 index 0000000..52badc2 --- /dev/null +++ b/src/aiida_pythonjob/utils.py @@ -0,0 +1,29 @@ + +from aiida.orm import load_code, load_computer, Computer, InstalledCode +from aiida.common.exceptions import NotExistent + +def get_or_create_code( + label: str = "python3", + computer: str | Computer = "localhost", + filepath_executable: str = None, + prepend_text: str = "", +): + """Try to load code, create if not exit.""" + + try: + return load_code(f"{label}@{computer}") + except NotExistent: + description = f"Code on computer: {computer}" + computer = load_computer(computer) + filepath_executable = filepath_executable or label + code = InstalledCode( + computer=computer, + label=label, + description=description, + filepath_executable=filepath_executable, + default_calc_job_plugin="pythonjob.pythonjob", + prepend_text=prepend_text, + ) + + code.store() + return code \ No newline at end of file diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..327591a --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,18 @@ +import pytest + +pytest_plugins = "aiida.tools.pytest_fixtures" + + +@pytest.fixture(scope="session", autouse=True) +def aiida_profile(aiida_config, aiida_profile_factory): + """Create and load a profile with RabbitMQ as broker.""" + with aiida_profile_factory(aiida_config, broker_backend="core.rabbitmq") as profile: + yield profile + + +@pytest.fixture +def fixture_localhost(aiida_localhost): + """Return a localhost `Computer`.""" + localhost = aiida_localhost + localhost.set_default_mpiprocs_per_machine(1) + return localhost diff --git a/tests/input.txt b/tests/input.txt new file mode 100644 index 0000000..d8263ee --- /dev/null +++ b/tests/input.txt @@ -0,0 +1 @@ +2 \ No newline at end of file diff --git a/tests/inputs_folder/another_input.txt b/tests/inputs_folder/another_input.txt new file mode 100644 index 
0000000..e440e5c --- /dev/null +++ b/tests/inputs_folder/another_input.txt @@ -0,0 +1 @@ +3 \ No newline at end of file diff --git a/tests/test_data.py b/tests/test_data.py new file mode 100644 index 0000000..12f5a6e --- /dev/null +++ b/tests/test_data.py @@ -0,0 +1,24 @@ + +from aiida_pythonjob import PickledFunction + + +def test_PythonJob_typing(): + """Test function with typing.""" + from numpy import array + from typing import List + + def generate_structures( + strain_lst: List[float], + data: array, + strain_lst1: list = None, + data1: array = None, + ) -> list[array]: + pass + + + modules = PickledFunction.get_required_imports(generate_structures) + assert modules == { + "typing": {"List"}, + "builtins": {"list", "float"}, + "numpy": {"array"}, + } diff --git a/tests/test_parsers.py b/tests/test_parsers.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/test_pythonjob.py b/tests/test_pythonjob.py new file mode 100644 index 0000000..5cd31da --- /dev/null +++ b/tests/test_pythonjob.py @@ -0,0 +1,232 @@ +import pytest +from aiida_pythonjob import prepare_pythonjob_inputs, PythonJob +from aiida.engine import run_get_node + + +def test_function_default_outputs(fixture_localhost): + """Test decorator.""" + + def add(x, y): + return x + y + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + ) + result, node = run_get_node(PythonJob, **inputs) + print("result: ", result) + + assert result["result"].value == 3 + assert node.process_label == "PythonJob" + +def test_function_custom_outputs(fixture_localhost): + """Test decorator.""" + + def add(x, y): + return {"sum": x + y, "diff": x - y} + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[ + {"name": "sum"}, + {"name": "diff"}, + ] + ) + result, node = run_get_node(PythonJob, **inputs) + + assert result["sum"].value == 3 + assert result["diff"].value == -1 + + +@pytest.mark.skip("Can not inspect the built-in function.") 
+def test_importable_function(fixture_localhost): + """Test importable function.""" + from operator import add + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[ + {"name": "sum"}, + ] + ) + result, node = run_get_node(PythonJob, **inputs) + print("result: ", result) + assert result["sum"].value == 3 + + +def test_kwargs_inputs(fixture_localhost): + """Test function with kwargs.""" + + def add(x, y=1, **kwargs): + x += y + for value in kwargs.values(): + x += value + return x + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2, "a": 3, "b": 4}, + function_outputs=[ + {"name": "sum"}, + ] + ) + result, node = run_get_node(PythonJob, **inputs) + assert result["sum"].value == 10 + + + +def test_namespace_output(fixture_localhost): + """Test function with namespace output and input.""" + + def myfunc(x, y): + add = {"order1": x + y, "order2": x * x + y * y} + return { + "add_multiply": {"add": add, "multiply": x * y}, + "minus": x - y, + } + + inputs = prepare_pythonjob_inputs( + myfunc, + function_inputs={"x": 1, "y": 2}, + function_outputs=[ + { + "name": "add_multiply", + "identifier": "namespace", + }, + { + "name": "add_multiply.add", + "identifier": "namespace", + }, + {"name": "minus"}, + ] + ) + result, node = run_get_node(PythonJob, **inputs) + print("result: ", result) + + assert result["add_multiply"]["add"]["order1"].value == 3 + assert result["add_multiply"]["add"]["order2"].value == 5 + assert result["add_multiply"]["multiply"].value == 2 + + +def test_parent_folder(fixture_localhost): + """Test function with parent folder.""" + + def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + + def multiply(x, y): + with open("parent_folder/result.txt", "r") as f: + z = int(f.read()) + return x * y + z + + inputs1 = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "sum"}], + ) + result1, node1 
= run_get_node(PythonJob, inputs=inputs1) + + + inputs2 = prepare_pythonjob_inputs( + multiply, + function_inputs={"x": 1, "y": 2}, + function_outputs=[{"name": "product"}], + parent_folder=result1["remote_folder"], + ) + result2, node2 = run_get_node(PythonJob, inputs=inputs2) + + assert result2["product"].value == 5 + + +def test_upload_files(fixture_localhost): + """Test function with upload files.""" + + # create a temporary file "input.txt" in the current directory + with open("input.txt", "w") as f: + f.write("2") + + # create a temporary folder "inputs_folder" in the current directory + # and add a file "another_input.txt" in the folder + import os + + os.makedirs("inputs_folder", exist_ok=True) + with open("inputs_folder/another_input.txt", "w") as f: + f.write("3") + + def add(): + with open("input.txt", "r") as f: + a = int(f.read()) + with open("inputs_folder/another_input.txt", "r") as f: + b = int(f.read()) + return a + b + + + # ------------------------- Submit the calculation ------------------- + # we need use full path to the file + input_file = os.path.abspath("input.txt") + input_folder = os.path.abspath("inputs_folder") + inputs = prepare_pythonjob_inputs( + add, + upload_files = { + "input.txt": input_file, + "inputs_folder": input_folder, + }, + ) + result, node = run_get_node(PythonJob, inputs=inputs) + + # wait=True) + assert result["result"].value == 5 + + +def test_retrieve_files(fixture_localhost): + """Test retrieve files.""" + + def add(x, y): + z = x + y + with open("result.txt", "w") as f: + f.write(str(z)) + return x + y + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": 1, "y": 2}, + metadata = { + "options": { + "additional_retrieve_list": ["result.txt"], + } + }, + ) + result, node = run_get_node(PythonJob, inputs=inputs) + # ------------------------- Submit the calculation ------------------- + + assert ( + "result.txt" in result["retrieved"].list_object_names() + ) + + +def test_exit_code(fixture_localhost): + 
"""Test function with exit code.""" + from numpy import array + + def add(x: array, y: array) -> array: + sum = x + y + if (sum < 0).any(): + exit_code = {"status": 410, "message": "Some elements are negative"} + return {"sum": sum, "exit_code": exit_code} + return {"sum": sum} + + inputs = prepare_pythonjob_inputs( + add, + function_inputs={"x": array([1, 1]), "y": array([1, -2])}, + ) + result, node = run_get_node(PythonJob, inputs=inputs) + assert node.exit_status == 410 + assert ( + node.exit_message + == "Some elements are negative" + )