diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md
new file mode 100644
index 00000000..94016b16
--- /dev/null
+++ b/.github/pull_request_template.md
@@ -0,0 +1,24 @@
+# Description of the changes
+
+
+Check all that apply:
+- [ ] Updated documentation
+- [ ] Source added/refactored
+- [ ] Added unit tests
+- [ ] Added integration tests
+- [ ] (If applicable) Verified that manual tests requiring the /SNS and /HFIR filesystems pass
+
+**References:**
+- Links to IBM EWM items:
+- Links to related issues or pull requests:
+
+# Manual test for the reviewer
+(Instructions for testing here)
+
+# Check list for the reviewer
+- [ ] best software practices
+  - [ ] clearly named variables (better to be verbose in variable names)
+  - [ ] code comments explaining the intent of code blocks
+- [ ] All the tests are passing
+- [ ] The documentation is up to date
+- [ ] Code comments added where needed to explain intent
diff --git a/.github/workflows/systemtests.yml b/.github/workflows/systemtests.yml
index 48ca56e0..2ca5fabb 100644
--- a/.github/workflows/systemtests.yml
+++ b/.github/workflows/systemtests.yml
@@ -32,7 +32,7 @@ jobs:
conda activate webmon
make all
- name: Stand up docker containers
- run: docker-compose up --build -d
+ run: docker compose up --build -d
env:
DJANGO_SETTINGS_MODULE: reporting.reporting_app.settings.envtest
LDAP_SERVER_URI: .
@@ -49,4 +49,4 @@ jobs:
LDAP_DOMAIN_COMPONENT: .
LIVE_PLOT_SECRET_KEY: secretKey
- name: Stand down docker containers
- run: docker-compose down
+ run: docker compose down
diff --git a/Makefile b/Makefile
index c57f0396..97db47ac 100644
--- a/Makefile
+++ b/Makefile
@@ -10,7 +10,7 @@ REPORT_DB_INIT=/opt/conda/lib/python$(PYTHON_VERSION)/site-packages/reporting/fi
# command to run docker compose. change this to be what you have installed
# this can be overriden on the command line
# DOCKER_COMPOSE="docker compose" make startdev
-DOCKER_COMPOSE ?= docker-compose
+DOCKER_COMPOSE ?= docker compose
help:
@@ -127,19 +127,19 @@ nginx/nginx.crt nginx/nginx.key:
openssl req -x509 -out nginx/nginx.crt -keyout nginx/nginx.key -newkey rsa:2048 -nodes -sha256 --config nginx/san.cnf
localdev/up: ## create images and start containers for local development. Doesn't update python wheels, though.
- docker-compose --file docker-compose.yml up --build
+ docker compose --file docker-compose.yml up --build
localdev/dbup: ## dbdumpfile=database_dump_file.sql DATABASE_PASS=$(dotenv get DATABASE_PASS) make localdev/dbup
if ! test -f "${dbdumpfile}"; then echo "dbdumpfile does not exists" && false; fi
if test -z "${DATABASE_PASS}"; then echo "DATABASE_PASS undefined" && false; fi
- docker-compose --file docker-compose.yml stop
- docker-compose --file docker-compose.yml down --volumes
+ docker compose --file docker-compose.yml stop
+ docker compose --file docker-compose.yml down --volumes
sleep 2s
- docker-compose --file docker-compose.yml up --detach db
+ docker compose --file docker-compose.yml up --detach db
sleep 10s # give time for the database service to be ready
- docker exec -i data_workflow_db_1 /bin/bash -c "pg_restore -d workflow -U workflow" < ${dbdumpfile} | true # continue even if returned errors
- docker exec -i data_workflow_db_1 /bin/bash -c "psql -d workflow -U workflow -c \"ALTER ROLE workflow WITH PASSWORD '${DATABASE_PASS}';\""
- LOAD_INITIAL_DATA="false" docker-compose --file docker-compose.yml up --build
+ docker exec -i data_workflow-db-1 /bin/bash -c "pg_restore -d workflow -U workflow" < ${dbdumpfile} | true # continue even if returned errors
+ docker exec -i data_workflow-db-1 /bin/bash -c "psql -d workflow -U workflow -c \"ALTER ROLE workflow WITH PASSWORD '${DATABASE_PASS}';\""
+ LOAD_INITIAL_DATA="false" docker compose --file docker-compose.yml up --build
clean: wheel/clean ## delete the SNS data and all the python wheels
rm -f SNSdata.tar.gz
diff --git a/README.rst b/README.rst
index 6c0d2c95..64ce80ef 100644
--- a/README.rst
+++ b/README.rst
@@ -1,3 +1,6 @@
+.. image:: https://readthedocs.org/projects/data-workflow/badge/?version=latest
+ :target: https://data-workflow.readthedocs.io/en/latest/?badge=latest
+ :alt: Documentation Status
.. image:: https://github.com/neutrons/data_workflow/actions/workflows/ci.yml/badge.svg?branch=next
:alt: CI
:target: https://github.com/neutrons/data_workflow/actions/workflows/ci.yml?query=branch:next
@@ -19,7 +22,7 @@ Dependencies:
* `MySQLdb `_ if using MySQL
* `psycopg2 `_ if using PostgreSQL
-It consists of 3 applications (Workflow Manager, Web Monitor, and DASMON Listener) which are deployed via docker-compose.
+It consists of 3 applications (Workflow Manager, Web Monitor, and DASMON Listener) which are deployed via docker compose.
Workflow Manager
----------------
diff --git a/docs/developer/instruction/build.rst b/docs/developer/instruction/build.rst
index ca261dd8..47f5fe4f 100644
--- a/docs/developer/instruction/build.rst
+++ b/docs/developer/instruction/build.rst
@@ -47,7 +47,7 @@ The system test are run via `.github/workflow/systemtests.yml `.
diff --git a/docs/developer/instruction/docker.rst b/docs/developer/instruction/docker.rst
index 5d1f293a..579b6d19 100644
--- a/docs/developer/instruction/docker.rst
+++ b/docs/developer/instruction/docker.rst
@@ -5,23 +5,23 @@ Docker information
:maxdepth: 2
.. note::
- This document is updated, however, it may be good to read the ``docker-compose`` and ``Dockerfile.*`` in the repository themselves for the most up-to-date information.
+ This document is updated; however, it may be good to read the ``docker compose`` and ``Dockerfile.*`` in the repository themselves for the most up-to-date information.
- This guide assumes that ``docker`` and `docker-compose`_ are present on your system.
+ This guide assumes that ``docker`` and `docker compose`_ are present on your system.
Starting and Stopping
---------------------
-While docker can be used to start each individual container separately, using ``docker-compose up --build`` is the preferred method because it starts all services in the correct order.
+While docker can be used to start each individual container separately, using ``docker compose up --build`` is the preferred method because it starts all services in the correct order.
Pressing ``ctrl-c`` will cleanly shutdown interactive docker.
-Pressing ``ctrl-c`` multiple times will kill the running images and leave docker in a somewhat funny state that likely requires running ``docker-compose down`` before starting again
+Pressing ``ctrl-c`` multiple times will kill the running images and leave docker in a somewhat funny state that likely requires running ``docker compose down`` before starting again
An additional flag ``-d`` can be supplied to run docker in detached mode.
.. note::
- Use ``docker-compose --file `` to select a different configuration
+ Use ``docker compose --file `` to select a different configuration
-To start a single image, supply its name as an additional argument to ``docker-compose up``.
-To stop all images, including in detached mode, run ``docker-compose down``.
+To start a single image, supply its name as an additional argument to ``docker compose up``.
+To stop all images, including in detached mode, run ``docker compose down``.
Cleaning docker
---------------
@@ -46,9 +46,9 @@ Misc
* Add option ``--build`` to force rebuild the container if the local changes are not reflected in the container.
* Add option ``--force-recreate`` to recreate all images if ``--build`` does not work.
* If all fails (e.g. the local changes are not showing up in the runtime instances):
- * stop the instance with ``docker-compose down``.
+ * stop the instance with ``docker compose down``.
* prune caches of images, container and volumes.
- * restart the instance with ``docker-compose up -d --build --force-recreate``.
+ * restart the instance with ``docker compose up -d --build --force-recreate``.
2. If you cannot find web-monitor at ``localhost``, it is possible that the standard http port 80 is used by another application. Here are two possible solutions:
@@ -56,5 +56,5 @@ Misc
* Modify `the port of nginx`_ in the docker compose file to use a different port (e.g. change to ``81:80``).
.. _docker: https://www.docker.com/
-.. _docker-compose: https://docs.docker.com/compose/
+.. _docker compose: https://docs.docker.com/compose/
.. _the port of nginx: https://github.com/neutrons/data_workflow/blob/next/docker-compose.yml
diff --git a/tests/test_ReductionSetupPageView.py b/tests/test_ReductionSetupPageView.py
index 09249229..cfd25bf1 100644
--- a/tests/test_ReductionSetupPageView.py
+++ b/tests/test_ReductionSetupPageView.py
@@ -31,21 +31,21 @@ def logged_in_client(self, next, username, password):
def prepareEnvironmentForReductionScriptGeneration(self):
os.system(
"""
- docker exec data_workflow_autoreducer_1 mkdir -p /SNS/ARCS/IPTS-123/nexus
- docker exec data_workflow_autoreducer_1 touch /SNS/ARCS/IPTS-123/nexus/ARCS_100.nxs.h5
- docker exec data_workflow_autoreducer_1 mkdir -p /SNS/ARCS/shared/autoreduce/vanadium_files
- docker exec data_workflow_autoreducer_1 touch /SNS/ARCS/shared/autoreduce/reduce_ARCS_default.py
- docker exec data_workflow_autoreducer_1 touch /SNS/ARCS/shared/autoreduce/reduce_ARCS.py
- docker exec data_workflow_autoreducer_1 touch /SNS/ARCS/shared/autoreduce/reduce_ARCS.py.template
- docker exec -i data_workflow_autoreducer_1 bash -c 'echo "#!/usr/bin/env python3\n# this is a template\ndef init():\nprint(5)\n" > /SNS/ARCS/shared/autoreduce/reduce_ARCS.py.template'
- docker exec data_workflow_autoreducer_1 touch /SNS/ARCS/shared/autoreduce/ARCS_2X1_grouping.xml
- docker exec data_workflow_autoreducer_1 touch /SNS/ARCS/shared/autoreduce/vanadium_files/test_van201562.nxs
+ docker exec data_workflow-autoreducer-1 mkdir -p /SNS/ARCS/IPTS-123/nexus
+ docker exec data_workflow-autoreducer-1 touch /SNS/ARCS/IPTS-123/nexus/ARCS_100.nxs.h5
+ docker exec data_workflow-autoreducer-1 mkdir -p /SNS/ARCS/shared/autoreduce/vanadium_files
+ docker exec data_workflow-autoreducer-1 touch /SNS/ARCS/shared/autoreduce/reduce_ARCS_default.py
+ docker exec data_workflow-autoreducer-1 touch /SNS/ARCS/shared/autoreduce/reduce_ARCS.py
+ docker exec data_workflow-autoreducer-1 touch /SNS/ARCS/shared/autoreduce/reduce_ARCS.py.template
+ docker exec -i data_workflow-autoreducer-1 bash -c 'echo "#!/usr/bin/env python3\n# this is a template\ndef init():\nprint(5)\n" > /SNS/ARCS/shared/autoreduce/reduce_ARCS.py.template'
+ docker exec data_workflow-autoreducer-1 touch /SNS/ARCS/shared/autoreduce/ARCS_2X1_grouping.xml
+ docker exec data_workflow-autoreducer-1 touch /SNS/ARCS/shared/autoreduce/vanadium_files/test_van201562.nxs
""" # noqa: E501
)
def getReductionScriptContents(self):
return subprocess.check_output(
- "docker exec data_workflow_autoreducer_1 cat /SNS/ARCS/shared/autoreduce/reduce_ARCS.py", shell=True
+ "docker exec data_workflow-autoreducer-1 cat /SNS/ARCS/shared/autoreduce/reduce_ARCS.py", shell=True
).decode()
def initReductionGroup(self, conn, cursor):
@@ -101,7 +101,7 @@ def getReductionData(self, instrument_scientist_client):
def testReduction(self, instrument_scientist_client):
# backup reduce_ARCS.py
os.system(
- """docker exec -i data_workflow_autoreducer_1 bash -c \
+ """docker exec -i data_workflow-autoreducer-1 bash -c \
'cp /SNS/ARCS/shared/autoreduce/reduce_ARCS.py /tmp/reduce_ARCS.py'"""
)
self.prepareEnvironmentForReductionScriptGeneration()
@@ -127,6 +127,6 @@ def testReduction(self, instrument_scientist_client):
# return reduce_ARCS.py back to starting state
os.system(
- """docker exec -i data_workflow_autoreducer_1 bash -c \
+ """docker exec -i data_workflow-autoreducer-1 bash -c \
'cp /tmp/reduce_ARCS.py /SNS/ARCS/shared/autoreduce/reduce_ARCS.py'"""
)