Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
46 changes: 46 additions & 0 deletions .github/workflows/quality_checks.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
# This is a basic workflow to help you get started with Actions

name: quality checks

# Controls when the workflow will run
on:
  # Triggers the workflow on push or pull request events but only for the main branch
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
  # This workflow contains a single job called "TestBuild"
  TestBuild:
    # The type of runner that the job will run on
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.12"]

    # Steps represent a sequence of tasks that will be executed as part of the job
    steps:
      # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it
      - uses: actions/checkout@v4

      # Install python
      - name: Set up Python ${{ matrix.python-version }}
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      # Install the package
      - name: Install packages
        run: |
          python -m pip install --upgrade pip
          python -m pip install pytest pandas f4enix

      # Run pytest
      - name: Testing
        run: |
          python -m pytest
5 changes: 4 additions & 1 deletion .gitignore
Original file line number Diff line number Diff line change
@@ -1 +1,4 @@
get_wwinp.py
get_wwinp.py
*pyc
.vscode
**/__pycache__/
Empty file added tests/__init__.py
Empty file.
132 changes: 132 additions & 0 deletions tests/test_quality_jade.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,132 @@
import json
import os
from pathlib import Path
import pandas as pd
import re
from f4enix import Input

# Case-insensitive pattern for the MCNP RMODE card; used with .match(),
# i.e. it only flags RMODE at the very start of a line.
rmode_pat = re.compile(r"RMODE", re.IGNORECASE)

# Root of the benchmark repository layout, relative to the working directory.
ROOT = Path("jade_open_benchmarks")


def test_metadata():
    """Check every benchmark's metadata for internal consistency.

    For each benchmark folder under ``inputs`` verify that:

    - ``benchmark_metadata.json`` exists and its ``name`` matches the folder;
    - every subrun implements the same set of codes;
    - the metadata specifies a version for each implemented code.
    """
    for folder in os.listdir(ROOT / "inputs"):
        path_folder = ROOT / "inputs" / folder
        if not path_folder.is_dir():
            continue
        with open(path_folder / "benchmark_metadata.json") as f:
            data = json.load(f)
        assert data["name"] == folder

        # check internal structure; reset the reference per benchmark so a
        # folder with no subruns cannot inherit codes from a previous one
        ref_codes = None
        for subrun in os.listdir(path_folder):
            if subrun == "benchmark_metadata.json":
                continue

            # sort: os.listdir returns entries in arbitrary, OS-dependent
            # order, so a raw list comparison would be flaky
            codes = sorted(os.listdir(path_folder / subrun))
            if ref_codes is None:
                ref_codes = codes
                continue

            # check that in all subrun the same codes are implemented
            assert codes == ref_codes, f"Subrun {subrun}"

        # check that the version is specified for each code
        for code in ref_codes or []:
            assert data["version"][code]


def test_error_relative():
    """Check that experimental uncertainties are stored as relative errors.

    A maximum ``Error`` value above 1 suggests the column actually holds
    absolute errors; folders known to legitimately exceed 1 are skipped.
    """
    # these are exceptions where the rel error is indeed above 1
    manually_checked = ["Tiara-BC", "FNS-TOF"]

    offending = set()
    exp_root = ROOT / "exp_results"
    for folder in os.listdir(exp_root):
        if folder in manually_checked:
            continue
        for filename in os.listdir(exp_root / folder):
            csv_path = exp_root / folder / filename
            df = pd.read_csv(csv_path)
            max_error = df["Error"].max()
            if max_error > 1:
                offending.add(folder)
                print(f"Absolute error found in {csv_path}: max error = {max_error}")

    absolute_errors = list(offending)
    assert len(absolute_errors) == 0, f"Folders with absolute errors: {absolute_errors}"


def test_no_RMODE_card():
    """Check that no MCNP input file contains an RMODE card at line start."""
    for input_path in mcnp_input_paths_generator():
        with open(input_path) as handle:
            has_rmode = any(rmode_pat.match(line) for line in handle)
        if has_rmode:
            raise AssertionError(f"RMODE card found in {input_path}")


def test_f4enix_readability():
    """Check that every MCNP input file can be parsed by f4enix.

    Parsing is the assertion itself: an unreadable input raises.
    """
    for input_path in mcnp_input_paths_generator():
        Input.from_input(input_path)


def test_exp_data_naming_consistency():
    """Check that each experimental result file maps to an input folder.

    The benchmark case name is taken as everything before the first space
    in the file name; it must exist under ``inputs/<benchmark>/``.
    """
    known_exceptions = ["Oktavian_Pb"]
    exp_root = ROOT / "exp_results"
    for folder in os.listdir(exp_root):
        for filename in os.listdir(exp_root / folder):
            # case name = everything before the first space in the filename
            name, _, _ = filename.partition(" ")
            if name in known_exceptions:
                continue
            assert (ROOT / "inputs" / folder / name).exists(), (
                f"File {filename} in {folder} does not match any input file"
            )


def test_mcnp_filename_matches_subrun():
    """Test that MCNP input file names match their parent subrun folder names.

    Each ``<benchmark>/<subrun>/mcnp`` directory must contain exactly one
    ``.i`` file, named ``<subrun>.i``.
    """
    inputs_root = ROOT / "inputs"
    for benchmark in os.listdir(inputs_root):
        benchmark_path = inputs_root / benchmark
        if not benchmark_path.is_dir():
            continue
        for subrun in os.listdir(benchmark_path):
            subrun_path = benchmark_path / subrun
            if not subrun_path.is_dir():
                continue
            if "mcnp" not in os.listdir(subrun_path):
                continue

            mcnp_path = subrun_path / "mcnp"
            # Get all .i files in the mcnp directory
            i_files = list(mcnp_path.glob("*.i"))
            n_found = len(i_files)
            assert n_found == 1, (
                f"Expected exactly 1 .i file in {mcnp_path}, found {n_found}"
            )

            expected_filename = f"{subrun}.i"
            actual_filename = i_files[0].name
            assert actual_filename == expected_filename, (
                f"File name mismatch in {mcnp_path}: expected '{expected_filename}', found '{actual_filename}'"
            )


def mcnp_input_paths_generator():
    """Yield the expected MCNP input path for every subrun that has an
    ``mcnp`` entry: ``inputs/<benchmark>/<subrun>/mcnp/<subrun>.i``.

    Note: the path is derived from the subrun name, not discovered on disk;
    its existence is guaranteed by ``test_mcnp_filename_matches_subrun``.
    """
    inputs_root = ROOT / "inputs"
    for benchmark in os.listdir(inputs_root):
        benchmark_path = inputs_root / benchmark
        if not benchmark_path.is_dir():
            continue
        for subrun in os.listdir(benchmark_path):
            subrun_path = benchmark_path / subrun
            if not subrun_path.is_dir():
                continue
            if "mcnp" in os.listdir(subrun_path):
                yield subrun_path / "mcnp" / f"{subrun}.i"