Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
24 commits
Select commit Hold shift + click to select a range
ec6dc93
Allow helper functions at Hooks
tequdev Jan 19, 2026
5d90716
Add tests for Hooks fee
tequdev Jan 20, 2026
5dd1198
Merge commit '5d9071695a616e1af378142e09649abc7d0e8afa' into hook-hel…
tequdev Jan 20, 2026
043c60b
Update new tests
tequdev Jan 20, 2026
2e128ac
add execution test
tequdev Jan 21, 2026
63096d5
add test
tequdev Jan 21, 2026
e5b21f0
Merge pull request #692 from Xahau/sync-2.4.0-rebased
RichardAH Feb 24, 2026
65837f4
fix: Add AMMv1_3 amendment (#5203)
gregtatcam Jun 2, 2025
ec65e62
Merge fixAMMv1_3 amendment into featureAMM amendment
tequdev Feb 19, 2026
8673599
fixAMMClawbackRounding: adjust last holder's LPToken balance (#5513)
yinyiqian1 Jul 11, 2025
8cfee6c
Merge fixAMMClawbackRounding amendment into featureAMMClawback amendment
tequdev Feb 19, 2026
f96d9b6
Add tests for Hooks fee
tequdev Jan 20, 2026
1ba444a
Updated tests to align with the changes merged into the dev branch.
tequdev Feb 17, 2026
9bfca63
Update util_keylet fee test
tequdev Feb 24, 2026
2d29518
fix: typo `SignersListSet`
tequdev Mar 5, 2026
8c4c158
output ccache configuration in release-builder
tequdev Nov 27, 2025
f90ed41
enable ccache direct_mode
tequdev Nov 27, 2025
25123b3
chore: replace levelization shell script with python
sublimator Mar 13, 2026
4150f03
chore: use improved levelization script with threading and argparse
sublimator Mar 13, 2026
7f6ac75
Revert "chore: use improved levelization script with threading and ar…
sublimator Mar 13, 2026
66f7294
Test: hint build_test_hooks.sh when hook wasm is empty in hso()
tequdev Apr 1, 2026
05a3e04
Fix BEAST_ENHANCED_LOGGING not working and restore original behavior
tequdev Mar 6, 2026
cd00ed7
change build instructions url
alloynetworks Mar 13, 2026
04c2929
Merge remote-tracking branch 'upstream/dev' into hook-helper-func
tequdev Apr 27, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions .github/workflows/levelization.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ jobs:
steps:
- uses: actions/checkout@v3
- name: Check levelization
run: Builds/levelization/levelization.sh
run: python Builds/levelization/levelization.py
- name: Check for differences
id: assert
run: |
Expand Down Expand Up @@ -40,7 +40,7 @@ jobs:
To fix it, you can do one of two things:
1. Download and apply the patch generated as an artifact of this
job to your repo, commit, and push.
2. Run './Builds/levelization/levelization.sh' in your repo,
2. Run 'python Builds/levelization/levelization.py' in your repo,
commit, and push.

See Builds/levelization/README.md for more info.
Expand Down
6 changes: 6 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,9 @@ Builds/levelization/results/paths.txt
Builds/levelization/results/includes/
Builds/levelization/results/includedby/

# Python
__pycache__

# Ignore tmp directory.
tmp

Expand Down Expand Up @@ -126,3 +129,6 @@ generated

# Suggested in-tree build directory
/.build/

guard_checker
guard_checker.dSYM
6 changes: 3 additions & 3 deletions Builds/levelization/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -50,7 +50,7 @@ that `test` code should *never* be included in `ripple` code.)

## Validation

The [levelization.sh](levelization.sh) script takes no parameters,
The [levelization.py](levelization.py) script takes no parameters,
reads no environment variables, and can be run from any directory,
as long as it is in the expected location in the rippled repo.
It can be run at any time from within a checked out repo, and will
Expand Down Expand Up @@ -84,7 +84,7 @@ It generates many files of [results](results):
Github Actions workflow to test that levelization loops haven't
changed. Unfortunately, if changes are detected, it can't tell if
they are improvements or not, so if you have resolved any issues or
done anything else to improve levelization, run `levelization.sh`,
done anything else to improve levelization, run `levelization.py`,
and commit the updated results.

The `loops.txt` and `ordering.txt` files relate the modules
Expand All @@ -108,7 +108,7 @@ The committed files hide the detailed values intentionally, to
prevent false alarms and merging issues, and because it's easy to
get those details locally.

1. Run `levelization.sh`
1. Run `levelization.py`
2. Grep the modules in `paths.txt`.
* For example, if a cycle is found `A ~= B`, simply `grep -w
A Builds/levelization/results/paths.txt | grep -w B`
283 changes: 283 additions & 0 deletions Builds/levelization/levelization.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,283 @@
#!/usr/bin/env python3

"""
Usage: levelization.py
This script takes no parameters, and can be called from any directory in the file system.
"""

import os
import re
import shutil
import sys
from collections import defaultdict
from pathlib import Path

# Compile regex patterns once at module level
# Matches an #include directive whose path contains a '/' and a ".h"
# header, i.e. cross-directory header includes (flat local includes and
# non-header includes are deliberately skipped).
INCLUDE_PATTERN = re.compile(r"^\s*#include.*/.*\.h")
# Captures the path between <...> or "..." in an #include directive.
INCLUDE_PATH_PATTERN = re.compile(r'[<"]([^>"]+)[>"]')


def dictionary_sort_key(s):
    """Build a sort key emulating GNU `sort -d` (dictionary order).

    Dictionary order compares only blanks and alphanumeric characters;
    every other character is dropped from the key.
    """
    kept = [ch for ch in s if ch.isalnum() or ch.isspace()]
    return "".join(kept)


def get_level(file_path):
    """Map a source file path to its levelization module name.

    Uses the second and third path components, mirroring the original
    shell pipeline `cut -d/ -f 2,3`.

    Examples:
        src/ripple/app/main.cpp -> ripple.app
        src/test/app/Import_test.cpp -> test.app
    """
    segments = file_path.split("/")

    if len(segments) >= 3:
        module = "/".join(segments[1:3])
    elif len(segments) >= 2:
        module = segments[1] + "/toplevel"
    else:
        module = file_path

    # A filename in the last component is replaced by the "toplevel" label,
    # a workaround for `sort` inconsistencies between utility versions.
    if "." in module.split("/")[-1]:
        module = module.rsplit("/", 1)[0] + "/toplevel"

    return module.replace("/", ".")


def extract_include_level(include_line):
    """Map an #include directive to a module name, or None if unparseable.

    Takes the first two directory components of the included path,
    mirroring the original shell pipeline `cut -d/ -f 1,2`.

    Examples:
        #include <ripple/basics/base_uint.h> -> ripple.basics
        #include "ripple/app/main/Application.h" -> ripple.app
    """
    # Same pattern as INCLUDE_PATH_PATTERN: the path between <...> or "...".
    match = re.search(r'[<"]([^>"]+)[>"]', include_line)
    if match is None:
        return None

    raw_path = match.group(1)
    segments = raw_path.split("/")
    module = "/".join(segments[:2]) if len(segments) >= 2 else raw_path

    # A filename in the last component is replaced by the "toplevel" label.
    if "." in module.split("/")[-1]:
        module = module.rsplit("/", 1)[0] + "/toplevel"

    return module.replace("/", ".")


def find_repository_directories(start_path, depth_limit=10):
    """Locate the repository root by walking up from *start_path*.

    The root is the first ancestor (up to *depth_limit* levels) that
    contains a 'src' and/or 'include' directory.

    Returns:
        (root, dirs) where dirs lists the existing src/include paths,
        src first.

    Raises:
        RuntimeError: if no such ancestor is found.
    """
    candidate = start_path.resolve()

    for _ in range(depth_limit):
        # Probe in a fixed order so callers always see src before include.
        found = [
            candidate / name
            for name in ("src", "include")
            if (candidate / name).exists()
        ]
        if found:
            return candidate, found

        if candidate.parent == candidate:
            # Reached the filesystem root without a match.
            break
        candidate = candidate.parent

    raise RuntimeError(
        "Could not find repository root. "
        "Expected to find a directory containing 'src' and/or 'include' folders."
    )


def _reset_results_dir(script_dir):
    """Delete any stale results/ directory and recreate it empty."""
    results_dir = script_dir / "results"
    if results_dir.exists():
        shutil.rmtree(results_dir)
    results_dir.mkdir()
    return results_dir


def _scan_raw_includes(repo_root, scan_dirs, results_dir):
    """Collect every cross-directory #include under *scan_dirs*.

    Each matching directive is echoed to stdout, appended to
    results/rawincludes.txt as "relative/path:#include ...", and returned
    as a list of (relative_path, stripped_directive) tuples.
    """
    print("\nScanning for raw includes...")
    raw_includes = []
    rawincludes_file = results_dir / "rawincludes.txt"

    with open(rawincludes_file, "w", buffering=8192) as raw_f:
        for dir_path in scan_dirs:
            for file_path in dir_path.rglob("*"):
                if not file_path.is_file():
                    continue
                try:
                    rel_path_str = str(file_path.relative_to(repo_root))
                    with open(
                        file_path, "r", encoding="utf-8", errors="ignore", buffering=8192
                    ) as f:
                        for line in f:
                            # Cheap substring pre-filter before the regex;
                            # boost includes are excluded by design.
                            if "#include" not in line or "boost" in line:
                                continue
                            if INCLUDE_PATTERN.match(line):
                                line_stripped = line.strip()
                                entry = f"{rel_path_str}:{line_stripped}\n"
                                print(entry, end="")
                                raw_f.write(entry)
                                raw_includes.append((rel_path_str, line_stripped))
                except Exception as e:
                    # Best-effort scan: report unreadable files and move on.
                    print(f"Error reading {file_path}: {e}", file=sys.stderr)

    return raw_includes


def _build_paths(raw_includes, results_dir):
    """Aggregate include edges between modules and write paths.txt.

    Returns the (level, include_level) -> count items sorted in
    dictionary order, ready for the flat-file database.
    """
    print("Build levelization paths")
    path_counts = defaultdict(int)

    for file_path, include_line in raw_includes:
        include_level = extract_include_level(include_line)
        if not include_level:
            continue
        level = get_level(file_path)
        # Includes within the same module are not levelization edges.
        if level != include_level:
            path_counts[(level, include_level)] += 1

    print("Sort and deduplicate paths")
    sorted_items = sorted(
        path_counts.items(),
        key=lambda x: (dictionary_sort_key(x[0][0]), dictionary_sort_key(x[0][1])),
    )

    paths_file = results_dir / "paths.txt"
    with open(paths_file, "w") as f:
        for (level, include_level), count in sorted_items:
            line = f"{count:7} {level} {include_level}\n"
            print(line.rstrip())
            f.write(line)

    return sorted_items


def _write_flat_database(sorted_items, results_dir):
    """Split the edge list into per-module files.

    results/includes/<module> lists what the module includes;
    results/includedby/<module> lists who includes it.
    Returns (includes_dir, includedby_dir).
    """
    print("Split into flat-file database")
    includes_dir = results_dir / "includes"
    includedby_dir = results_dir / "includedby"
    includes_dir.mkdir()
    includedby_dir.mkdir()

    includes_data = defaultdict(list)
    includedby_data = defaultdict(list)
    for (level, include_level), count in sorted_items:
        includes_data[level].append((include_level, count))
        includedby_data[include_level].append((level, count))

    for level in sorted(includes_data, key=dictionary_sort_key):
        with open(includes_dir / level, "w") as f:
            for include_level, count in includes_data[level]:
                line = f"{include_level} {count}\n"
                print(line.rstrip())
                f.write(line)

    for include_level in sorted(includedby_data, key=dictionary_sort_key):
        with open(includedby_dir / include_level, "w") as f:
            for level, count in includedby_data[include_level]:
                line = f"{level} {count}\n"
                print(line.rstrip())
                f.write(line)

    return includes_dir, includedby_dir


def _find_loops(includes_dir, results_dir):
    """Detect mutually-including module pairs.

    Writes loops.txt (mutual pairs, with a direction hint when one side
    dominates by more than 3 includes) and ordering.txt (one-way edges),
    then prints both files.
    """
    print("Search for loops")
    loops_file = results_dir / "loops.txt"
    ordering_file = results_dir / "ordering.txt"

    # Pre-load all include files into memory for fast lookup.
    includes_cache = {}
    includes_lookup = {}
    for include_file in sorted(includes_dir.iterdir(), key=lambda p: p.name):
        if not include_file.is_file():
            continue
        edges = []
        lookup = {}
        with open(include_file, "r") as f:
            for line in f:
                parts = line.strip().split()
                if len(parts) >= 2:
                    name, count = parts[0], int(parts[1])
                    edges.append((name, count))
                    lookup[name] = count
        includes_cache[include_file.name] = edges
        includes_lookup[include_file.name] = lookup

    loops_found = set()

    with open(loops_file, "w", buffering=8192) as loops_f, open(
        ordering_file, "w", buffering=8192
    ) as ordering_f:
        for source in sorted(includes_cache):
            for include, include_freq in includes_cache[source]:
                if include not in includes_lookup:
                    continue

                source_freq = includes_lookup[include].get(source)
                if source_freq is None:
                    # One-way dependency: a clean ordering edge.
                    ordering_f.write(f"{source} > {include}\n")
                    continue

                # Mutual dependency: report each unordered pair once.
                loop_key = tuple(sorted([source, include]))
                if loop_key in loops_found:
                    continue
                loops_found.add(loop_key)

                loops_f.write(f"Loop: {source} {include}\n")
                diff = include_freq - source_freq
                if diff > 3:
                    loops_f.write(f"  {source} > {include}\n\n")
                elif diff < -3:
                    loops_f.write(f"  {include} > {source}\n\n")
                elif source_freq == include_freq:
                    loops_f.write(f"  {include} == {source}\n\n")
                else:
                    loops_f.write(f"  {include} ~= {source}\n\n")

    print("\nOrdering:")
    with open(ordering_file, "r") as f:
        print(f.read(), end="")

    print("\nLoops:")
    with open(loops_file, "r") as f:
        print(f.read(), end="")


def main():
    """Rebuild the levelization results database.

    Equivalent to the old levelization.sh: scans the repository's src/
    and include/ trees for #include directives, aggregates them per
    module pair, and writes rawincludes.txt, paths.txt, the includes/
    and includedby/ flat-file databases, loops.txt, and ordering.txt
    under Builds/levelization/results/.

    Exits with status 1 if the repository root cannot be located.
    """
    script_dir = Path(__file__).parent.resolve()
    os.chdir(script_dir)

    results_dir = _reset_results_dir(script_dir)

    try:
        repo_root, scan_dirs = find_repository_directories(script_dir)
        print(f"Found repository root: {repo_root}")
        for scan_dir in scan_dirs:
            print(f"  Scanning: {scan_dir.relative_to(repo_root)}")
    except RuntimeError as e:
        print(f"Error: {e}", file=sys.stderr)
        sys.exit(1)

    raw_includes = _scan_raw_includes(repo_root, scan_dirs, results_dir)
    sorted_items = _build_paths(raw_includes, results_dir)
    includes_dir, _ = _write_flat_database(sorted_items, results_dir)
    _find_loops(includes_dir, results_dir)


if __name__ == "__main__":
    main()
Loading
Loading