diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 4fd5cfe..867311c 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -8,8 +8,3 @@ repos:
rev: 7.1.2
hooks:
- id: flake8
-- repo: https://github.com/pre-commit/mirrors-mypy
- rev: v1.15.0
- hooks:
- - id: mypy
- exclude: ^(docs/|example-plugin/)
diff --git a/Makefile b/Makefile
index c3877cc..2e24efc 100644
--- a/Makefile
+++ b/Makefile
@@ -15,7 +15,6 @@ validator_coldkey_name = validator-base
validator_hotkey_name = default
ewma_window_days = 10
-ewma_cutoff_days = 10
# Python virtual environment
venv_python=bt_venv/bin/python3
@@ -34,4 +33,3 @@ validator:
--logging.$(logging_level) \
--neuron.axon_off true \
--ewma.window_days $(ewma_window_days) \
- --ewma.cutoff_days $(ewma_cutoff_days)
diff --git a/alembic/versions/2b28a1b95303_add_column_rewards_prompt_name.py b/alembic/versions/2b28a1b95303_add_column_rewards_prompt_name.py
new file mode 100644
index 0000000..d0c8c2f
--- /dev/null
+++ b/alembic/versions/2b28a1b95303_add_column_rewards_prompt_name.py
@@ -0,0 +1,30 @@
+"""add column rewards prompt_name
+
+Revision ID: 2b28a1b95303
+Revises: a9227b0cb10b
+Create Date: 2025-11-27 17:57:01.394792
+
+"""
+
+from typing import Sequence, Union
+
+from alembic import op
+import sqlalchemy as sa
+
+
+# revision identifiers, used by Alembic.
+revision: str = "2b28a1b95303"
+down_revision: Union[str, None] = "a9227b0cb10b"
+branch_labels: Union[str, Sequence[str], None] = None
+depends_on: Union[str, Sequence[str], None] = None
+
+
+def upgrade() -> None:
+ op.add_column(
+ "miner_rewards",
+ sa.Column("prompt_name", sa.Text, server_default="low"),
+ )
+
+
+def downgrade() -> None:
+ op.drop_column("miner_rewards", "prompt_name")
diff --git a/docs/miner_reference.md b/docs/miner_reference.md
index da4dd58..bbe901d 100644
--- a/docs/miner_reference.md
+++ b/docs/miner_reference.md
@@ -166,39 +166,6 @@ pm2 start miner.config.js -- --blacklist.validator_min_stake 1000
[Back to top ^][table-of-contents]
-#### `--blacklist.validator_exceptions INTEGER INTEGER INTEGER ...`
-
-Optional list of validator exceptions (e.g., --blacklist.validator_exceptions 0 1 128 162 34 49 38).
-
-Default: `[]`
-
-Example:
-
-```js
-// miner.config.js
-module.exports = {
- apps: [
- {
- name: "miner",
- interpreter: "python3",
- script: "./neurons/miner.py",
- args: "--blacklist.validator_exceptions 0 1 128 162 34 49 38",
- env: {
- PYTHONPATH: ".",
- },
- },
- ],
-};
-```
-
-Alternatively, you can add the args directly to the command:
-
-```shell
-pm2 start miner.config.js -- --blacklist.validator_exceptions 0 1 128 162 34 49 38
-```
-
-[Back to top ^][table-of-contents]
-
#### `--logging.debug`
Turn on bittensor debugging information.
diff --git a/docs/validator_guide.md b/docs/validator_guide.md
index 2a4c892..dd3693f 100644
--- a/docs/validator_guide.md
+++ b/docs/validator_guide.md
@@ -9,8 +9,8 @@
- [5. Options](#5-options)
- [5.1. Common Options](#51-common-options)
- [`--axon.port INTEGER`](#--axonport-integer)
- - [`--ewma.cutoff_days INTEGER`](#--ewmacutoff_days-integer)
- - [`--ewma.window_days INTEGER`](#--ewmawindow_days-float)
+ - [`--ewma.window_days INTEGER`](#--ewmawindow_days-integer)
+ - [`--softmax.beta FLOAT`](#--softmaxbeta-float)
- [`--logging.debug`](#--loggingdebug)
- [`--logging.info`](#--logginginfo)
- [`--logging.trace`](#--loggingtrace)
@@ -25,7 +25,6 @@
- [`--neuron.sample_size INTEGER`](#--neuronsample_size-integer)
- [`--neuron.timeout INTEGER`](#--neurontimeout-integer)
- [`--neuron.nprocs INTEGER`](#--neuronnprocs-integer)
- - [`--neuron.use_multiprocess INTEGER`](#--neuronuse_multiprocess-integer)
- [`--neuron.vpermit_tao_limit INTEGER`](#--neuronvpermit_tao_limit-integer)
- [`--wallet.hotkey TEXT`](#--wallethotkey-text)
- [`--wallet.name TEXT`](#--walletname-text)
@@ -284,45 +283,6 @@ pm2 start validator.test.config.js -- --axon.port 8091
[Back to top ^][table-of-contents]
-#### `--ewma.cutoff_days INTEGER`
-
-The number of days against which to run the moving average, (e.g. 1).
-
-Default: `2`
-
-Example:
-
-```js
-// validator.config.js
-module.exports = {
- apps: [
- {
- name: "validator",
- interpreter: "python3",
- script: "./neurons/validator.py",
- args: "--ewma.cutoff_days 10",
- env: {
- PYTHONPATH: ".",
- },
- },
- ],
-};
-```
-
-Alternatively, you can add the args directly to the command:
-
-```shell
-pm2 start validator.config.js -- --ewma.cutoff_days 10
-```
-
-for testnet it's:
-
-```shell
-pm2 start validator.test.config.js -- --ewma.cutoff_days 10
-```
-
-[Back to top ^][table-of-contents]
-
#### `--ewma.window_days INTEGER`
The window in days for the rolling average, (e.g. 10).
@@ -358,9 +318,9 @@ pm2 start validator.config.js -- --ewma.window_days 10
#### `--softmax.beta FLOAT`
-Negative beta to give higher weight to lower scores.
+Negative beta to give higher weight to lower scores for the 1h prompt.
-Default: `-0.002`
+Default: `-0.05`
Example:
@@ -372,7 +332,7 @@ module.exports = {
name: "validator",
interpreter: "python3",
script: "./neurons/validator.py",
- args: "--softmax.beta -0.003",
+ args: "--softmax.beta -0.05",
env: {
PYTHONPATH: ".",
},
@@ -384,7 +344,7 @@ module.exports = {
Alternatively, you can add the args directly to the command:
```shell
-pm2 start validator.config.js -- --softmax.beta -0.003
+pm2 start validator.config.js -- --softmax.beta -0.05
```
[Back to top ^][table-of-contents]
@@ -808,9 +768,9 @@ pm2 start validator.config.js -- --neuron.timeout 120
#### `--neuron.nprocs INTEGER`
-The number of processes to run for the validator dendrite, (e.g. 8).
+The number of processes to run for the validator dendrite, (e.g. 2).
-Default: `8`
+Default: `2`
Example:
@@ -819,10 +779,10 @@ Example:
module.exports = {
apps: [
{
- name: "validator",
- interpreter: "python3",
- script: "./neurons/validator.py",
- args: "--neuron.nprocs 8",
+ name: "validator",
+ interpreter: "python3",
+ script: "./neurons/validator.py",
+ args: "--neuron.nprocs 2",
env: {
PYTHONPATH: ".",
},
@@ -834,7 +794,7 @@ module.exports = {
Alternatively, you can add the args directly to the command:
```shell
-pm2 start validator.config.js -- --neuron.nprocs 8
+pm2 start validator.config.js -- --neuron.nprocs 2
```
[Back to top ^][table-of-contents]
@@ -872,39 +832,6 @@ pm2 start validator.config.js -- --neuron.vpermit_tao_limit 1000
[Back to top ^][table-of-contents]
-#### `--neuron.use_multiprocess INTEGER`
-
-Wether to use multiple processes for the validator dendrite.
-
-Default: `1`
-
-Example to disable multiprocess:
-
-```js
-// validator.config.js
-module.exports = {
- apps: [
- {
- name: "validator",
- interpreter: "python3",
- script: "./neurons/validator.py",
- args: "--neuron.use_multiprocess 0",
- env: {
- PYTHONPATH: ".",
- },
- },
- ],
-};
-```
-
-Alternatively, you can add the args directly to the command:
-
-```shell
-pm2 start validator.config.js -- --neuron.nprocs 8
-```
-
-[Back to top ^][table-of-contents]
-
#### `--wallet.hotkey TEXT`
The hotkey of the wallet.
diff --git a/entrypoint-validator.sh b/entrypoint-validator.sh
index 3b39200..d9e4b2b 100644
--- a/entrypoint-validator.sh
+++ b/entrypoint-validator.sh
@@ -10,8 +10,7 @@ validator_coldkey_name=validator
validator_hotkey_name=default
ewma_window_days=10
-ewma_cutoff_days=10
-softmax_beta=-0.1
+softmax_beta=-0.05
log_id_prefix=my_validator_name
@@ -23,7 +22,6 @@ python3.10 ./neurons/validator.py \
--logging.debug \
--neuron.axon_off true \
--ewma.window_days $ewma_window_days \
- --ewma.cutoff_days $ewma_cutoff_days \
--softmax.beta $softmax_beta \
--neuron.vpermit_tao_limit $vpermit_tao_limit \
--gcp.log_id_prefix $log_id_prefix \
diff --git a/neurons/miner.py b/neurons/miner.py
index 8cf858e..616a71d 100644
--- a/neurons/miner.py
+++ b/neurons/miner.py
@@ -106,21 +106,12 @@ async def blacklist(self, synapse: Simulation) -> typing.Tuple[bool, str]:
uid = self.metagraph.hotkeys.index(synapse.dendrite.hotkey)
stake = self.metagraph.S[uid]
bt.logging.info(f"Requesting UID: {uid} | Stake at UID: {stake}")
- bt.logging.debug(
- f"Whitelisted validators: {self.config.blacklist.validator_exceptions}"
- )
-
- if uid in self.config.blacklist.validator_exceptions:
+ if stake <= self.config.blacklist.validator_min_stake:
+ # Ignore requests if the stake is below minimum
bt.logging.info(
- f"Requesting UID: {uid} whitelisted as a validator"
+ f"Hotkey: {synapse.dendrite.hotkey}: stake below minimum threshold of {self.config.blacklist.validator_min_stake}"
)
- else:
- if stake <= self.config.blacklist.validator_min_stake:
- # Ignore requests if the stake is below minimum
- bt.logging.info(
- f"Hotkey: {synapse.dendrite.hotkey}: stake below minimum threshold of {self.config.blacklist.validator_min_stake}"
- )
- return True, "Stake below minimum threshold"
+ return True, "Stake below minimum threshold"
if self.config.blacklist.force_validator_permit:
# If the config is set to force validator permit, then we should only allow requests from validators.
@@ -182,7 +173,7 @@ def load_state(self):
def set_weights(self):
pass
- async def forward_validator(self):
+ def forward_validator(self):
pass
def print_info(self):
diff --git a/neurons/validator.py b/neurons/validator.py
index 1009601..aff415a 100644
--- a/neurons/validator.py
+++ b/neurons/validator.py
@@ -1,9 +1,10 @@
# The MIT License (MIT)
# Copyright © 2023 Yuma Rao
# Copyright © 2023 Mode Labs
-import asyncio
-from datetime import datetime, timedelta
+from datetime import datetime, timedelta, timezone
import multiprocessing as mp
+import sched
+import time
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -29,19 +30,23 @@
from synth.utils.helpers import (
get_current_time,
round_time_to_minutes,
- timeout_until,
)
from synth.utils.logging import setup_gcp_logging
from synth.utils.opening_hours import should_skip_xau
from synth.validator.forward import (
calculate_moving_average_and_update_rewards,
- calculate_rewards_and_update_scores,
+ calculate_scores,
get_available_miners_and_update_metagraph_history,
query_available_miners_and_save_responses,
send_weights_to_bittensor_and_update_weights_history,
)
from synth.validator.miner_data_handler import MinerDataHandler
from synth.validator.price_data_provider import PriceDataProvider
+from synth.validator.prompt_config import (
+ PromptConfig,
+ LOW_FREQUENCY,
+ HIGH_FREQUENCY,
+)
load_dotenv()
@@ -67,43 +72,19 @@ def __init__(self, config=None):
self.miner_data_handler = MinerDataHandler()
self.price_data_provider = PriceDataProvider()
- self.simulation_input_list = [
- # input data: give me prediction of BTC price for the next 1 day for every 5 min of time
- SimulationInput(
- asset="BTC",
- time_increment=300,
- time_length=86400,
- num_simulations=1000,
- ),
- SimulationInput(
- asset="ETH",
- time_increment=300,
- time_length=86400,
- num_simulations=1000,
- ),
- SimulationInput(
- asset="XAU",
- time_increment=300,
- time_length=86400,
- num_simulations=1000,
- ),
- SimulationInput(
- asset="SOL",
- time_increment=300,
- time_length=86400,
- num_simulations=1000,
- ),
- ]
- self.timeout_extra_seconds = 60
+ self.scheduler = sched.scheduler(time.time, time.sleep)
+ self.miner_uids: list[int] = []
+ self.asset_list = ["BTC", "ETH", "XAU", "SOL"]
+ HIGH_FREQUENCY.softmax_beta = self.config.softmax.beta
self.assert_assets_supported()
def assert_assets_supported(self):
# Assert assets are all implemented in the price data provider:
- for simulation in self.simulation_input_list:
- assert simulation.asset in PriceDataProvider.TOKEN_MAP
+ for asset in self.asset_list:
+ assert asset in PriceDataProvider.TOKEN_MAP
- async def forward_validator(self):
+ def forward_validator(self):
"""
Validator forward pass. Consists of:
- Generating the query
@@ -112,95 +93,135 @@ async def forward_validator(self):
- Rewarding the miners
- Updating the scores
"""
- bt.logging.info("calling forward_validator()")
- return [
- asyncio.create_task(self.forward_prompt()),
- ]
-
- async def wait_till_next_simulation(
- self, request_time: datetime, simulation_input_list: list
+ self.miner_uids = get_available_miners_and_update_metagraph_history(
+ base_neuron=self,
+ miner_data_handler=self.miner_data_handler,
+ )
+ self.schedule_cycle(get_current_time(), HIGH_FREQUENCY, True)
+ self.schedule_cycle(get_current_time(), LOW_FREQUENCY, True)
+ self.scheduler.run()
+
+ def schedule_cycle(
+ self,
+ cycle_start_time: datetime,
+ prompt_config: PromptConfig,
+ immediately: bool = False,
):
- # wait until the next simulation
- next_iteration = request_time + timedelta(
- minutes=60 / len(simulation_input_list)
+ asset = self.asset_list[0]
+
+ latest_asset = self.miner_data_handler.get_latest_asset(
+ prompt_config.time_length
+ )
+ if latest_asset is not None and latest_asset in self.asset_list:
+ latest_index = self.asset_list.index(latest_asset)
+ asset = self.asset_list[(latest_index + 1) % len(self.asset_list)]
+
+ delay = prompt_config.initial_delay
+ if not immediately:
+ next_cycle = cycle_start_time + timedelta(
+ minutes=prompt_config.total_cycle_minutes
+ / len(self.asset_list)
+ )
+ delay = (next_cycle - get_current_time()).total_seconds()
+
+ # Schedule the launch of high frequency prompt
+ high_frequency_launch = datetime(
+ 2025, 12, 2, 18, 0, 0, tzinfo=timezone.utc
)
- wait_time = timeout_until(next_iteration)
+ if (
+ prompt_config.label == HIGH_FREQUENCY.label
+ and get_current_time() <= high_frequency_launch
+ ):
+ return
+
bt.logging.info(
- f"Waiting for {wait_time/60} minutes until the next simulation",
- "forward_prompt",
+ f"Scheduling next {prompt_config.label} frequency cycle for asset {asset} in {delay} seconds"
)
- await asyncio.sleep(wait_time)
-
- async def forward_prompt(self):
- for simulation_input in self.simulation_input_list:
- # ================= Step 1 ================= #
- # Getting available miners from metagraph and saving information about them
- # and their properties (rank, incentives, emission) at the current moment in the database
- # in the metagraph_history table and in the miners table
- # ========================================== #
-
- miner_uids = get_available_miners_and_update_metagraph_history(
- base_neuron=self,
- miner_data_handler=self.miner_data_handler,
- )
- if len(miner_uids) == 0:
- bt.logging.error(
- "No miners available",
- "forward_prompt",
- )
- await self.forward_score()
- await self.wait_till_next_simulation(
- get_current_time(), self.simulation_input_list
- )
- continue
-
- request_time = get_current_time()
- start_time = round_time_to_minutes(
- request_time, 60, self.timeout_extra_seconds
- )
+ method = (
+ self.cycle_low_frequency
+ if prompt_config.label == LOW_FREQUENCY.label
+ else self.cycle_high_frequency
+ )
+ self.scheduler.enter(
+ delay=delay,
+ priority=1,
+ action=method,
+ argument=(asset,),
+ )
+
+ def cycle_low_frequency(self, asset: str):
+ bt.logging.info(f"starting the {LOW_FREQUENCY.label} frequency cycle")
+ cycle_start_time = get_current_time()
- if should_skip_xau(start_time) and simulation_input.asset == "XAU":
- bt.logging.info(
- "Skipping XAU simulation as market is closed",
- "forward_prompt",
- )
- await self.forward_score()
- await self.wait_till_next_simulation(
- request_time, self.simulation_input_list
- )
- continue
-
- # ================= Step 2 ================= #
- # Query all the available miners and save all their responses
- # in the database in miner_predictions table
- # ========================================== #
-
- # add the start time to the simulation input
- simulation_input.start_time = start_time.isoformat()
-
- await query_available_miners_and_save_responses(
- base_neuron=self,
- miner_data_handler=self.miner_data_handler,
- miner_uids=miner_uids,
- simulation_input=simulation_input,
- request_time=request_time,
+ self.sync()
+ # update the miners, also for the high frequency prompt that will use the same list
+ self.miner_uids = get_available_miners_and_update_metagraph_history(
+ base_neuron=self,
+ miner_data_handler=self.miner_data_handler,
+ )
+ self.forward_prompt(asset, LOW_FREQUENCY)
+ self.forward_score_low_frequency()
+ # self.cleanup_history()
+ self.schedule_cycle(cycle_start_time, LOW_FREQUENCY)
+
+ def cycle_high_frequency(self, asset: str):
+ cycle_start_time = get_current_time()
+ self.forward_prompt(asset, HIGH_FREQUENCY)
+
+ current_time = get_current_time()
+ scored_time: datetime = round_time_to_minutes(current_time)
+ bt.logging.info(f"forward score {HIGH_FREQUENCY.label} frequency")
+ calculate_scores(
+ self.miner_data_handler,
+ self.price_data_provider,
+ scored_time,
+ HIGH_FREQUENCY,
+ )
+ self.schedule_cycle(cycle_start_time, HIGH_FREQUENCY)
+
+ def forward_prompt(self, asset: str, prompt_config: PromptConfig):
+ bt.logging.info(f"forward prompt for {prompt_config.label} frequency")
+ if len(self.miner_uids) == 0:
+ bt.logging.error(
+ "No miners available",
+ "forward_prompt",
)
+ return
+
+ request_time = get_current_time()
+ start_time: datetime = round_time_to_minutes(
+ request_time, prompt_config.timeout_extra_seconds
+ )
- await self.forward_score()
- await self.wait_till_next_simulation(
- request_time, self.simulation_input_list
+ if should_skip_xau(start_time) and asset == "XAU":
+ bt.logging.info(
+ "Skipping XAU simulation as market is closed",
+ "forward_prompt",
)
+ return
- async def forward_score(self):
- current_time = get_current_time()
+ simulation_input = SimulationInput(
+ asset=asset,
+ start_time=start_time.isoformat(),
+ time_increment=prompt_config.time_increment,
+ time_length=prompt_config.time_length,
+ num_simulations=prompt_config.num_simulations,
+ )
- # round current time to the closest minute and add extra minutes
- # to be sure we are after the start time of the prompt
- scored_time = round_time_to_minutes(
- current_time, 60, self.timeout_extra_seconds * 2
+ query_available_miners_and_save_responses(
+ base_neuron=self,
+ miner_data_handler=self.miner_data_handler,
+ miner_uids=self.miner_uids,
+ simulation_input=simulation_input,
+ request_time=request_time,
)
+ def forward_score_low_frequency(self):
+ bt.logging.info(f"forward score {LOW_FREQUENCY.label} frequency")
+ current_time = get_current_time()
+ scored_time: datetime = round_time_to_minutes(current_time)
+
# ================= Step 3 ================= #
# Calculate rewards based on historical predictions data
# from the miner_predictions table:
@@ -210,11 +231,11 @@ async def forward_score(self):
# we store the rewards in the miner_scores table
# ========================================== #
- success = calculate_rewards_and_update_scores(
- miner_data_handler=self.miner_data_handler,
- price_data_provider=self.price_data_provider,
- scored_time=scored_time,
- cutoff_days=self.config.ewma.cutoff_days,
+ success = calculate_scores(
+ self.miner_data_handler,
+ self.price_data_provider,
+ scored_time,
+ LOW_FREQUENCY,
)
if not success:
@@ -229,9 +250,6 @@ async def forward_score(self):
moving_averages_data = calculate_moving_average_and_update_rewards(
miner_data_handler=self.miner_data_handler,
scored_time=scored_time,
- cutoff_days=self.config.ewma.cutoff_days,
- window_days=self.config.ewma.window_days,
- softmax_beta=self.config.softmax.beta,
)
if len(moving_averages_data) == 0:
diff --git a/pyproject.toml b/pyproject.toml
index 3a63505..48bded7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -19,7 +19,7 @@ exclude = '''
'''
[tool.mypy]
-disable_error_code = ["type-arg"]
+disable_error_code = ["type-arg", "import-untyped", "no-untyped-def", "no-untyped-call"]
python_version = 3.10
strict = true
show_error_codes = true
diff --git a/synth/base/dendrite.py b/synth/base/dendrite.py
index 654716c..748c0c9 100644
--- a/synth/base/dendrite.py
+++ b/synth/base/dendrite.py
@@ -265,7 +265,7 @@ def log_exception(exception: Exception):
ValidationError,
),
):
- bt.logging.debug(f"{error_type}#{error_id}: {exception}")
+ bt.logging.trace(f"{error_type}#{error_id}: {exception}")
else:
bt.logging.error(f"{error_type}#{error_id}: {exception}")
traceback.print_exc(file=sys.stderr)
diff --git a/synth/base/dendrite_multiprocess.py b/synth/base/dendrite_multiprocess.py
index d01eead..6cb75d1 100644
--- a/synth/base/dendrite_multiprocess.py
+++ b/synth/base/dendrite_multiprocess.py
@@ -169,7 +169,7 @@ async def call(
synapse.dendrite.signature = signature
try:
- bt.logging.debug(
+ bt.logging.trace(
f"dendrite | --> | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | 0 | Success"
)
response = await client.post(
@@ -189,7 +189,7 @@ async def call(
synapse = process_error_message(synapse, REQUEST_NAME, e)
finally:
- bt.logging.debug(
+ bt.logging.trace(
f"dendrite | <-- | {synapse.get_total_size()} B | {synapse.name} | {synapse.axon.hotkey} | {synapse.axon.ip}:{str(synapse.axon.port)} | {synapse.dendrite.status_code} | {synapse.dendrite.status_message}"
)
@@ -304,7 +304,7 @@ def sync_forward_multiprocess(
axons: list[bt.AxonInfo],
synapse: Simulation,
timeout: float,
- nprocs: int = 8,
+ nprocs: int = 2,
) -> list[Simulation]:
bt.logging.debug(
f"Starting multiprocess forward with {nprocs} processes.", "dendrite"
diff --git a/synth/base/neuron.py b/synth/base/neuron.py
index 8364fc5..8613c65 100644
--- a/synth/base/neuron.py
+++ b/synth/base/neuron.py
@@ -102,7 +102,7 @@ def __init__(self, config=None):
async def forward_miner(self, synapse: bt.Synapse) -> bt.Synapse: ...
@abstractmethod
- async def forward_validator(self): ...
+ def forward_validator(self): ...
@abstractmethod
def resync_metagraph(self): ...
@@ -127,7 +127,7 @@ def sync(self):
# in the run() method. This to save to database the result of the set_weights.
# Always save state.
- self.save_state()
+ # self.save_state()
def check_registered(self):
# --- Check for registration.
diff --git a/synth/base/validator.py b/synth/base/validator.py
index 5c6cd09..1544db1 100644
--- a/synth/base/validator.py
+++ b/synth/base/validator.py
@@ -104,10 +104,6 @@ def serve_axon(self):
f"Failed to create Axon initialize with exception: {e}"
)
- async def concurrent_forward(self):
- coroutines = await self.forward_validator()
- await asyncio.gather(*coroutines)
-
def run(self):
"""
Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors.
@@ -129,26 +125,14 @@ def run(self):
"""
bt.logging.info(f"Validator starting at block: {self.block}")
- # This loop maintains the validator's operations until intentionally stopped.
try:
- while True:
- # Run multiple forwards concurrently.
- self.loop.run_until_complete(self.concurrent_forward())
-
- # Check if we should exit.
- if self.should_exit:
- break
-
- # Sync metagraph and potentially set weights.
- self.sync()
-
- self.step += 1
-
+ self.forward_validator()
# If someone intentionally stops the validator, it'll safely terminate operations.
except KeyboardInterrupt:
if not self.config.neuron.axon_off:
self.axon.stop()
bt.logging.success("Validator killed by keyboard interrupt.")
+ traceback.print_exc(file=sys.stderr)
exit()
# In case of unforeseen errors, the validator will log the error and continue operations.
@@ -156,50 +140,6 @@ def run(self):
bt.logging.error(f"Error during validation: {str(err)}")
traceback.print_exc(file=sys.stderr)
- def run_in_background_thread(self):
- """
- Starts the validator's operations in a background thread upon entering the context.
- This method facilitates the use of the validator in a 'with' statement.
- """
- if not self.is_running:
- bt.logging.debug("Starting validator in background thread.")
- self.should_exit = False
- self.thread = threading.Thread(target=self.run, daemon=True)
- self.thread.start()
- self.is_running = True
- bt.logging.debug("Started")
-
- def stop_run_thread(self):
- """
- Stops the validator's operations that are running in the background thread.
- """
- if self.is_running:
- bt.logging.debug("Stopping validator in background thread.")
- self.should_exit = True
- if self.thread is not None:
- self.thread.join(5)
- self.is_running = False
- bt.logging.debug("Stopped")
-
- def __enter__(self):
- self.run_in_background_thread()
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- """
- Stops the validator's background operations upon exiting the context.
- This method facilitates the use of the validator in a 'with' statement.
-
- Args:
- exc_type: The type of the exception that caused the context to be exited.
- None if the context was exited without an exception.
- exc_value: The instance of the exception that caused the context to be exited.
- None if the context was exited without an exception.
- traceback: A traceback object encoding the stack trace.
- None if the context was exited without an exception.
- """
- self.stop_run_thread()
-
def set_weights(self):
"""
Sets the validator weights to the metagraph hotkeys based on the scores it has received from the miners. The weights determine the trust and incentive level the validator assigns to miner nodes on the network.
diff --git a/synth/db/models.py b/synth/db/models.py
index 3bed9ff..5c1faac 100644
--- a/synth/db/models.py
+++ b/synth/db/models.py
@@ -13,6 +13,7 @@
String,
JSON,
ForeignKey,
+ Text,
)
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.orm import DeclarativeBase, relationship, Session
@@ -131,6 +132,7 @@ class MinerReward(Base):
)
smoothed_score = Column(Float, nullable=False)
reward_weight = Column(Float, nullable=False)
+ prompt_name = Column(Text, nullable=True)
updated_at = Column(DateTime(timezone=True), nullable=False)
miner = relationship("Miner", back_populates="rewards")
diff --git a/synth/miner/run.py b/synth/miner/run.py
index ee5eff4..5eb8bcb 100644
--- a/synth/miner/run.py
+++ b/synth/miner/run.py
@@ -17,7 +17,7 @@
)
current_time = get_current_time()
- start_time = round_time_to_minutes(current_time, 60, 120)
+ start_time = round_time_to_minutes(current_time, 120)
simulation_input.start_time = start_time.isoformat()
print("start_time", simulation_input.start_time)
diff --git a/synth/utils/config.py b/synth/utils/config.py
index 33045cb..f23d52d 100644
--- a/synth/utils/config.py
+++ b/synth/utils/config.py
@@ -141,14 +141,6 @@ def add_miner_args(_, parser):
help="Minimum validator stake to accept forward requests from as a miner",
)
- parser.add_argument(
- "--blacklist.validator_exceptions",
- type=int,
- nargs="+",
- default=[],
- help="List of validator exceptions (e.g., --blacklist.validator_exceptions 0 1 128 162 34 49 38)",
- )
-
parser.add_argument(
"--wandb.enabled",
type=bool,
@@ -199,14 +191,7 @@ def add_validator_args(_, parser: argparse.ArgumentParser):
"--neuron.nprocs",
type=int,
help="The number of processes to run for the validator dendrite.",
- default=8,
- )
-
- parser.add_argument(
- "--neuron.use_multiprocess",
- type=int,
- help="The number of processes to run for the validator dendrite.",
- default=1,
+ default=2,
)
parser.add_argument(
@@ -253,18 +238,11 @@ def add_validator_args(_, parser: argparse.ArgumentParser):
default=10,
)
- parser.add_argument(
- "--ewma.cutoff_days",
- type=int,
- help="The number of days against which to run the moving average",
- default=10,
- )
-
parser.add_argument(
"--softmax.beta",
type=float,
help="Negative beta to give higher weight to lower scores.",
- default=-0.1,
+ default=-0.05,
)
diff --git a/synth/utils/helpers.py b/synth/utils/helpers.py
index d3c17eb..d5dd647 100644
--- a/synth/utils/helpers.py
+++ b/synth/utils/helpers.py
@@ -95,40 +95,19 @@ def get_intersecting_arrays(array1, array2):
return filtered_array1, filtered_array2
-def round_time_to_minutes(
- dt: datetime, in_seconds: int, extra_seconds=0
-) -> datetime:
- """round validation time to the closest minute and add extra minutes
+def round_time_to_minutes(dt: datetime, extra_seconds=0) -> datetime:
+ """round validation time to the closest minute and add extra seconds
Args:
dt (datetime): request_time
- in_seconds (int): 60
extra_seconds (int, optional): self.timeout_extra_seconds: 120. Defaults to 0.
Returns:
datetime: rounded-up datetime
"""
- # Define the rounding interval
- rounding_interval = timedelta(seconds=in_seconds)
-
- # Calculate the number of seconds since the start of the day
- seconds = (
- dt - dt.replace(hour=0, minute=0, second=0, microsecond=0)
- ).total_seconds()
-
- # Calculate the next multiple of time_increment in seconds
- next_interval_seconds = (
- (seconds // rounding_interval.total_seconds()) + 1
- ) * rounding_interval.total_seconds()
-
- # Get the rounded-up datetime
- rounded_time = (
- dt.replace(hour=0, minute=0, second=0, microsecond=0)
- + timedelta(seconds=next_interval_seconds)
- + timedelta(seconds=extra_seconds)
- )
-
- return rounded_time
+ return (dt + timedelta(minutes=1)).replace(
+ second=0, microsecond=0
+ ) + timedelta(seconds=extra_seconds)
def from_iso_to_unix_time(iso_time: str):
@@ -161,21 +140,5 @@ def timeout_from_start_time(
return (start_time - current_time).total_seconds()
-def timeout_until(until_time: datetime):
- """
- Calculate the timeout duration from the current time to the until_time.
-
- :param until_time: datetime object representing the end time.
- :return: Timeout duration in seconds.
- """
- # Get current date and time
- current_time = datetime.now(timezone.utc)
-
- # Calculate the timeout duration
- wait_time = (until_time - current_time).total_seconds()
-
- return wait_time if wait_time > 0 else 0
-
-
def convert_list_elements_to_str(items: list[int]) -> list[str]:
return [str(x) for x in items]
diff --git a/synth/utils/uids.py b/synth/utils/uids.py
index 3de9813..4f1a7dd 100644
--- a/synth/utils/uids.py
+++ b/synth/utils/uids.py
@@ -14,7 +14,7 @@ def check_uid_availability(
"""
# Filter non serving axons.
if not metagraph.axons[uid].is_serving:
- bt.logging.debug(f"uid {uid} is not serving")
+ bt.logging.trace(f"uid {uid} is not serving")
return False
# Filter validator permit > 1024 stake.
if metagraph.validator_permit[uid]:
diff --git a/synth/validator/crps_calculation.py b/synth/validator/crps_calculation.py
index ce77dd3..8cf031a 100644
--- a/synth/validator/crps_calculation.py
+++ b/synth/validator/crps_calculation.py
@@ -1,14 +1,6 @@
import numpy as np
from properscoring import crps_ensemble
-# Define scoring intervals in seconds
-scoring_intervals = {
- "5min": 300, # 5 minutes
- "30min": 1800, # 30 minutes
- "3hour": 10800, # 3 hours
- "24hour_abs": 86400, # 24 hours
-}
-
def get_interval_steps(scoring_interval: int, time_increment: int) -> int:
"""
@@ -21,6 +13,7 @@ def calculate_crps_for_miner(
simulation_runs: np.ndarray,
real_price_path: np.ndarray,
time_increment: int,
+ scoring_intervals: dict[str, int],
) -> tuple[float, list[dict]]:
"""
Calculate the total CRPS score for a miner's simulations over specified intervals,
@@ -43,6 +36,7 @@ def calculate_crps_for_miner(
for interval_name, interval_seconds in scoring_intervals.items():
interval_steps = get_interval_steps(interval_seconds, time_increment)
absolute_price = interval_name.endswith("_abs")
+ is_gap = interval_name.endswith("_gap")
# If we are considering absolute prices, adjust the interval steps for potential gaps:
# if only the initial price is present, then decrease the interval step
@@ -64,11 +58,13 @@ def calculate_crps_for_miner(
simulation_runs,
interval_steps,
absolute_price,
+ is_gap,
)
real_changes = calculate_price_changes_over_intervals(
real_price_path.reshape(1, -1),
interval_steps,
absolute_price,
+ is_gap,
)
data_blocks = label_observed_blocks(real_changes[0])
@@ -146,7 +142,10 @@ def label_observed_blocks(arr: np.ndarray) -> np.ndarray:
def calculate_price_changes_over_intervals(
- price_paths: np.ndarray, interval_steps: int, absolute_price=False
+ price_paths: np.ndarray,
+ interval_steps: int,
+ absolute_price=False,
+ is_gap=False,
) -> np.ndarray:
"""
Calculate price changes over specified intervals.
@@ -160,7 +159,11 @@ def calculate_price_changes_over_intervals(
numpy.ndarray: Array of price changes over intervals.
"""
# Get the prices at the interval points
+ # [1, 2, 3, 4, 5, 6, 7] -> [1, 3, 5, 7] if interval_steps is 2
interval_prices = price_paths[:, ::interval_steps]
+ if is_gap:
+        # [1, 2, 3, 4, 5, 6, 7] -> [1, 3] per path if interval_steps is 2
+        interval_prices = interval_prices[:, :2]
# Calculate price changes over intervals
if absolute_price:
diff --git a/synth/validator/forward.py b/synth/validator/forward.py
index 424305f..45ed7c6 100644
--- a/synth/validator/forward.py
+++ b/synth/validator/forward.py
@@ -36,8 +36,10 @@
convert_list_elements_to_str,
)
from synth.utils.uids import check_uid_availability
+from synth.validator import prompt_config
from synth.validator.miner_data_handler import MinerDataHandler
from synth.validator.moving_average import (
+ combine_moving_averages,
compute_smoothed_score,
prepare_df_for_moving_average,
print_rewards_df,
@@ -86,45 +88,46 @@ def send_weights_to_bittensor_and_update_weights_history(
def calculate_moving_average_and_update_rewards(
miner_data_handler: MinerDataHandler,
scored_time: datetime,
- cutoff_days: int,
- window_days: float,
- softmax_beta: float,
) -> list[dict]:
- # apply custom moving average rewards
- miner_scores_df = miner_data_handler.get_miner_scores(
- scored_time=scored_time,
- cutoff_days=cutoff_days,
- )
+ prompts = [prompt_config.LOW_FREQUENCY, prompt_config.HIGH_FREQUENCY]
+
+ moving_averages_data: dict[str, list[dict]] = {}
+ for prompt in prompts:
+ miner_scores_df = miner_data_handler.get_miner_scores(
+ scored_time,
+ prompt.window_days,
+ prompt.time_length,
+ )
- df = prepare_df_for_moving_average(miner_scores_df)
+ df = prepare_df_for_moving_average(miner_scores_df)
- moving_averages_data = compute_smoothed_score(
- miner_data_handler=miner_data_handler,
- input_df=df,
- window_days=window_days,
- scored_time=scored_time,
- softmax_beta=softmax_beta,
- )
+ moving_averages = compute_smoothed_score(
+ miner_data_handler,
+ df,
+ scored_time,
+ prompt,
+ )
- if moving_averages_data is None:
- return []
+ if moving_averages is None:
+ continue
- print_rewards_df(moving_averages_data)
+ print_rewards_df(moving_averages, prompt.label)
- miner_data_handler.update_miner_rewards(moving_averages_data)
+ miner_data_handler.update_miner_rewards(moving_averages)
+ moving_averages_data[prompt.label] = moving_averages
- return moving_averages_data
+ return combine_moving_averages(moving_averages_data)
-def calculate_rewards_and_update_scores(
+def calculate_scores(
miner_data_handler: MinerDataHandler,
price_data_provider: PriceDataProvider,
scored_time: datetime,
- cutoff_days: int,
+ prompt: prompt_config.PromptConfig,
) -> bool:
# get latest prediction request from validator
- validator_requests = miner_data_handler.get_latest_prediction_requests(
- scored_time, cutoff_days
+ validator_requests = miner_data_handler.get_validator_requests_to_score(
+ scored_time, prompt.window_days, prompt.time_length
)
if validator_requests is None or len(validator_requests) == 0:
@@ -135,7 +138,6 @@ def calculate_rewards_and_update_scores(
fail_count = 0
for validator_request in validator_requests:
-
bt.logging.debug(f"validator_request_id: {validator_request.id}")
prompt_scores, detailed_info, real_prices = get_rewards(
@@ -152,18 +154,21 @@ def calculate_rewards_and_update_scores(
continue
miner_score_time = validator_request.start_time + timedelta(
- seconds=validator_request.time_length
+ seconds=int(validator_request.time_length)
)
miner_data_handler.set_miner_scores(
- real_prices, validator_request.id, detailed_info, miner_score_time
+ real_prices,
+ int(validator_request.id),
+ detailed_info,
+ miner_score_time,
)
# Success if at least one request succeed
return fail_count != len(validator_requests)
-async def query_available_miners_and_save_responses(
+def query_available_miners_and_save_responses(
base_neuron: BaseValidatorNeuron,
miner_data_handler: MinerDataHandler,
miner_uids: list,
@@ -191,25 +196,21 @@ async def query_available_miners_and_save_responses(
start_time = time.time()
- if base_neuron.config.neuron.use_multiprocess == 1:
- synapses = sync_forward_multiprocess(
- base_neuron.dendrite.keypair,
- base_neuron.dendrite.uuid,
- base_neuron.dendrite.external_ip,
- axons,
- synapse,
- timeout,
- base_neuron.config.neuron.nprocs,
- )
- else:
- synapses = await base_neuron.dendrite.forward(
- axons=axons,
- synapse=synapse,
- timeout=timeout,
- )
+ synapses = sync_forward_multiprocess(
+ base_neuron.dendrite.keypair,
+ base_neuron.dendrite.uuid,
+ base_neuron.dendrite.external_ip,
+ axons,
+ synapse,
+ timeout,
+ base_neuron.config.neuron.nprocs,
+ )
total_process_time = str(time.time() - start_time)
- bt.logging.debug(f"Forwarding took {total_process_time} seconds")
+ bt.logging.debug(
+ f"Forwarding took {total_process_time} seconds",
+ "sync_forward_multiprocess",
+ )
miner_predictions = {}
for i, synapse in enumerate(synapses):
diff --git a/synth/validator/miner_data_handler.py b/synth/validator/miner_data_handler.py
index 47bcfa0..84af0f6 100644
--- a/synth/validator/miner_data_handler.py
+++ b/synth/validator/miner_data_handler.py
@@ -41,7 +41,7 @@
WeightsUpdateHistory,
)
from synth.simulation_input import SimulationInput
-from synth.validator import response_validation_v2
+from synth.validator import prompt_config, response_validation_v2
class MinerDataHandler:
@@ -84,6 +84,29 @@ def get_miner_ids_map(self, connection: Connection):
return miner_Uid_map
+ def get_latest_asset(self, time_length: int) -> str | None:
+ try:
+ with self.engine.connect() as connection:
+ query = (
+ select(
+ ValidatorRequest.asset,
+ )
+ .where(ValidatorRequest.time_length == time_length)
+ .limit(1)
+ .order_by(ValidatorRequest.start_time.desc())
+ )
+
+ result = connection.execute(query).fetchall()
+ if len(result) == 0:
+ return None
+
+            # Return the asset of the most recent request
+ return str(result[0].asset)
+ except Exception as e:
+            bt.logging.error(f"in get_latest_asset (got an exception): {e}")
+ traceback.print_exc(file=sys.stderr)
+ return None
+
@retry(
stop=stop_after_attempt(5),
wait=wait_random_exponential(multiplier=7),
@@ -317,17 +340,21 @@ def get_miner_prediction(
traceback.print_exc(file=sys.stderr)
return None
- def get_latest_prediction_requests(
+ def get_validator_requests_to_score(
self,
scored_time: datetime,
- cutoff_days: int,
+ window_days: int,
+ time_length: int | None = None,
) -> typing.Optional[list[ValidatorRequest]]:
"""
Retrieve the list of IDs of the latest validator requests that (start_time + time_length) < scored_time
- and (start_time + time_length) >= scored_time - cutoff_days.
- This is to ensure that we only get requests that are within the cutoff_days.
+ and (start_time + time_length) >= scored_time - window_days.
+ This is to ensure that we only get requests that are within the window_days.
and exclude records that are already scored
"""
+ if time_length is None:
+ time_length = prompt_config.LOW_FREQUENCY.time_length
+
try:
with self.engine.connect() as connection:
subq = (
@@ -367,13 +394,14 @@ def get_latest_prediction_requests(
and_(
# Compare start_time plus an interval (in seconds) to the scored_time.
window_start < scored_time,
- # Compare start_time plus an interval (in seconds) to the cutoff_days.
- # This is to ensure that we only get requests that are within the cutoff_days.
- # Because we want to include in the moving average only the requests that are within the cutoff_days.
+ # Compare start_time plus an interval (in seconds) to the window_days.
+ # This is to ensure that we only get requests that are within the window_days.
+ # Because we want to include in the moving average only the requests that are within the window_days.
window_start
- >= scored_time - timedelta(days=cutoff_days),
+ >= scored_time - timedelta(days=window_days),
# Exclude records that have a matching miner_prediction via the NOT EXISTS clause.
not_(exists(subq)),
+ ValidatorRequest.time_length == time_length,
)
)
.order_by(ValidatorRequest.start_time.asc())
@@ -446,8 +474,15 @@ def update_metagraph_history(self, metagraph_info: list):
)
traceback.print_exc(file=sys.stderr)
- def get_miner_scores(self, scored_time: datetime, cutoff_days: int):
- min_scored_time = scored_time - timedelta(days=cutoff_days)
+ def get_miner_scores(
+ self,
+ scored_time: datetime,
+ window_days: int,
+ time_length: int | None = None,
+ ):
+ min_scored_time = scored_time - timedelta(days=window_days)
+ if time_length is None:
+ time_length = prompt_config.LOW_FREQUENCY.time_length
try:
with self.engine.connect() as connection:
@@ -458,7 +493,6 @@ def get_miner_scores(self, scored_time: datetime, cutoff_days: int):
MinerScore.scored_time,
MinerScore.score_details_v3,
ValidatorRequest.asset,
- ValidatorRequest.start_time,
)
.select_from(MinerScore)
.join(
@@ -470,7 +504,12 @@ def get_miner_scores(self, scored_time: datetime, cutoff_days: int):
ValidatorRequest.id
== MinerPrediction.validator_requests_id,
)
- .where(MinerScore.scored_time > min_scored_time)
+ .where(
+ and_(
+ MinerScore.scored_time > min_scored_time,
+ ValidatorRequest.time_length == time_length,
+ )
+ )
)
result = connection.execute(query)
diff --git a/synth/validator/moving_average.py b/synth/validator/moving_average.py
index 772e0a3..bf0da9c 100644
--- a/synth/validator/moving_average.py
+++ b/synth/validator/moving_average.py
@@ -9,6 +9,7 @@
from synth.validator.miner_data_handler import MinerDataHandler
+from synth.validator.prompt_config import PromptConfig
from synth.validator.reward import compute_softmax
@@ -16,15 +17,6 @@ def prepare_df_for_moving_average(df):
df = df.copy()
df["scored_time"] = pd.to_datetime(df["scored_time"])
- # 0) Temporary exclude a period
- df["start_time"] = pd.to_datetime(df["start_time"])
- exclude_start = datetime.fromisoformat("2025-11-18 11:53:00+00:00")
- exclude_end = datetime.fromisoformat("2025-11-18 14:08:00+00:00")
- mask_exclude = (df["start_time"] >= exclude_start) & (
- df["start_time"] <= exclude_end
- )
- df = df.loc[~mask_exclude]
-
# 1) compute globals
global_min = df["scored_time"].min()
all_times = sorted(df["scored_time"].unique())
@@ -61,6 +53,7 @@ def prepare_df_for_moving_average(df):
)
# 4) left‐merge the real data onto that grid
+ full["scored_time"] = pd.to_datetime(full["scored_time"])
full = full.merge(df, on=["miner_id", "scored_time"], how="left").merge(
miner_first, on="miner_id", how="left"
)
@@ -131,9 +124,8 @@ def apply_per_asset_coefficients(
def compute_smoothed_score(
miner_data_handler: MinerDataHandler,
input_df: DataFrame,
- window_days: int,
scored_time: datetime,
- softmax_beta: float,
+ prompt_config: PromptConfig,
) -> typing.Optional[list[dict]]:
if input_df.empty:
return None
@@ -149,11 +141,7 @@ def compute_smoothed_score(
group_df["scored_time"] = pd.to_datetime(group_df["scored_time"])
group_df = group_df.sort_values("scored_time")
- # Only consider rows within the last 10 days from scored_time
- min_time = scored_time - pd.Timedelta(days=window_days)
- mask = (group_df["scored_time"] > min_time) & (
- group_df["scored_time"] <= scored_time
- )
+ mask = group_df["scored_time"] <= scored_time
window_df = group_df.loc[mask]
# Drop NaN prompt_score_v3
@@ -190,7 +178,7 @@ def compute_smoothed_score(
r["rolling_avg"] for r in filtered_moving_averages_data
]
reward_weight_list = compute_softmax(
- np.array(rolling_avg_list), softmax_beta
+ np.array(rolling_avg_list), prompt_config.softmax_beta
)
rewards = []
@@ -204,15 +192,35 @@ def compute_smoothed_score(
"miner_id": item["miner_id"],
"miner_uid": item["miner_uid"],
"smoothed_score": item["rolling_avg"],
- "reward_weight": float(reward_weight),
+ "reward_weight": float(reward_weight)
+ * prompt_config.smoothed_score_coefficient,
"updated_at": scored_time.isoformat(),
+ "prompt_name": prompt_config.label,
}
)
return rewards
-def print_rewards_df(moving_averages_data):
- bt.logging.info("Scored responses moving averages:")
+def print_rewards_df(moving_averages_data: list[dict], label: str = ""):
+ bt.logging.info(f"Scored responses moving averages {label}")
df = pd.DataFrame.from_dict(moving_averages_data)
bt.logging.info(df.to_string())
+
+
+def combine_moving_averages(
+ moving_averages_data: dict[str, list[dict]],
+) -> list[dict]:
+ map_miner_reward: dict[int, dict] = {}
+
+ for moving_averages in list(moving_averages_data.values()):
+ for reward in moving_averages:
+ miner_id = reward["miner_id"]
+ if miner_id in map_miner_reward:
+ map_miner_reward[miner_id]["reward_weight"] += reward[
+ "reward_weight"
+ ]
+ else:
+ map_miner_reward[miner_id] = reward
+
+ return list(map_miner_reward.values())
diff --git a/synth/validator/price_data_provider.py b/synth/validator/price_data_provider.py
index 59a7031..a505f71 100644
--- a/synth/validator/price_data_provider.py
+++ b/synth/validator/price_data_provider.py
@@ -37,7 +37,7 @@ class PriceDataProvider:
reraise=True,
before=before_log(bt.logging._logger, logging.DEBUG),
)
- def fetch_data(self, validator_request: ValidatorRequest) -> list[dict]:
+ def fetch_data(self, validator_request: ValidatorRequest) -> list:
"""
Fetch real prices data from an external REST service.
Returns an array of time points with prices.
@@ -51,7 +51,7 @@ def fetch_data(self, validator_request: ValidatorRequest) -> list[dict]:
end_time_int = start_time_int + validator_request.time_length
params = {
- "symbol": self._get_token_mapping(validator_request.asset),
+ "symbol": self._get_token_mapping(str(validator_request.asset)),
"resolution": 1,
"from": start_time_int,
"to": end_time_int,
@@ -65,8 +65,8 @@ def fetch_data(self, validator_request: ValidatorRequest) -> list[dict]:
transformed_data = self._transform_data(
data,
start_time_int,
- validator_request.time_increment,
- validator_request.time_length,
+ int(validator_request.time_increment),
+ int(validator_request.time_length),
)
return transformed_data
diff --git a/synth/validator/prompt_config.py b/synth/validator/prompt_config.py
new file mode 100644
index 0000000..8923ecf
--- /dev/null
+++ b/synth/validator/prompt_config.py
@@ -0,0 +1,67 @@
+from dataclasses import dataclass
+
+
+@dataclass
+class PromptConfig:
+ label: str
+ time_length: int
+ time_increment: int
+ initial_delay: int
+ total_cycle_minutes: int
+ timeout_extra_seconds: int
+ scoring_intervals: dict[str, int] # Define scoring intervals in seconds.
+ window_days: int
+ softmax_beta: float
+ smoothed_score_coefficient: float
+ num_simulations: int = 1000
+
+
+LOW_FREQUENCY = PromptConfig(
+ label="low",
+ time_length=86400,
+ time_increment=300,
+    initial_delay=60,  # avoid two prompts starting simultaneously
+ total_cycle_minutes=60,
+ timeout_extra_seconds=60,
+ scoring_intervals={
+ "5min": 300, # 5 minutes
+ "30min": 1800, # 30 minutes
+ "3hour": 10800, # 3 hours
+ "24hour_abs": 86400, # 24 hours
+ },
+ window_days=10,
+ softmax_beta=-0.1,
+ smoothed_score_coefficient=0.5,
+)
+
+HIGH_FREQUENCY = PromptConfig(
+ label="high",
+ time_length=3600,
+ time_increment=60,
+ initial_delay=0,
+ total_cycle_minutes=12,
+ timeout_extra_seconds=60,
+ scoring_intervals={
+ "1min": 60,
+ "2min": 120,
+ "5min": 300,
+ "15min": 900,
+ "30min": 1800,
+ "60min_abs": 3600,
+ "0_5min_gaps": 300,
+ "0_10min_gaps": 600,
+ "0_15min_gaps": 900,
+ "0_20min_gaps": 1200,
+ "0_25min_gaps": 1500,
+ "0_30min_gaps": 1800,
+ "0_35min_gaps": 2100,
+ "0_40min_gaps": 2400,
+ "0_45min_gaps": 2700,
+ "0_50min_gaps": 3000,
+ "0_55min_gaps": 3300,
+ "0_60min_gaps": 3600,
+ },
+ window_days=1,
+ softmax_beta=-0.05,
+ smoothed_score_coefficient=0.5,
+)
diff --git a/synth/validator/response_validation_v2.py b/synth/validator/response_validation_v2.py
index bce1cf7..07f0c90 100644
--- a/synth/validator/response_validation_v2.py
+++ b/synth/validator/response_validation_v2.py
@@ -45,6 +45,8 @@ def validate_response_type(response) -> typing.Optional[str]:
if not isinstance(response[1], int):
return f"Time increment format is incorrect: expected int, got {type(response[1])}"
+ return None
+
def validate_responses(
response,
diff --git a/synth/validator/reward.py b/synth/validator/reward.py
index a5b2fee..b69c031 100644
--- a/synth/validator/reward.py
+++ b/synth/validator/reward.py
@@ -31,13 +31,13 @@
from synth.validator.miner_data_handler import MinerDataHandler
from synth.validator.price_data_provider import PriceDataProvider
from synth.validator import response_validation_v2
+from synth.validator import prompt_config
def reward(
miner_data_handler: MinerDataHandler,
miner_uid: int,
- time_increment: int,
- validator_request_id: int,
+ validator_request: ValidatorRequest,
real_prices: list[float],
):
"""
@@ -47,9 +47,8 @@ def reward(
Returns:
- float: The reward value for the miner.
"""
-
miner_prediction = miner_data_handler.get_miner_prediction(
- miner_uid, validator_request_id
+ miner_uid, int(validator_request.id)
)
if miner_prediction is None:
@@ -62,14 +61,22 @@ def reward(
if len(real_prices) == 0:
return -1, [], miner_prediction
- predictions_path = adjust_predictions(miner_prediction.prediction)
+ predictions_path = adjust_predictions(list(miner_prediction.prediction))
simulation_runs = np.array(predictions_path).astype(float)
+ scoring_intervals = (
+ prompt_config.HIGH_FREQUENCY.scoring_intervals
+ if validator_request.time_length
+ == prompt_config.HIGH_FREQUENCY.time_length
+ else prompt_config.LOW_FREQUENCY.scoring_intervals
+ )
+
try:
score, detailed_crps_data = calculate_crps_for_miner(
simulation_runs,
np.array(real_prices),
- time_increment,
+ int(validator_request.time_increment),
+ scoring_intervals,
)
except Exception as e:
bt.logging.error(
@@ -105,7 +112,7 @@ def get_rewards(
"""
miner_uids = miner_data_handler.get_miner_uid_of_prediction_request(
- validator_request.id
+ int(validator_request.id)
)
if miner_uids is None:
@@ -127,8 +134,7 @@ def get_rewards(
score, detailed_crps_data, miner_prediction = reward(
miner_data_handler,
miner_uid,
- validator_request.time_increment,
- validator_request.id,
+ validator_request,
real_prices,
)
scores.append(score)
@@ -192,10 +198,8 @@ def compute_softmax(score_values: np.ndarray, beta: float) -> np.ndarray:
bt.logging.info(f"Going to use the following value of beta: {beta}")
exp_scores = np.exp(beta * score_values)
- softmax_scores_valid = exp_scores / np.sum(exp_scores)
- softmax_scores = softmax_scores_valid
-
- return softmax_scores
+ softmax_scores_valid: np.ndarray = exp_scores / np.sum(exp_scores)
+ return softmax_scores_valid
def clean_numpy_in_crps_data(crps_data: list) -> list:
diff --git a/tests/test_calculate_crps.py b/tests/test_calculate_crps.py
index d05fb06..de6f819 100644
--- a/tests/test_calculate_crps.py
+++ b/tests/test_calculate_crps.py
@@ -2,6 +2,7 @@
import numpy as np
+from synth.validator import prompt_config
from synth.validator.crps_calculation import (
calculate_crps_for_miner,
label_observed_blocks,
@@ -19,6 +20,7 @@ def test_calculate_crps_for_miner_1(self):
np.array(predictions_path),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 284.1200564488584)
@@ -31,6 +33,7 @@ def test_calculate_crps_for_miner_1_b(self):
np.array(predictions_path),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 284.1200564488584)
@@ -43,6 +46,7 @@ def test_calculate_crps_for_miner_zero(self):
np.array(predictions_path),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 0)
@@ -55,6 +59,7 @@ def test_calculate_crps_for_miner_2(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 479.6904902048716)
@@ -68,6 +73,7 @@ def test_calculate_crps_for_miner_3(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 4737.272133130346)
@@ -119,6 +125,7 @@ def test_calculate_crps_for_miner_4(self):
np.array(predictions_path),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 13413.599141058676)
@@ -135,6 +142,7 @@ def test_calculate_crps_for_miner_5(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 0.0)
@@ -151,6 +159,7 @@ def test_calculate_crps_for_miner_6(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 0.0)
@@ -167,6 +176,7 @@ def test_calculate_crps_for_miner_7(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 0.0)
@@ -193,6 +203,7 @@ def test_calculate_crps_for_miner_8(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 0.0)
@@ -211,12 +222,14 @@ def test_calculate_crps_for_miner_9(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
sum_all_scores_2, _ = calculate_crps_for_miner(
np.array([predictions_path]),
np.array(real_price_path_full),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
with self.subTest("Check sum_all_scores equals expected"):
@@ -236,6 +249,7 @@ def test_calculate_crps_for_miner_10(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(np.isnan(sum_all_scores), True)
@@ -249,6 +263,7 @@ def test_calculate_crps_for_miner_11(self):
np.array([predictions_path]),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 1061.3650577065207)
@@ -272,6 +287,7 @@ def test_calculate_crps_for_miner_12(self):
np.array([predictions_path]).astype(float),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, 12697.728694070156)
@@ -295,6 +311,31 @@ def test_calculate_crps_for_miner_13(self):
np.array([predictions_path]).astype(float),
np.array(real_price_path),
time_increment,
+ prompt_config.LOW_FREQUENCY.scoring_intervals,
+ )
+
+ self.assertEqual(sum_all_scores, -1)
+
+ def test_calculate_crps_for_miner_gap_1(self):
+ time_increment = 300 # 300 seconds = 5 minutes
+ real_price_path = [50, 60, 65, 80, 90, 94, 101, 120, 130]
+ predictions_path = [
+ 0.00011997788254371478,
+ 0,
+ 70,
+ 82.5,
+ 89.2,
+ 100,
+ 110,
+ 123,
+ 131,
+ ]
+
+ sum_all_scores, _ = calculate_crps_for_miner(
+ np.array([predictions_path]).astype(float),
+ np.array(real_price_path),
+ time_increment,
+ prompt_config.HIGH_FREQUENCY.scoring_intervals,
)
self.assertEqual(sum_all_scores, -1)
diff --git a/tests/test_forward.py b/tests/test_forward.py
index babd711..fd45ed4 100644
--- a/tests/test_forward.py
+++ b/tests/test_forward.py
@@ -12,11 +12,12 @@
from synth.validator import response_validation_v2
from synth.validator.forward import (
calculate_moving_average_and_update_rewards,
- calculate_rewards_and_update_scores,
+ calculate_scores,
)
from synth.db.models import Miner, MinerReward
from synth.validator.miner_data_handler import MinerDataHandler
from synth.validator.price_data_provider import PriceDataProvider
+from synth.validator import prompt_config
from tests.utils import prepare_random_predictions
@@ -28,19 +29,16 @@ def test_calculate_rewards_and_update_scores(db_engine: Engine):
price_data_provider = PriceDataProvider()
- success = calculate_rewards_and_update_scores(
+ success = calculate_scores(
miner_data_handler=handler,
price_data_provider=price_data_provider,
scored_time=scored_time,
- cutoff_days=7,
+ prompt=prompt_config.LOW_FREQUENCY,
)
assert success
- miner_scores_df = handler.get_miner_scores(
- scored_time=scored_time,
- cutoff_days=2,
- )
+ miner_scores_df = handler.get_miner_scores(scored_time, 10)
assert len(miner_scores_df) == len(miner_uids)
@@ -55,11 +53,11 @@ def test_calculate_moving_average_and_update_rewards(db_engine: Engine):
price_data_provider = PriceDataProvider()
- success = calculate_rewards_and_update_scores(
+ success = calculate_scores(
miner_data_handler=handler,
price_data_provider=price_data_provider,
scored_time=scored_time,
- cutoff_days=7,
+ prompt=prompt_config.LOW_FREQUENCY,
)
assert success
@@ -67,9 +65,6 @@ def test_calculate_moving_average_and_update_rewards(db_engine: Engine):
moving_averages_data = calculate_moving_average_and_update_rewards(
miner_data_handler=handler,
scored_time=scored_time,
- cutoff_days=4,
- window_days=2,
- softmax_beta=-0.003,
)
print("moving_averages_data", moving_averages_data)
@@ -150,17 +145,14 @@ def test_calculate_moving_average_and_update_rewards_new_miner(
# scored time is start time + 24 hours and +4 minutes because new prompt every 64 minutes
scored_time = start_time + timedelta(days=1, minutes=4)
- success = calculate_rewards_and_update_scores(
+ success = calculate_scores(
miner_data_handler=handler,
price_data_provider=price_data_provider,
scored_time=scored_time,
- cutoff_days=7,
+ prompt=prompt_config.LOW_FREQUENCY,
)
- miner_scores_df = handler.get_miner_scores(
- scored_time=scored_time,
- cutoff_days=4,
- )
+ miner_scores_df = handler.get_miner_scores(scored_time, 10)
print("miner_scores_df", miner_scores_df)
@@ -169,9 +161,6 @@ def test_calculate_moving_average_and_update_rewards_new_miner(
moving_averages_data = calculate_moving_average_and_update_rewards(
miner_data_handler=handler,
scored_time=scored_time,
- cutoff_days=4,
- window_days=2,
- softmax_beta=-0.003,
)
print("moving_averages_data", moving_averages_data)
@@ -277,17 +266,14 @@ def test_calculate_moving_average_and_update_rewards_new_miner_registration(
# scored time is start time + 24 hours and +4 minutes because new prompt every 64 minutes
scored_time = start_time + timedelta(days=1, minutes=4)
- success = calculate_rewards_and_update_scores(
+ success = calculate_scores(
miner_data_handler=handler,
price_data_provider=price_data_provider,
scored_time=scored_time,
- cutoff_days=7,
+ prompt=prompt_config.LOW_FREQUENCY,
)
- miner_scores_df = handler.get_miner_scores(
- scored_time=scored_time,
- cutoff_days=4,
- )
+ miner_scores_df = handler.get_miner_scores(scored_time, 10)
print("miner_scores_df: ", miner_scores_df)
@@ -296,9 +282,6 @@ def test_calculate_moving_average_and_update_rewards_new_miner_registration(
moving_averages_data = calculate_moving_average_and_update_rewards(
miner_data_handler=handler,
scored_time=scored_time,
- cutoff_days=4,
- window_days=2,
- softmax_beta=-0.003,
)
print("moving_averages_data", moving_averages_data)
@@ -317,7 +300,7 @@ def test_calculate_moving_average_and_update_rewards_new_miner_registration(
miner_weights = [
item["reward_weight"] for item in moving_averages_data
]
- assert_almost_equal(sum(miner_weights), 1, decimal=12)
+ assert_almost_equal(sum(miner_weights), 0.5, decimal=12)
def test_calculate_moving_average_and_update_rewards_only_invalid(
@@ -397,17 +380,14 @@ def test_calculate_moving_average_and_update_rewards_only_invalid(
# scored time is start time + 24 hours and +4 minutes because new prompt every 64 minutes
scored_time = start_time + timedelta(days=1, minutes=4)
- success = calculate_rewards_and_update_scores(
+ success = calculate_scores(
miner_data_handler=handler,
price_data_provider=price_data_provider,
scored_time=scored_time,
- cutoff_days=7,
+ prompt=prompt_config.LOW_FREQUENCY,
)
- miner_scores_df = handler.get_miner_scores(
- scored_time=scored_time,
- cutoff_days=4,
- )
+ miner_scores_df = handler.get_miner_scores(scored_time, 10)
print("miner_scores_df", miner_scores_df)
@@ -416,9 +396,6 @@ def test_calculate_moving_average_and_update_rewards_only_invalid(
moving_averages_data = calculate_moving_average_and_update_rewards(
miner_data_handler=handler,
scored_time=scored_time,
- cutoff_days=4,
- window_days=2,
- softmax_beta=-0.003,
)
print("moving_averages_data", moving_averages_data)
diff --git a/tests/test_helpers.py b/tests/test_helpers.py
index 3b97b21..d6dc823 100644
--- a/tests/test_helpers.py
+++ b/tests/test_helpers.py
@@ -1,5 +1,6 @@
import unittest
-from datetime import datetime, timedelta, timezone
+from datetime import datetime
+
from synth.utils.helpers import (
convert_prices_to_time_format,
@@ -8,7 +9,6 @@
from_iso_to_unix_time,
get_current_time,
round_to_8_significant_digits,
- timeout_until,
)
@@ -75,7 +75,7 @@ def test_get_intersecting_arrays(self):
)
def test_round_time_to_minutes(self):
- time_increment = 60
+ time_increment = 0
self.assertEqual(
round_time_to_minutes(
@@ -92,50 +92,41 @@ def test_round_time_to_minutes(self):
"2024-11-25T19:04:00",
)
- def test_round_time_to_five_minutes(self):
- time_increment = 300
-
+ def test_round_time_add_extra(self):
+ # add three extra minutes
dt_str_1 = "2024-11-25T19:01:59.940515"
- dt_str_2 = "2024-11-25T19:03:59.940515"
-
result_1 = round_time_to_minutes(
- datetime.fromisoformat(dt_str_1), time_increment
+ datetime.fromisoformat(dt_str_1), 60 * 3
)
- result_2 = round_time_to_minutes(
- datetime.fromisoformat(dt_str_2), time_increment
- )
-
self.assertEqual(result_1.isoformat(), "2024-11-25T19:05:00")
+
+ # add one extra minute
+ dt_str_2 = "2024-11-25T19:03:59.940515"
+ result_2 = round_time_to_minutes(datetime.fromisoformat(dt_str_2), 60)
self.assertEqual(result_2.isoformat(), "2024-11-25T19:05:00")
- def test_round_time_to_minutes_plus_two_extra(self):
- dt_str_1 = "2024-11-25T19:01:59.940515"
- result_1 = round_time_to_minutes(
- datetime.fromisoformat(dt_str_1), 60, 120
- )
- self.assertEqual(result_1.isoformat(), "2024-11-25T19:04:00")
+ def test_round_time_add_extra_seconds(self):
+ dt_str_1 = "2024-11-25T19:11:46.940515"
+ result_1 = round_time_to_minutes(datetime.fromisoformat(dt_str_1), 10)
+ self.assertEqual(result_1.isoformat(), "2024-11-25T19:12:10")
dt_str_2 = "2024-11-25T19:03:09.659353"
- result_2 = round_time_to_minutes(
- datetime.fromisoformat(dt_str_2), 60, 120
- )
+ result_2 = round_time_to_minutes(datetime.fromisoformat(dt_str_2), 120)
self.assertEqual(result_2.isoformat(), "2024-11-25T19:06:00")
def test_round_time_to_two_minutes(self):
- time_increment = 120
extra_seconds = 60
dt_str_1 = "2024-11-25T19:01:59.940515"
- dt_str_2 = "2024-11-25T19:03:59.940515"
-
result_1 = round_time_to_minutes(
- datetime.fromisoformat(dt_str_1), time_increment, extra_seconds
+ datetime.fromisoformat(dt_str_1), extra_seconds
)
+ self.assertEqual(result_1.isoformat(), "2024-11-25T19:03:00")
+
+ dt_str_2 = "2024-11-25T19:03:59.940515"
result_2 = round_time_to_minutes(
- datetime.fromisoformat(dt_str_2), time_increment, extra_seconds
+ datetime.fromisoformat(dt_str_2), extra_seconds
)
-
- self.assertEqual(result_1.isoformat(), "2024-11-25T19:03:00")
self.assertEqual(result_2.isoformat(), "2024-11-25T19:05:00")
def test_from_iso_to_unix_time(self):
@@ -148,25 +139,3 @@ def test_from_iso_to_unix_time(self):
self.assertEqual(
from_iso_to_unix_time("2025-08-05T14:56:00+00:00"), 1754405760
)
-
- def test_timeout_until(self):
- # Arrange: Set a future time 10 seconds from now
- future_time = datetime.now(timezone.utc) + timedelta(seconds=10)
-
- # Act: Call the timeout_until function
- timeout = timeout_until(future_time)
-
- # Assert: The timeout should be approximately 10 seconds
- assert (
- 9 <= timeout <= 10
- ), f"Expected timeout to be around 10 seconds, got {timeout}"
-
- def test_timeout_until_past_time(self):
- # Arrange: Set a past time 10 seconds ago
- past_time = datetime.now(timezone.utc) - timedelta(seconds=10)
-
- # Act: Call the timeout_until function
- timeout = timeout_until(past_time)
-
- # Assert: The timeout should be negative
- assert timeout == 0, f"Expected timeout to be 0, got {timeout}"
diff --git a/tests/test_miner_data_handler.py b/tests/test_miner_data_handler.py
index 4f33a6a..eccf836 100644
--- a/tests/test_miner_data_handler.py
+++ b/tests/test_miner_data_handler.py
@@ -56,13 +56,19 @@ def test_get_values_within_range(db_engine: Engine):
handler = MinerDataHandler(db_engine)
handler.save_responses(simulation_data, simulation_input, datetime.now())
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
+ assert validator_requests is not None
assert len(validator_requests) == 1
- result = handler.get_miner_prediction(miner_uid, validator_requests[0].id)
+ result = handler.get_miner_prediction(
+ miner_uid, int(validator_requests[0].id)
+ )
# get only second element from the result tuple
# that corresponds to the prediction result
+ assert result is not None
prediction = result.prediction
assert len(prediction) == 1
@@ -110,8 +116,11 @@ def test_get_values_ongoing_range(db_engine: Engine):
handler = MinerDataHandler(db_engine)
handler.save_responses(simulation_data, simulation_input, datetime.now())
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
+ assert validator_requests is not None
assert len(validator_requests) == 0
@@ -176,10 +185,15 @@ def test_multiple_records_for_same_miner(db_engine: Engine):
simulation_data_2, simulation_input_2, datetime.now()
)
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
+ assert validator_requests is not None
assert len(validator_requests) == 2
- result = handler.get_miner_prediction(miner_uid, validator_requests[1].id)
+ result = handler.get_miner_prediction(
+ miner_uid, int(validator_requests[1].id)
+ )
assert result is not None
@@ -258,13 +272,19 @@ def test_multiple_records_for_same_miner_with_overlapping(db_engine: Engine):
simulation_data_2, simulation_input_2, datetime.now()
)
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
+ assert validator_requests is not None
assert len(validator_requests) == 1
- result = handler.get_miner_prediction(miner_uid, validator_requests[0].id)
+ result = handler.get_miner_prediction(
+ miner_uid, int(validator_requests[0].id)
+ )
# get only second element from the result tuple
# that corresponds to the prediction result
+ assert result is not None
prediction = result.prediction
assert len(prediction) == 1
@@ -285,7 +305,10 @@ def test_no_data_for_miner(db_engine: Engine):
handler = MinerDataHandler(db_engine)
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
+ assert validator_requests is not None
assert len(validator_requests) == 0
@@ -322,10 +345,16 @@ def test_get_values_incorrect_format(db_engine: Engine):
handler = MinerDataHandler(db_engine)
handler.save_responses(simulation_data, simulation_input, datetime.now())
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
+ assert validator_requests is not None
assert len(validator_requests) == 1
- result = handler.get_miner_prediction(miner_uid, validator_requests[0].id)
+ result = handler.get_miner_prediction(
+ miner_uid, int(validator_requests[0].id)
+ )
+ assert result is not None
prediction = result.prediction
format_validation = result.format_validation
@@ -340,8 +369,10 @@ def test_set_get_scores(db_engine: Engine):
scored_time = datetime.fromisoformat("2024-11-27T00:00:00+00:00")
handler, _, _ = prepare_random_predictions(db_engine, start_time)
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
-
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
+ assert validator_requests is not None
assert len(validator_requests) == 1
prompt_scores, detailed_info, real_prices = get_rewards(
@@ -353,12 +384,12 @@ def test_set_get_scores(db_engine: Engine):
assert prompt_scores is not None
handler.set_miner_scores(
- real_prices, validator_requests[0].id, detailed_info, scored_time
+ real_prices, int(validator_requests[0].id), detailed_info, scored_time
)
miner_scores_df = handler.get_miner_scores(
scored_time=scored_time,
- cutoff_days=4,
+ window_days=4,
)
print("miner_scores_df", miner_scores_df)
diff --git a/tests/test_moving_average.py b/tests/test_moving_average.py
index 8ca34f2..6ee1dee 100644
--- a/tests/test_moving_average.py
+++ b/tests/test_moving_average.py
@@ -6,6 +6,7 @@
from synth.validator.miner_data_handler import MinerDataHandler
from synth.validator.moving_average import compute_smoothed_score
+from synth.validator.prompt_config import LOW_FREQUENCY
def read_csv(file_name):
@@ -18,7 +19,6 @@ def test_moving_average_1(db_engine: Engine):
handler = MinerDataHandler(db_engine)
scored_time = datetime.fromisoformat("2025-02-21T17:23:00+00:00")
- window_days = 2
df = read_csv("cutoff_data_4_days.csv")
df["scored_time"] = pd.to_datetime(df["scored_time"])
@@ -26,9 +26,8 @@ def test_moving_average_1(db_engine: Engine):
moving_averages_data = compute_smoothed_score(
handler,
input_df=df,
- window_days=window_days,
scored_time=scored_time,
- softmax_beta=-0.003,
+ prompt_config=LOW_FREQUENCY,
)
# The miner id you want to search for
@@ -54,7 +53,6 @@ def test_moving_average_2(db_engine: Engine):
handler = MinerDataHandler(db_engine)
scored_time = datetime.fromisoformat("2025-02-21T17:23:00+00:00")
- window_days = 1
df = read_csv("cutoff_data_2_days.csv")
df["scored_time"] = pd.to_datetime(df["scored_time"])
@@ -62,9 +60,8 @@ def test_moving_average_2(db_engine: Engine):
moving_averages_data = compute_smoothed_score(
handler,
input_df=df,
- window_days=window_days,
scored_time=scored_time,
- softmax_beta=-0.003,
+ prompt_config=LOW_FREQUENCY,
)
# The miner id you want to search for
diff --git a/tests/test_response_validation.py b/tests/test_response_validation.py
index 2833611..cd4bc42 100644
--- a/tests/test_response_validation.py
+++ b/tests/test_response_validation.py
@@ -103,12 +103,12 @@ def test_validate_responses_incorrect_number_of_paths():
)
assert result == "Number of paths is incorrect: expected 2, got 0"
- response = (int(start_time.timestamp()), time_increment, [123.45])
+ response2 = (int(start_time.timestamp()), time_increment, [123.45])
request_time = start_time
process_time_str = "0"
result = validate_responses(
- response, simulation_input, request_time, process_time_str
+ response2, simulation_input, request_time, process_time_str
)
assert result == "Number of paths is incorrect: expected 2, got 1"
@@ -174,19 +174,19 @@ def test_validate_responses_incorrect_start_time():
== "Start time format is incorrect: expected int, got "
)
- response = (start_time.timestamp(), time_increment, [123.45] * 11)
+ response2 = (start_time.timestamp(), time_increment, [123.45] * 11)
request_time = start_time
process_time_str = "0"
result = validate_responses(
- response, simulation_input, request_time, process_time_str
+ response2, simulation_input, request_time, process_time_str
)
assert (
result
== "Start time format is incorrect: expected int, got "
)
- response = (
+ response3 = (
int(start_time.timestamp()) + 1,
time_increment,
[123.45] * 11,
@@ -195,7 +195,7 @@ def test_validate_responses_incorrect_start_time():
process_time_str = "0"
result = validate_responses(
- response, simulation_input, request_time, process_time_str
+ response3, simulation_input, request_time, process_time_str
)
assert (
result
@@ -222,7 +222,7 @@ def test_validate_responses_incorrect_time_increment():
== "Time increment format is incorrect: expected int, got "
)
- response = (
+ response2 = (
int(start_time.timestamp()),
time_increment + 1,
[123.45] * 11,
@@ -231,7 +231,7 @@ def test_validate_responses_incorrect_time_increment():
process_time_str = "0"
result = validate_responses(
- response, simulation_input, request_time, process_time_str
+ response2, simulation_input, request_time, process_time_str
)
assert result == "Time increment is incorrect: expected 1, got 2"
diff --git a/tests/test_rewards.py b/tests/test_rewards.py
index 3e06f88..c33d10c 100644
--- a/tests/test_rewards.py
+++ b/tests/test_rewards.py
@@ -78,7 +78,9 @@ def test_get_rewards(db_engine):
price_data_provider = PriceDataProvider()
- validator_requests = handler.get_latest_prediction_requests(scored_time, 7)
+ validator_requests = handler.get_validator_requests_to_score(
+ scored_time, 7
+ )
prompt_scores, detailed_info, real_prices = get_rewards(
handler,
diff --git a/tests/test_simulations.py b/tests/test_simulations.py
index b106213..707f830 100644
--- a/tests/test_simulations.py
+++ b/tests/test_simulations.py
@@ -33,7 +33,7 @@ def test_run():
)
current_time = get_current_time()
- start_time = round_time_to_minutes(current_time, 60, 120)
+ start_time = round_time_to_minutes(current_time, 120)
simulation_input.start_time = start_time.isoformat()
print("start_time", simulation_input.start_time)
diff --git a/validator.config.js b/validator.config.js
index 50b5384..31e20a6 100644
--- a/validator.config.js
+++ b/validator.config.js
@@ -4,7 +4,7 @@ module.exports = {
name: "validator",
interpreter: "python3",
script: "./neurons/validator.py",
- args: "--netuid 50 --logging.debug --wallet.name validator --wallet.hotkey default --neuron.axon_off true --neuron.vpermit_tao_limit 999999 --ewma.window_days 10 --ewma.cutoff_days 10 --softmax.beta -0.1",
+ args: "--netuid 50 --logging.debug --wallet.name validator --wallet.hotkey default --neuron.axon_off true --neuron.vpermit_tao_limit 999999 --ewma.window_days 10 --softmax.beta -0.05",
env: {
PYTHONPATH: ".",
},
diff --git a/validator.test.config.js b/validator.test.config.js
index 13e0105..a5dbe06 100644
--- a/validator.test.config.js
+++ b/validator.test.config.js
@@ -4,7 +4,7 @@ module.exports = {
name: "validator",
interpreter: "python3",
script: "./neurons/validator.py",
- args: "--netuid 247 --logging.debug --logging.trace --subtensor.network test --wallet.name validator --wallet.hotkey default --neuron.axon_off true --ewma.window_days 10 --ewma.cutoff_days 10 --softmax.beta -0.1",
+ args: "--netuid 247 --logging.debug --logging.trace --subtensor.network test --wallet.name validator --wallet.hotkey default --neuron.axon_off true --ewma.window_days 10 --softmax.beta -0.05",
env: {
PYTHONPATH: ".",
},