diff --git a/README.md b/README.md
index 984de96..6cf6aaa 100644
--- a/README.md
+++ b/README.md
@@ -262,7 +262,7 @@ $ nohup python neurons/validator.py --netuid 8 --wallet.name --wallet.h
 
 # Running a Miner
 
-If you're running a miner, you should see three types of requests: LiveForwardHash, LiveForward, and LiveBackward.
+If you're running a miner, you should see two types of requests: LiveForwardHash and LiveForward.
 LiveForwardHash is requested first, which will be a hash of your predictions. LiveForward will be requested
 60 seconds later which will request the actual predictions made (non-hashed). Using the hash and the actual predictions, validators can validate the
@@ -270,7 +270,6 @@ authenticity of the predictions made, ensuring no participants are copying anoth
 
 - **LiveForwardHash** - will be when you provide a hash of your predictions.
 - **LiveForward** - will be when your miner provides your actual predictions.
-- **LiveBackward** - will receive the results that occurred live if you want to use them for any updating purposes.
 
 You'll receive rewards for your predictions `~10 hours` after making them. Therefore, if you start running on the
 network, you should expect a lag in receiving rewards. Predictions are reviewed and rewarded every 30 minutes.
diff --git a/neurons/miner.py b/neurons/miner.py
index e98cdbd..a2eab55 100644
--- a/neurons/miner.py
+++ b/neurons/miner.py
@@ -96,13 +96,13 @@ def get_config():
 
 # TODO: Move this into a stream mining solution class, so each stream can be predicted using different sources, models, etc...
 def get_predictions(
-    tims_ms: int,
+    time_ms: int,
     feature_source: FeatureSource,
     feature_scaler: FeatureScaler,
     model: BaseMiningModel,
 ):
     # TODO: interval should come from the stream definition
-    lookback_time_ms = tims_ms - (model.sample_count * INTERVAL_MS)
+    lookback_time_ms = time_ms - (model.sample_count * INTERVAL_MS)
 
     feature_samples = feature_source.get_feature_samples(
         lookback_time_ms, INTERVAL_MS, model.sample_count
@@ -499,21 +499,6 @@ def live_f(synapse: template.protocol.LiveForward) -> template.protocol.LiveForw
             except Exception as e:
                 bt.logging.error(f"error returning synapse to vali: {e}")
 
-    # def lb_blacklist_fn(synapse: template.protocol.LiveBackward) -> Tuple[bool, str]:
-    #     # standardizing not accepting lb for now. Miner can override if they'd like.
-    #     return False, synapse.dendrite.hotkey
-    #
-    # def lb_priority_fn(synapse: template.protocol.LiveBackward) -> float:
-    #     caller_uid = metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index.
-    #     prirority = float( metagraph.S[ caller_uid ] ) # Return the stake as the priority.
-    #     bt.logging.trace(f'Prioritizing {synapse.dendrite.hotkey} with value: ', prirority)
-    #     return prirority
-
-    # def live_b(synapse: template.protocol.LiveBackward) -> template.protocol.LiveBackward:
-    #     bt.logging.debug(f'received lb with length {len(synapse.samples.numpy())}')
-    #     synapse.received = True
-    #     return synapse
-
     # Build and link miner functions to the axon.
     # The axon handles request processing, allowing validators to send this process requests.
     bt.logging.info(f"setting port [{config.axon.port}]")
@@ -624,4 +609,4 @@ def live_f(synapse: template.protocol.LiveForward) -> template.protocol.LiveForw
 
 # This is the main function, which runs the miner.
 if __name__ == "__main__":
-    main(get_config())
\ No newline at end of file
+    main(get_config())
diff --git a/neurons/validator.py b/neurons/validator.py
index 22df2b7..f6aa242 100644
--- a/neurons/validator.py
+++ b/neurons/validator.py
@@ -18,7 +18,6 @@
 )
 from template.protocol import (
     LiveForward,
-    LiveBackward,
     LiveForwardHash,
 )
 import time
@@ -190,7 +189,9 @@ def run_time_series_validation(
             # check for invalid miner pred
             for miner_pred in predictions:
                 if math.isnan(miner_pred):
-                    raise ValueError(f"invalid miner preds [{miner_pred}]")
+                    raise ValueError(
+                        f"invalid miner preds [{miner_pred}]"
+                    )
 
             hashed_predictions = HashingUtils.hash_predictions(
                 miner_hotkey, str(predictions.tolist())
@@ -287,30 +288,7 @@ def run_time_series_validation(
             # TODO: Improve validators to allow multiple features in predictions
             validation_array = validation_array.flatten()
 
-            bt.logging.info(
-                "results gathered sending back to miners via backprop and weighing"
-            )
-
-            # Send back the results for backprop so miners can learn
-            results = bt.tensor(validation_array)
-
-            results_backprop_proto = LiveBackward(
-                request_uuid=request_uuid,
-                stream_id=stream_type,
-                samples=results,
-                topic_id=request_df.topic_id,
-            )
-
-            try:
-                dendrite.query(
-                    metagraph.axons, results_backprop_proto, deserialize=True
-                )
-                bt.logging.info("live results sent back to miners")
-            except Exception: # noqa
-                traceback.print_exc()
-                bt.logging.info(
-                    "failed sending back results to miners and continuing..."
-                )
+            bt.logging.info("results gathered sending back to miners via weighing")
 
             scores = {}
             for miner_uid, miner_preds in vali_request.predictions.items():
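
The README change above describes a commit-and-reveal flow: the miner first answers LiveForwardHash with a hash of its predictions, then answers LiveForward with the raw values about 60 seconds later, and the validator checks that the reveal matches the earlier hash. The sketch below is only an illustration of that idea, not the repository's actual implementation: the hashing scheme (SHA-256 over the hotkey concatenated with the stringified prediction list), the `hash_predictions` helper, and the placeholder hotkey are all assumptions; the real `HashingUtils.hash_predictions` used in `validator.py` may differ.

```python
# Illustrative sketch only. SHA-256 over hotkey + str(predictions) is an assumed
# scheme; the repo's HashingUtils.hash_predictions may use a different digest.
import hashlib


def hash_predictions(miner_hotkey: str, predictions_str: str) -> str:
    # Commit phase: bind the prediction values to the miner's hotkey.
    return hashlib.sha256((miner_hotkey + predictions_str).encode()).hexdigest()


# LiveForwardHash: the miner first returns only the hash of its predictions.
miner_hotkey = "example_miner_hotkey"  # hypothetical placeholder
predictions = [1.23, 1.25, 1.27]
committed_hash = hash_predictions(miner_hotkey, str(predictions))

# LiveForward (~60 seconds later): the miner reveals the raw predictions.
revealed_predictions = [1.23, 1.25, 1.27]

# The validator re-hashes the revealed values and compares them to the earlier
# commitment, so a miner cannot copy another miner's predictions after seeing them.
assert hash_predictions(miner_hotkey, str(revealed_predictions)) == committed_hash
```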