build
ashioyajotham committed Jan 26, 2025
1 parent a0ceddb commit 7dc3c2d
Showing 8 changed files with 328 additions and 48 deletions.
1 change: 1 addition & 0 deletions config/model.yaml
@@ -2,6 +2,7 @@ llm:
fingpt:
base_model: "tiiuae/falcon-7b"
peft_model: "FinGPT/fingpt-mt_falcon-7b_lora"
from_remote: true
cache:
base_dir: "%LOCALAPPDATA%/fingpt_trader"
model_dir: "models"
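
A minimal sketch (not part of this commit) of how a loader might consume this file: it assumes PyYAML is installed and that the caller, not YAML itself, expands the Windows-style %LOCALAPPDATA% placeholder; the function name load_model_config and the non-Windows fallback are illustrative.

import os
from pathlib import Path

import yaml


def load_model_config(path: str = "config/model.yaml") -> dict:
    with open(path, "r", encoding="utf-8") as f:
        cfg = yaml.safe_load(f)
    cache = cfg["llm"]["fingpt"].setdefault("cache", {})
    # os.path.expandvars resolves %LOCALAPPDATA% on Windows only.
    base_dir = os.path.expandvars(cache.get("base_dir", ""))
    if not base_dir or "%" in base_dir:
        # Placeholder left unresolved (e.g. Linux/macOS): fall back to the home directory.
        base_dir = str(Path.home() / ".fingpt_trader")
    cache["base_dir"] = base_dir
    return cfg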
39 changes: 38 additions & 1 deletion config/trading.yaml
@@ -70,4 +70,41 @@ robo:
min_score: 0.7
environmental_weight: 0.4
social_weight: 0.3
    governance_weight: 0.3

# Exchange settings
exchanges:
- name: "binance"
api_key: "${BINANCE_API_KEY}"
api_secret: "${BINANCE_API_SECRET}"

# Risk management settings
risk:
thresholds:
volatility: 3.0 # Circuit breaker volatility threshold
volume: 5.0 # Volume spike threshold
spread: 0.05 # Maximum bid-ask spread
imbalance: 0.7 # Order book imbalance threshold

limits:
max_drawdown: 0.15
var_limit: 0.02
max_leverage: 2.0
position_limit: 0.1

regime_controls:
NORMAL:
position_scale: 1.0
HIGH_VOL:
position_scale: 0.5
STRESS:
position_scale: 0.25
CRISIS:
position_scale: 0.0
LOW_LIQUIDITY:
position_scale: 0.3

# Logging configuration
logging:
level: INFO
format: '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
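
The ${BINANCE_API_KEY}-style placeholders imply the config loader substitutes environment variables before the exchange client is built. A small sketch of one way to do that, assuming the variables are already exported; resolve_placeholders is a hypothetical helper, not something this commit adds.

import os
import re
from typing import Any

_ENV_VAR = re.compile(r"\$\{([^}]+)\}")


def resolve_placeholders(node: Any) -> Any:
    """Recursively replace ${VAR} placeholders with values from the environment."""
    if isinstance(node, dict):
        return {k: resolve_placeholders(v) for k, v in node.items()}
    if isinstance(node, list):
        return [resolve_placeholders(v) for v in node]
    if isinstance(node, str):
        # Leave the placeholder untouched if the variable is unset,
        # so missing credentials fail loudly downstream.
        return _ENV_VAR.sub(lambda m: os.environ.get(m.group(1), m.group(0)), node)
    return node


# Example: resolve_placeholders({"api_key": "${BINANCE_API_KEY}"})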
104 changes: 69 additions & 35 deletions models/llm/fingpt.py
@@ -39,6 +39,7 @@ def __init__(self, config: Dict[str, Any]):
model_config = config.get('llm', {}).get('fingpt', {})
self.base_model = model_config.get('base_model', "tiiuae/falcon-7b")
self.peft_model = model_config.get('peft_model', "FinGPT/fingpt-mt_falcon-7b_lora")
self.from_remote = model_config.get('from_remote', True)

# Setup cache directories
cache_config = model_config.get('cache', {})
@@ -57,57 +58,90 @@ def __init__(self, config: Dict[str, Any]):
    def _load_model(self):
        """Initialize model with proper device handling"""
        try:
-           # 1. Initialize tokenizer from base model
+           # Use model loading function similar to example
+           from transformers import AutoModelForCausalLM, AutoTokenizer
+
+           # 1. Initialize tokenizer
            self.tokenizer = AutoTokenizer.from_pretrained(
-               self.base_model,
+               "tiiuae/falcon-7b",
+               cache_dir=str(self.model_cache_dir),
                token=self.token,
                trust_remote_code=True
            )
-
-           # 2. Initialize model with accelerate
-           with init_empty_weights():
-               model = AutoModelForCausalLM.from_pretrained(
-                   self.base_model,
-                   token=self.token,
-                   trust_remote_code=True,
-                   torch_dtype=torch.float16,
-                   cache_dir=str(self.model_cache_dir)
-               )
-
-           # 3. Download PEFT model if needed and get local path
-           peft_path = self._ensure_peft_model_downloaded()
-
-           # 4. Load and dispatch model with offload folder
-           self.model = load_checkpoint_and_dispatch(
-               model,
-               checkpoint=peft_path,
-               device_map="auto",
-               no_split_module_classes=["FalconDecoderLayer"],
-               dtype=torch.float16,
-               offload_folder=str(self.offload_folder)
-           )
+
+           # 2. Initialize base model with quantization
+           base_model = AutoModelForCausalLM.from_pretrained(
+               "tiiuae/falcon-7b",
+               cache_dir=str(self.model_cache_dir),
+               token=self.token,
+               trust_remote_code=True,
+               load_in_8bit=True,  # Enable 8-bit quantization
+               device_map="auto"
+           )
+
+           # 3. Load PEFT adapter
+           peft_model_id = self.peft_model if self.from_remote else "finetuned_models/MT-falcon-linear_202309210126"
+           self.model = PeftModel.from_pretrained(
+               base_model,
+               peft_model_id,
+               token=self.token
+           )
+
+           # 4. Set to evaluation mode
+           self.model.eval()

        except Exception as e:
            raise RuntimeError(f"Failed to load FinGPT model: {str(e)}")

    def _ensure_peft_model_downloaded(self) -> str:
        """Download PEFT model and return local path"""
-       from huggingface_hub import snapshot_download
+       from huggingface_hub import snapshot_download, hf_hub_download
+       import shutil

        try:
-           # Download model files to checkpoint directory
-           local_path = snapshot_download(
-               repo_id=self.peft_model,
-               token=self.token,
-               cache_dir=str(self.checkpoint_dir),
-               local_files_only=False  # Force check for updates
-           )
-           return local_path
+           # Setup model directory
+           model_dir = self.checkpoint_dir / self.peft_model.split('/')[-1]
+           if model_dir.exists():
+               shutil.rmtree(model_dir)
+           model_dir.mkdir(parents=True, exist_ok=True)
+
+           # Download required files individually
+           required_files = ['config.json', 'adapter_config.json', 'adapter_model.bin']
+           for filename in required_files:
+               try:
+                   file_path = hf_hub_download(
+                       repo_id=self.peft_model,
+                       filename=filename,
+                       token=self.token,
+                       cache_dir=str(model_dir),
+                       local_files_only=False,
+                       resume_download=True
+                   )
+                   # Copy to final location if needed
+                   if Path(file_path).parent != model_dir:
+                       shutil.copy2(file_path, model_dir / filename)
+               except Exception as e:
+                   raise RuntimeError(f"Failed to download {filename}: {str(e)}")
+
+           return str(model_dir)

        except Exception as e:
            raise RuntimeError(f"Failed to download PEFT model: {str(e)}")

def _ensure_model_files(self):
"""Ensure model files are downloaded"""
cache_dir = Path(os.getenv('LOCALAPPDATA')) / 'fingpt_trader/models'
cache_dir.mkdir(parents=True, exist_ok=True)

if not (cache_dir / 'checkpoints').exists():
# Force download model files
from huggingface_hub import snapshot_download
snapshot_download(
repo_id="FinGPT/fingpt-mt_falcon-7b_lora",
cache_dir=str(cache_dir),
token=self.token
)

async def load_model(self) -> None:
"""Load FinGPT Falcon model"""
await self._load_falcon_model(
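Once _load_model has attached the LoRA adapter to the 8-bit base model, inference follows the usual transformers pattern. A hedged sketch; the generate_signal helper and the prompt handling are illustrative, not part of the repository.

import torch


def generate_signal(model, tokenizer, prompt: str, max_new_tokens: int = 64) -> str:
    """Run greedy decoding and return only the newly generated text."""
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=False,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Strip the prompt tokens so only the completion is returned.
    new_tokens = output[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)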
84 changes: 82 additions & 2 deletions models/portfolio/rebalancing.py
@@ -1,7 +1,11 @@
-from typing import Dict, List, Tuple
+from typing import Dict, List, Tuple, Optional

import numpy as np
import pandas as pd

+from models.portfolio.risk import MarketRegime, MarketRegimeDetector, CircuitBreaker
+
+import logging
+logger = logging.getLogger(__name__)

class Portfolio:
def __init__(self):
@@ -73,3 +77,79 @@ def _generate_rebalance_trades(self) -> List[Tuple[str, float]]:
trades.append((symbol, trade_quantity))

return trades


class PortfolioRebalancer:
def __init__(self, config: Dict):
self.config = config
self.regime_detector = MarketRegimeDetector()
self.circuit_breaker = CircuitBreaker(config.get('risk', {}).get('thresholds', {}))

# Regime-based rebalancing thresholds
self.regime_thresholds = {
MarketRegime.NORMAL: 0.05, # 5% deviation trigger
MarketRegime.HIGH_VOL: 0.08, # 8% in high volatility
MarketRegime.STRESS: 0.10, # 10% in stress
MarketRegime.CRISIS: 1.0, # No rebalancing in crisis
MarketRegime.LOW_LIQUIDITY: 0.15 # 15% in low liquidity
}

async def check_rebalance_needed(
self,
current_weights: Dict[str, float],
target_weights: Dict[str, float],
market_data: Dict
) -> bool:
"""Check if rebalancing is needed based on market regime"""
# Check circuit breaker first
if self.circuit_breaker.check_conditions(market_data):
return False # Don't rebalance if circuit breaker triggered

# Detect current market regime
current_regime = self.regime_detector.detect_regime(market_data)
threshold = self.regime_thresholds[current_regime]

# Calculate maximum deviation
max_deviation = 0.0
for asset in target_weights:
current = current_weights.get(asset, 0.0)
target = target_weights.get(asset, 0.0)
deviation = abs(current - target)
max_deviation = max(max_deviation, deviation)

return max_deviation > threshold

async def calculate_rebalance_trades(
self,
current_positions: Dict[str, float],
target_weights: Dict[str, float],
market_data: Dict
) -> Optional[Dict[str, float]]:
"""Calculate required trades for rebalancing"""
try:
if not await self.check_rebalance_needed(
current_positions, target_weights, market_data
):
return None

# Calculate trades considering market impact
trades = {}
total_value = sum(current_positions.values())

for asset, target in target_weights.items():
current = current_positions.get(asset, 0.0)
target_value = total_value * target
trade_size = target_value - current

# Apply market regime-based size limits
regime = self.regime_detector.detect_regime(market_data)
if regime != MarketRegime.NORMAL:
trade_size *= self.regime_thresholds[regime]

trades[asset] = trade_size

return trades

except Exception as e:
logger.error(f"Error calculating rebalance trades: {str(e)}")
return None
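
A usage sketch for the new class, assuming the risk.thresholds block from config/trading.yaml has been parsed into a dict; the asset names and market_data keys are illustrative and must match whatever MarketRegimeDetector and CircuitBreaker actually read.

import asyncio


async def demo_rebalance():
    config = {"risk": {"thresholds": {"volatility": 3.0, "volume": 5.0,
                                      "spread": 0.05, "imbalance": 0.7}}}
    rebalancer = PortfolioRebalancer(config)

    current_positions = {"BTC": 6000.0, "ETH": 4000.0}   # position values
    target_weights = {"BTC": 0.5, "ETH": 0.5}            # desired allocation
    market_data = {"volatility": 1.1, "spread": 0.01}    # illustrative only

    trades = await rebalancer.calculate_rebalance_trades(
        current_positions, target_weights, market_data
    )
    print(trades)  # None when no rebalance is needed, else {asset: trade value}


asyncio.run(demo_rebalance())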
2 changes: 1 addition & 1 deletion models/sentiment/analyzer.py
@@ -16,7 +16,7 @@

class SentimentAnalyzer:
def __init__(self, model_config: Dict):
-        self.config = model_config or {}
+        self.config = model_config or {}  # gets from config/ directory
self.fingpt = FinGPT(self.config.get("fingpt_config", {}))
self.min_confidence = self.config.get("min_confidence", 0.6)
self.batch_size = model_config.get("batch_size", 16)
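
For reference, the constructor takes a plain dict; a minimal construction sketch with illustrative values, where the nested fingpt_config mirrors the llm.fingpt layout that FinGPT.__init__ reads.

analyzer = SentimentAnalyzer({
    "fingpt_config": {"llm": {"fingpt": {"base_model": "tiiuae/falcon-7b"}}},
    "min_confidence": 0.6,
    "batch_size": 16,
})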
59 changes: 55 additions & 4 deletions services/monitoring/system_monitor.py
@@ -1,21 +1,39 @@
import sys
import time
from pathlib import Path
-from typing import Dict, List
+from typing import Dict, List, Optional
+from datetime import datetime
+import logging

import psutil

from models.portfolio.risk import MarketRegimeDetector, CircuitBreaker, MarketRegime

# Add project root to path
root_dir = str(Path(__file__).parent.parent)
sys.path.insert(0, root_dir)
from services.base_service import BaseService

logger = logging.getLogger(__name__)

class SystemMonitor(BaseService):
-    def __init__(self, config: Dict = None):
+    def __init__(self, config: Optional[Dict] = None):
        super().__init__(config)
-        self.metrics = {}
-        self.service_status = {}
+        self.config = config or {}
+        self.metrics: Dict[str, float] = {}
+        self.service_status: Dict[str, str] = {}
+        self.metrics_history: List[Dict] = []

# Initialize risk monitoring
        risk_config = self.config.get('risk', {})  # read via self.config so a None config is safe
self.regime_detector = MarketRegimeDetector()
self.circuit_breaker = CircuitBreaker(risk_config.get('thresholds', {}))
self.current_regime: Optional[MarketRegime] = None

# Configure logging
        log_level = self.config.get('logging', {}).get('level', 'INFO')
logger.setLevel(getattr(logging, log_level))

self.thresholds = {
"cpu_percent": 80.0,
"memory_percent": 85.0,
@@ -56,3 +74,36 @@ async def check_thresholds(self) -> List[str]:
alerts.append(f"{metric} exceeded threshold: {metrics[metric]}")

return alerts

async def update_metrics(self, market_data: Dict) -> None:
"""Update system metrics and risk status"""
try:
# Update market regime
self.current_regime = self.regime_detector.detect_regime(market_data)

# Check circuit breaker
circuit_breaker_triggered = self.circuit_breaker.check_conditions(market_data)

# Record metrics
timestamp = datetime.now().isoformat()
metrics = {
'timestamp': timestamp,
'market_regime': self.current_regime.value,
'circuit_breaker': circuit_breaker_triggered,
'volatility': market_data.get('volatility', 0),
'liquidity': market_data.get('liquidity', 0),
'correlation': market_data.get('correlation', 0)
}

self.metrics.update(metrics)
self.metrics_history.append(metrics)

# Log significant changes
if circuit_breaker_triggered:
logger.warning("Circuit breaker triggered")
if self.current_regime in [MarketRegime.STRESS, MarketRegime.CRISIS]:
logger.warning(f"Market regime changed to {self.current_regime.value}")

except Exception as e:
logger.error(f"Error updating system metrics: {str(e)}")
raise
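
A quick sketch of driving the new update_metrics path, assuming BaseService.__init__ accepts the config dict and that the market_data keys match what the risk module expects; the values shown are illustrative.

import asyncio


async def demo_monitor():
    monitor = SystemMonitor(config={"risk": {"thresholds": {}}, "logging": {"level": "INFO"}})
    await monitor.update_metrics({
        "volatility": 2.4,
        "liquidity": 0.7,
        "correlation": 0.3,
    })
    print(monitor.metrics.get("market_regime"), monitor.metrics.get("circuit_breaker"))


asyncio.run(demo_monitor())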