diff --git a/.DS_Store b/.DS_Store
deleted file mode 100644
index 704ecca..0000000
Binary files a/.DS_Store and /dev/null differ
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..35ad647
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,2 @@
+.venv
+cache
\ No newline at end of file
diff --git a/.env.example b/.env.example
new file mode 100644
index 0000000..c59511e
--- /dev/null
+++ b/.env.example
@@ -0,0 +1,3 @@
+RPC_URL=""
+BACKEND_URL=http://localhost:8000
+DEV=true
\ No newline at end of file
diff --git a/.github/workflows/master.yaml b/.github/workflows/master.yaml
new file mode 100644
index 0000000..1bf78f0
--- /dev/null
+++ b/.github/workflows/master.yaml
@@ -0,0 +1,91 @@
+name: Build Image And Deploy
+on:
+ push:
+    branches: [master, sina/risk-dashboard-monorepo-update] # Temporarily allow this branch to run the workflow
+
+jobs:
+ build-frontend:
+ runs-on: ubicloud
+ env:
+ ECR_REPO_URI: 875427118836.dkr.ecr.eu-west-1.amazonaws.com/v2-risk-dashboard
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v3
+
+ - name: Configure AWS credentials
+ uses: aws-actions/configure-aws-credentials@master
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PROD }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY_PROD }}
+ aws-region: "eu-west-1"
+
+ - name: Log in to Amazon ECR
+ id: login-ecr
+ uses: aws-actions/amazon-ecr-login@v2
+
+ - name: Install kubectl
+ run: |
+ curl -o kubectl https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.7/2022-06-29/bin/linux/amd64/kubectl
+ curl https://s3.us-west-2.amazonaws.com/amazon-eks/1.23.7/2022-06-29/bin/linux/amd64/kubectl.sha256
+ openssl sha1 -sha256 kubectl
+ chmod +x ./kubectl
+ kubectl version --client=true
+
+ - name: Docker build
+ run: |
+ ECR_REPO_URI=875427118836.dkr.ecr.eu-west-1.amazonaws.com/v2-risk-dashboard
+ docker build -f ./Dockerfile-frontend -t $ECR_REPO_URI:${{github.sha}}-frontend -t $ECR_REPO_URI:latest-frontend .
+ docker push $ECR_REPO_URI:${{github.sha}}-frontend
+ docker push $ECR_REPO_URI:latest-frontend
+
+ build-backend:
+ runs-on: ubicloud
+ env:
+ ECR_REPO_URI: 875427118836.dkr.ecr.eu-west-1.amazonaws.com/v2-risk-dashboard
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v3
+
+ - name: Configure AWS credentials
+ uses: aws-actions/configure-aws-credentials@master
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PROD }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY_PROD }}
+ aws-region: "eu-west-1"
+
+ - name: Log in to Amazon ECR
+ id: login-ecr
+ uses: aws-actions/amazon-ecr-login@v2
+
+ - name: Docker build
+ run: |
+ ECR_REPO_URI=875427118836.dkr.ecr.eu-west-1.amazonaws.com/v2-risk-dashboard
+ docker build -f ./Dockerfile-backend -t $ECR_REPO_URI:${{github.sha}}-backend -t $ECR_REPO_URI:latest-backend .
+ docker push $ECR_REPO_URI:${{github.sha}}-backend
+ docker push $ECR_REPO_URI:latest-backend
+
+ deploy:
+ runs-on: ubicloud
+ needs: [build-frontend, build-backend]
+ env:
+ ECR_REPO_URI: 875427118836.dkr.ecr.eu-west-1.amazonaws.com/v2-risk-dashboard
+ steps:
+ - name: Checkout Code
+ uses: actions/checkout@v3
+
+ - name: Configure AWS credentials
+ uses: aws-actions/configure-aws-credentials@master
+ with:
+ aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_PROD }}
+ aws-secret-access-key: ${{ secrets.AWS_SECRET_KEY_PROD }}
+ aws-region: "eu-west-1"
+
+ - name: Log in to Amazon ECR
+ id: login-ecr
+ uses: aws-actions/amazon-ecr-login@v2
+
+ - name: Restart deployment
+ run: |
+ aws eks update-kubeconfig --name drift-prod-cluster --region eu-west-1 --role-arn arn:aws:iam::875427118836:role/k8sAdmin
+ kubectl rollout restart -n mainnet-beta deployment/risk-dashboard-frontend
+ kubectl rollout restart -n mainnet-beta deployment/risk-dashboard-backend
diff --git a/.gitignore b/.gitignore
index ca8b4e9..eb98fc4 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
venv
.mypy_cache
-/__pycache__/
-/src/__pycache__/
-/src/sections/__pycache__
\ No newline at end of file
+**/__pycache__/
+.env
+.venv
+pickles/*
+cache
\ No newline at end of file
diff --git a/.streamlit/config.toml b/.streamlit/config.toml
new file mode 100644
index 0000000..79fb1af
--- /dev/null
+++ b/.streamlit/config.toml
@@ -0,0 +1,8 @@
+[server]
+runOnSave = true
+
+[theme]
+base = "dark"
+
+[browser]
+gatherUsageStats = false
diff --git a/Dockerfile-backend b/Dockerfile-backend
new file mode 100644
index 0000000..dd13c0d
--- /dev/null
+++ b/Dockerfile-backend
@@ -0,0 +1,10 @@
+FROM python:3.10-slim-bullseye
+
+WORKDIR /app
+COPY . /app
+
+RUN apt-get update && apt-get install -y gcc python3-dev
+RUN pip install --trusted-host pypi.python.org -r requirements.txt
+EXPOSE 8000
+
+CMD ["uvicorn", "backend.app:app", "--host", "0.0.0.0", "--port", "8000"]
diff --git a/Dockerfile b/Dockerfile-frontend
similarity index 75%
rename from Dockerfile
rename to Dockerfile-frontend
index 0668adf..26f6daf 100644
--- a/Dockerfile
+++ b/Dockerfile-frontend
@@ -4,6 +4,9 @@ WORKDIR /app
COPY . /app
+# Install necessary build tools and Python headers
+RUN apt-get update && apt-get install -y gcc python3-dev
+
# Install any needed packages specified in requirements.txt
RUN pip install --trusted-host pypi.python.org -r requirements.txt
diff --git a/README.md b/README.md
index 351430f..d190727 100644
--- a/README.md
+++ b/README.md
@@ -2,11 +2,12 @@
Quick Start:
-1. Run `export RPC_URL"YOUR_RPC_HERE"` in terminal for static RPC
-2. Create new venv `python3 -m venv venv`
-3. Activate venv `source venv/bin/activate`
+1. Copy .env.example to .env and set RPC_URL
+2. Create new venv `python -m venv .venv`
+3. Activate venv `source .venv/bin/activate`
4. Install dependencies `pip install -r requirements.txt`
-5. `streamlit run src/main.py`
+5. Run the frontend with `streamlit run src/main.py`
+6. Run the backend with `uvicorn backend.app:app --host 0.0.0.0 --port 8000`
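+7. Smoke-test the backend: `curl http://localhost:8000` should return `{"message": "risk dashboard backend is online"}` once startup completes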
Current Metrics:
1. Largest perp positions
@@ -14,5 +15,3 @@ Current Metrics:
3. Account health distribution
4. Most levered perp positions > $1m notional
5. Most levered spot borrows > $750k notional
-
-WIP Metrics:
diff --git a/backend/api/__init__.py b/backend/api/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/backend/api/asset_liability.py b/backend/api/asset_liability.py
new file mode 100644
index 0000000..9f9843c
--- /dev/null
+++ b/backend/api/asset_liability.py
@@ -0,0 +1,28 @@
+from backend.state import BackendRequest
+from backend.state import BackendState
+from backend.utils.matrix import get_matrix
+from backend.utils.user_metrics import get_usermap_df
+from backend.utils.waiting_for import waiting_for
+from driftpy.drift_client import DriftClient
+from driftpy.pickle.vat import Vat
+from fastapi import APIRouter
+
+
+router = APIRouter()
+
+
+@router.get("/matrix/{mode}/{perp_market_index}")
+async def get_asset_liability_matrix(
+ request: BackendRequest, mode: int, perp_market_index: int
+):
+ backend_state: BackendState = request.state.backend_state
+ vat: Vat = backend_state.vat
+ drift_client: DriftClient = backend_state.dc
+
+ with waiting_for("Getting asset liability matrix"):
+ res, df = await get_matrix(drift_client, vat, mode, perp_market_index)
+
+ return {
+ "res": res.to_dict(),
+ "df": df.to_dict(),
+ }
diff --git a/src/health_utils.py b/backend/api/health.py
similarity index 61%
rename from src/health_utils.py
rename to backend/api/health.py
index 059f368..3e04394 100644
--- a/src/health_utils.py
+++ b/backend/api/health.py
@@ -1,119 +1,46 @@
-import asyncio
import heapq
-import time
-import os
-from asyncio import AbstractEventLoop
-import plotly.express as px # type: ignore
-import pandas as pd # type: ignore
-
-from typing import Any
-
-from solana.rpc.async_api import AsyncClient
-
-from anchorpy import Wallet
-
-import streamlit as st
-from driftpy.drift_user import DriftUser
-from driftpy.drift_client import DriftClient
-from driftpy.account_subscription_config import AccountSubscriptionConfig
-from driftpy.constants.numeric_constants import (
- BASE_PRECISION,
- SPOT_BALANCE_PRECISION,
- PRICE_PRECISION,
-)
-from driftpy.types import is_variant
+from backend.state import BackendRequest
+from driftpy.constants import BASE_PRECISION
+from driftpy.constants import PRICE_PRECISION
+from driftpy.constants import SPOT_BALANCE_PRECISION
from driftpy.pickle.vat import Vat
-from driftpy.constants.spot_markets import mainnet_spot_market_configs, devnet_spot_market_configs
-from driftpy.constants.perp_markets import mainnet_perp_market_configs, devnet_perp_market_configs
-
-from utils import load_newest_files, load_vat, to_financial
-
-def get_largest_perp_positions(vat: Vat):
- top_positions: list[Any] = []
-
- for user in vat.users.values():
- for position in user.get_user_account().perp_positions:
- if position.base_asset_amount > 0:
- market_price = vat.perp_oracles.get(position.market_index)
- if market_price is not None:
- market_price_ui = market_price.price / PRICE_PRECISION
- base_asset_value = (
- abs(position.base_asset_amount) / BASE_PRECISION
- ) * market_price_ui
- heap_item = (
- to_financial(base_asset_value),
- user.user_public_key,
- position.market_index,
- position.base_asset_amount / BASE_PRECISION,
- )
-
- if len(top_positions) < 10:
- heapq.heappush(top_positions, heap_item)
- else:
- heapq.heappushpop(top_positions, heap_item)
-
- positions = sorted(
- (value, pubkey, market_idx, amt)
- for value, pubkey, market_idx, amt in top_positions
- )
-
- positions.reverse()
-
- data = {
- "Market Index": [pos[2] for pos in positions],
- "Value": [f"${pos[0]:,.2f}" for pos in positions],
- "Base Asset Amount": [f"{pos[3]:,.2f}" for pos in positions],
- "Public Key": [pos[1] for pos in positions],
- }
-
- return data
-
-
-def get_largest_spot_borrows(vat: Vat):
- top_borrows: list[Any] = []
+from driftpy.types import is_variant
+from fastapi import APIRouter
+import pandas as pd
- for user in vat.users.values():
- for position in user.get_user_account().spot_positions:
- if position.scaled_balance > 0 and is_variant(
- position.balance_type, "Borrow"
- ):
- market_price = vat.spot_oracles.get(position.market_index)
- if market_price is not None:
- market_price_ui = market_price.price / PRICE_PRECISION
- borrow_value = (
- position.scaled_balance / SPOT_BALANCE_PRECISION
- ) * market_price_ui
- heap_item = (
- to_financial(borrow_value),
- user.user_public_key,
- position.market_index,
- position.scaled_balance / SPOT_BALANCE_PRECISION,
- )
- if len(top_borrows) < 10:
- heapq.heappush(top_borrows, heap_item)
- else:
- heapq.heappushpop(top_borrows, heap_item)
+router = APIRouter()
- borrows = sorted(
- (value, pubkey, market_idx, amt)
- for value, pubkey, market_idx, amt in top_borrows
- )
- borrows.reverse()
+def to_financial(num: float):
+ """
+    Truncate a number to two decimal places (financial formatting).
+ """
+ num_str = str(num)
+ decimal_pos = num_str.find(".")
+ if decimal_pos != -1:
+ return float(num_str[: decimal_pos + 3])
+ return num
- data = {
- "Market Index": [pos[2] for pos in borrows],
- "Value": [f"${pos[0]:,.2f}" for pos in borrows],
- "Scaled Balance": [f"{pos[3]:,.2f}" for pos in borrows],
- "Public Key": [pos[1] for pos in borrows],
- }
- return data
+@router.get("/health_distribution")
+def get_account_health_distribution(request: BackendRequest):
+ """
+ Get the distribution of account health across different ranges.
+ This endpoint calculates the distribution of account health for all users,
+ categorizing them into health ranges and summing up the total collateral
+ in each range.
-def get_account_health_distribution(vat: Vat):
+ Returns:
+ list[dict]: A list of dictionaries containing the health distribution data.
+ Each dictionary has the following keys:
+ - Health Range (str): The health percentage range (e.g., '0-10%')
+ - Counts (int): The number of accounts in this range
+ - Notional Values (float): The total collateral value in this range
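+
+        Example element (illustrative values):
+            {"Health Range": "0-10%", "Counts": 12, "Notional Values": 1234567.0}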
+ """
+ vat: Vat = request.state.backend_state.vat
health_notional_distributions = {
"0-10%": 0,
"10-20%": 0,
@@ -140,8 +67,13 @@ def get_account_health_distribution(vat: Vat):
}
for user in vat.users.values():
- total_collateral = user.get_total_collateral() / PRICE_PRECISION
- current_health = user.get_health()
+ # print(user.user_public_key)
+ try:
+ total_collateral = user.get_total_collateral() / PRICE_PRECISION
+ current_health = user.get_health()
+ except Exception as e:
+ print(f"==> Error from health [{user.user_public_key}] ", e)
+ continue
match current_health:
case _ if current_health < 10:
health_notional_distributions["0-10%"] += total_collateral
@@ -181,27 +113,93 @@ def get_account_health_distribution(vat: Vat):
}
)
- fig = px.bar(
- df,
- x="Health Range",
- y="Counts",
- title="Health Distribution",
- hover_data={"Notional Values": ":,"}, # Custom format for notional values
- labels={"Counts": "Num Users", "Notional Values": "Notional Value ($)"},
- )
+ return df.to_dict(orient="records")
+
+
+@router.get("/largest_perp_positions")
+def get_largest_perp_positions(request: BackendRequest):
+ """
+ Get the top 10 largest perpetual positions by value.
+
+ This endpoint retrieves the largest perpetual positions across all users,
+ calculated based on the current market prices.
- fig.update_traces(
- hovertemplate="Health Range: %{x}
Count: %{y}
Notional Value: $%{customdata[0]:,.0f}"
+ Returns:
+ dict: A dictionary containing lists of data for the top 10 positions:
+ - Market Index (list[int]): The market indices of the top positions
+ - Value (list[str]): The formatted dollar values of the positions
+ - Base Asset Amount (list[str]): The formatted base asset amounts
+ - Public Key (list[str]): The public keys of the position holders
+ """
+ vat: Vat = request.state.backend_state.vat
+ top_positions: list[tuple[float, str, int, float]] = []
+
+ for user in vat.users.values():
+ for position in user.get_user_account().perp_positions:
+ if position.base_asset_amount > 0:
+ market_price = vat.perp_oracles.get(position.market_index)
+ if market_price is not None:
+ market_price_ui = market_price.price / PRICE_PRECISION
+ base_asset_value = (
+ abs(position.base_asset_amount) / BASE_PRECISION
+ ) * market_price_ui
+ heap_item = (
+ to_financial(base_asset_value),
+ user.user_public_key,
+ position.market_index,
+ position.base_asset_amount / BASE_PRECISION,
+ )
+
+ if len(top_positions) < 10:
+ heapq.heappush(top_positions, heap_item)
+ else:
+ heapq.heappushpop(top_positions, heap_item)
+
+ positions = sorted(
+ (value, pubkey, market_idx, amt)
+ for value, pubkey, market_idx, amt in top_positions
)
- return fig
+ positions.reverse()
+ data = {
+ "Market Index": [pos[2] for pos in positions],
+ "Value": [f"${pos[0]:,.2f}" for pos in positions],
+ "Base Asset Amount": [f"{pos[3]:,.2f}" for pos in positions],
+ "Public Key": [pos[1] for pos in positions],
+ }
-def get_most_levered_perp_positions_above_1m(vat: Vat):
- top_positions: list[Any] = []
+ return data
+
+
+@router.get("/most_levered_perp_positions_above_1m")
+def get_most_levered_perp_positions_above_1m(request: BackendRequest):
+ """
+ Get the top 10 most leveraged perpetual positions with value above $1 million.
+
+ This endpoint calculates the leverage of each perpetual position with a value
+ over $1 million and returns the top 10 most leveraged positions.
+
+ Returns:
+ dict: A dictionary containing lists of data for the top 10 leveraged positions:
+ - Market Index (list[int]): The market indices of the top positions
+ - Value (list[str]): The formatted dollar values of the positions
+ - Base Asset Amount (list[str]): The formatted base asset amounts
+ - Leverage (list[str]): The formatted leverage ratios
+ - Public Key (list[str]): The public keys of the position holders
+ """
+ vat: Vat = request.state.backend_state.vat
+ top_positions: list[tuple[float, str, int, float, float]] = []
for user in vat.users.values():
- total_collateral = user.get_total_collateral() / PRICE_PRECISION
+ try:
+ total_collateral = user.get_total_collateral() / PRICE_PRECISION
+ except Exception as e:
+ print(
+ f"==> Error from get_most_levered_perp_positions_above_1m [{user.user_public_key}] ",
+ e,
+ )
+ continue
if total_collateral > 0:
for position in user.get_user_account().perp_positions:
if position.base_asset_amount > 0:
@@ -246,11 +244,92 @@ def get_most_levered_perp_positions_above_1m(vat: Vat):
return data
-def get_most_levered_spot_borrows_above_1m(vat: Vat):
- top_borrows: list[Any] = []
+@router.get("/largest_spot_borrows")
+def get_largest_spot_borrows(request: BackendRequest):
+ """
+ Get the top 10 largest spot borrowing positions by value.
+
+ This endpoint retrieves the largest spot borrowing positions across all users,
+ calculated based on the current market prices.
+
+ Returns:
+ dict: A dictionary containing lists of data for the top 10 borrowing positions:
+ - Market Index (list[int]): The market indices of the top borrows
+ - Value (list[str]): The formatted dollar values of the borrows
+ - Scaled Balance (list[str]): The formatted scaled balances of the borrows
+ - Public Key (list[str]): The public keys of the borrowers
+ """
+ vat: Vat = request.state.backend_state.vat
+ top_borrows: list[tuple[float, str, int, float]] = []
for user in vat.users.values():
- total_collateral = user.get_total_collateral() / PRICE_PRECISION
+ for position in user.get_user_account().spot_positions:
+ if position.scaled_balance > 0 and is_variant(
+ position.balance_type, "Borrow"
+ ):
+ market_price = vat.spot_oracles.get(position.market_index)
+ if market_price is not None:
+ market_price_ui = market_price.price / PRICE_PRECISION
+ borrow_value = (
+ position.scaled_balance / SPOT_BALANCE_PRECISION
+ ) * market_price_ui
+ heap_item = (
+ to_financial(borrow_value),
+ user.user_public_key,
+ position.market_index,
+ position.scaled_balance / SPOT_BALANCE_PRECISION,
+ )
+
+ if len(top_borrows) < 10:
+ heapq.heappush(top_borrows, heap_item)
+ else:
+ heapq.heappushpop(top_borrows, heap_item)
+
+ borrows = sorted(
+ (value, pubkey, market_idx, amt)
+ for value, pubkey, market_idx, amt in top_borrows
+ )
+
+ borrows.reverse()
+
+ data = {
+ "Market Index": [pos[2] for pos in borrows],
+ "Value": [f"${pos[0]:,.2f}" for pos in borrows],
+ "Scaled Balance": [f"{pos[3]:,.2f}" for pos in borrows],
+ "Public Key": [pos[1] for pos in borrows],
+ }
+
+ return data
+
+
+@router.get("/most_levered_spot_borrows_above_1m")
+def get_most_levered_spot_borrows_above_1m(request: BackendRequest):
+ """
+ Get the top 10 most leveraged spot borrowing positions with value above $750,000.
+
+ This endpoint calculates the leverage of each spot borrowing position with a value
+ over $750,000 and returns the top 10 most leveraged positions.
+
+ Returns:
+ dict: A dictionary containing lists of data for the top 10 leveraged borrowing positions:
+ - Market Index (list[int]): The market indices of the top borrows
+ - Value (list[str]): The formatted dollar values of the borrows
+ - Scaled Balance (list[str]): The formatted scaled balances of the borrows
+ - Leverage (list[str]): The formatted leverage ratios
+ - Public Key (list[str]): The public keys of the borrowers
+ """
+ vat: Vat = request.state.backend_state.vat
+ top_borrows: list[tuple[float, str, int, float, float]] = []
+
+ for user in vat.users.values():
+ try:
+ total_collateral = user.get_total_collateral() / PRICE_PRECISION
+ except Exception as e:
+ print(
+ f"==> Error from get_most_levered_spot_borrows_above_1m [{user.user_public_key}] ",
+ e,
+ )
+            continue
if total_collateral > 0:
for position in user.get_user_account().spot_positions:
if (
@@ -293,4 +372,4 @@ def get_most_levered_spot_borrows_above_1m(vat: Vat):
"Public Key": [pos[1] for pos in borrows],
}
- return data
\ No newline at end of file
+ return data
diff --git a/backend/api/liquidation.py b/backend/api/liquidation.py
new file mode 100644
index 0000000..a592268
--- /dev/null
+++ b/backend/api/liquidation.py
@@ -0,0 +1,45 @@
+from backend.state import BackendRequest
+from driftpy.constants import BASE_PRECISION
+from driftpy.constants import PRICE_PRECISION
+from driftpy.pickle.vat import Vat
+from fastapi import APIRouter
+
+
+router = APIRouter()
+
+
+@router.get("/liquidation-curve/{market_index}")
+def get_liquidation_curve(request: BackendRequest, market_index: int):
+ vat: Vat = request.state.backend_state.vat
+ liquidations_long: list[tuple[float, float]] = []
+ liquidations_short: list[tuple[float, float]] = []
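+    # Each list collects (liquidation_price_ui, position_notional) pairs for the side at risk.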
+ market_price = vat.perp_oracles.get(market_index)
+ market_price_ui = market_price.price / PRICE_PRECISION
+ for user in vat.users.user_map.values():
+ perp_position = user.get_perp_position(market_index)
+ if perp_position is not None:
+ liquidation_price = user.get_perp_liq_price(market_index)
+ if liquidation_price is not None:
+ liquidation_price_ui = liquidation_price / PRICE_PRECISION
+ position_size = abs(perp_position.base_asset_amount) / BASE_PRECISION
+ position_notional = position_size * market_price_ui
+ is_zero = round(position_notional) == 0
+ is_short = perp_position.base_asset_amount < 0
+ is_long = perp_position.base_asset_amount > 0
+ if is_zero:
+ continue
+ if is_short and liquidation_price_ui > market_price_ui:
+ liquidations_short.append((liquidation_price_ui, position_notional))
+ elif is_long and liquidation_price_ui < market_price_ui:
+ liquidations_long.append((liquidation_price_ui, position_notional))
+ else:
+ pass
+
+ liquidations_long.sort(key=lambda x: x[0])
+ liquidations_short.sort(key=lambda x: x[0])
+
+ return {
+ "liquidations_long": liquidations_long,
+ "liquidations_short": liquidations_short,
+ "market_price_ui": market_price_ui,
+ }
diff --git a/backend/api/metadata.py b/backend/api/metadata.py
new file mode 100644
index 0000000..d9621aa
--- /dev/null
+++ b/backend/api/metadata.py
@@ -0,0 +1,15 @@
+from backend.state import BackendRequest
+from backend.state import BackendState
+from fastapi import APIRouter
+
+
+router = APIRouter()
+
+
+@router.get("/")
+def get_metadata(request: BackendRequest):
+ backend_state: BackendState = request.state.backend_state
+ return {
+ "pickle_file": backend_state.current_pickle_path,
+ "last_oracle_slot": backend_state.vat.register_oracle_slot,
+ }
diff --git a/backend/api/price_shock.py b/backend/api/price_shock.py
new file mode 100644
index 0000000..0b49a99
--- /dev/null
+++ b/backend/api/price_shock.py
@@ -0,0 +1,36 @@
+from typing import Optional
+
+from backend.state import BackendRequest
+from backend.state import BackendState
+from backend.utils.user_metrics import get_usermap_df
+from driftpy.drift_client import DriftClient
+from driftpy.pickle.vat import Vat
+from fastapi import APIRouter
+
+
+router = APIRouter()
+
+
+@router.get("/usermap")
+async def get_price_shock(
+ request: BackendRequest,
+ oracle_distortion: float = 0.1,
+ asset_group: Optional[str] = None,
+ n_scenarios: int = 5,
+):
+ backend_state: BackendState = request.state.backend_state
+ vat: Vat = backend_state.vat
+ drift_client: DriftClient = backend_state.dc
+
+ result = await get_usermap_df(
+ drift_client,
+ vat.users,
+ "oracles",
+ oracle_distortion,
+ asset_group,
+ n_scenarios,
+ )
+
+ print(result)
+
+ return {}
diff --git a/backend/api/snapshot.py b/backend/api/snapshot.py
new file mode 100644
index 0000000..928f8e6
--- /dev/null
+++ b/backend/api/snapshot.py
@@ -0,0 +1,14 @@
+from backend.state import BackendRequest
+from backend.state import BackendState
+from fastapi import APIRouter
+from fastapi import BackgroundTasks
+
+
+router = APIRouter()
+
+
+@router.get("/pickle")
+async def pickle(request: BackendRequest, background_tasks: BackgroundTasks):
+ backend_state: BackendState = request.state.backend_state
+ background_tasks.add_task(backend_state.take_pickle_snapshot)
+ return {"result": "background task added"}
diff --git a/backend/app.py b/backend/app.py
new file mode 100644
index 0000000..5720015
--- /dev/null
+++ b/backend/app.py
@@ -0,0 +1,102 @@
+from contextlib import asynccontextmanager
+import glob
+import os
+import shutil
+
+from backend.api import asset_liability
+from backend.api import health
+from backend.api import liquidation
+from backend.api import metadata
+from backend.api import price_shock
+from backend.api import snapshot
+from backend.middleware.cache_middleware import CacheMiddleware
+from backend.middleware.readiness import ReadinessMiddleware
+from backend.state import BackendState
+from backend.utils.repeat_every import repeat_every
+from dotenv import load_dotenv
+from fastapi import FastAPI
+
+
+load_dotenv()
+state = BackendState()
+
+
+@repeat_every(seconds=60 * 5) # 5 minutes
+async def repeatedly_retake_snapshot(state: BackendState) -> None:
+ await state.take_pickle_snapshot()
+
+
+@repeat_every(seconds=60 * 5) # 5 minutes
+async def repeatedly_clean_cache(state: BackendState) -> None:
+ if not os.path.exists("pickles"):
+ print("pickles folder does not exist")
+ return
+
+ pickles = glob.glob("pickles/*")
+ if len(pickles) > 3:
+ print("pickles folder has more than 3 pickles, deleting old ones")
+ pickles.sort(key=os.path.getmtime)
+ for pickle in pickles[:-3]:
+ print(f"deleting {pickle}")
+ shutil.rmtree(pickle)
+
+ cache_files = glob.glob("cache/*")
+ if len(cache_files) > 20:
+ print("cache folder has more than 20 files, deleting old ones")
+ cache_files.sort(key=os.path.getmtime)
+ for cache_file in cache_files[:-20]:
+ print(f"deleting {cache_file}")
+ os.remove(cache_file)
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ url = os.getenv("RPC_URL")
+ if not url:
+ raise ValueError("RPC_URL environment variable is not set.")
+ global state
+ state.initialize(url)
+
+ print("Checking if cached vat exists")
+ cached_vat_path = sorted(glob.glob("pickles/*"))
+ if len(cached_vat_path) > 0:
+ print("Loading cached vat")
+ await state.load_pickle_snapshot(cached_vat_path[-1])
+ await repeatedly_clean_cache(state)
+ await repeatedly_retake_snapshot(state)
+ else:
+ print("No cached vat found, bootstrapping")
+ await state.bootstrap()
+ await state.take_pickle_snapshot()
+ await repeatedly_clean_cache(state)
+ await repeatedly_retake_snapshot(state)
+ state.ready = True
+ print("Starting app")
+ yield
+
+ # Cleanup
+ state.ready = False
+ await state.dc.unsubscribe()
+ await state.connection.close()
+
+
+app = FastAPI(lifespan=lifespan)
+app.add_middleware(ReadinessMiddleware, state=state)
+app.add_middleware(CacheMiddleware, state=state, cache_dir="cache")
+
+app.include_router(health.router, prefix="/api/health", tags=["health"])
+app.include_router(metadata.router, prefix="/api/metadata", tags=["metadata"])
+app.include_router(liquidation.router, prefix="/api/liquidation", tags=["liquidation"])
+app.include_router(price_shock.router, prefix="/api/price-shock", tags=["price-shock"])
+app.include_router(
+ asset_liability.router, prefix="/api/asset-liability", tags=["asset-liability"]
+)
+app.include_router(snapshot.router, prefix="/api/snapshot", tags=["snapshot"])
+
+
+# NOTE: All other routes should be in /api/* within the /api folder. Routes outside of /api are not exposed in k8s
+@app.get("/")
+async def root():
+ return {"message": "risk dashboard backend is online"}
diff --git a/backend/middleware/cache_middleware.py b/backend/middleware/cache_middleware.py
new file mode 100644
index 0000000..a650491
--- /dev/null
+++ b/backend/middleware/cache_middleware.py
@@ -0,0 +1,69 @@
+import hashlib
+import os
+import pickle
+
+from backend.state import BackendRequest
+from backend.state import BackendState
+from fastapi import Response
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.types import ASGIApp
+
+
+class CacheMiddleware(BaseHTTPMiddleware):
+ def __init__(self, app: ASGIApp, state: BackendState, cache_dir: str = "cache"):
+ super().__init__(app)
+ self.state = state
+ self.cache_dir = cache_dir
+ if not os.path.exists(self.cache_dir):
+ os.makedirs(self.cache_dir)
+
+ async def dispatch(self, request: BackendRequest, call_next):
+ if not request.url.path.startswith("/api"):
+ return await call_next(request)
+ if self.state.current_pickle_path == "bootstrap":
+ return await call_next(request)
+
+ cache_key = self._generate_cache_key(request)
+ cache_file = os.path.join(self.cache_dir, f"{cache_key}.pkl")
+
+ if os.path.exists(cache_file):
+ print(f"Cache hit for {request.url.path}")
+ with open(cache_file, "rb") as f:
+ response_data = pickle.load(f)
+ return Response(
+ content=response_data["content"],
+ status_code=response_data["status_code"],
+ headers=response_data["headers"],
+ )
+
+ print(f"Cache miss for {request.url.path}")
+ response = await call_next(request)
+
+ if response.status_code == 200:
+ response_body = b""
+ async for chunk in response.body_iterator:
+ response_body += chunk
+ response_data = {
+ "content": response_body,
+ "status_code": response.status_code,
+ "headers": dict(response.headers),
+ }
+
+ os.makedirs(os.path.dirname(cache_file), exist_ok=True)
+ with open(cache_file, "wb") as f:
+ pickle.dump(response_data, f)
+
+ return Response(
+ content=response_body,
+ status_code=response.status_code,
+ headers=dict(response.headers),
+ )
+
+ return response
+
+ def _generate_cache_key(self, request: BackendRequest) -> str:
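+        # Keying on the current pickle path means every new snapshot naturally invalidates old cache entries.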
+ current_pickle_path = self.state.current_pickle_path
+ hash_input = f"{current_pickle_path}:{request.method}:{request.url.path}:{request.url.query}"
+ print("Hash input: ", hash_input)
+ return hashlib.md5(hash_input.encode()).hexdigest()
diff --git a/backend/middleware/readiness.py b/backend/middleware/readiness.py
new file mode 100644
index 0000000..f30a5ba
--- /dev/null
+++ b/backend/middleware/readiness.py
@@ -0,0 +1,19 @@
+from backend.state import BackendRequest
+from backend.state import BackendState
+from fastapi import HTTPException
+from starlette.middleware.base import BaseHTTPMiddleware
+from starlette.types import ASGIApp
+
+
+class ReadinessMiddleware(BaseHTTPMiddleware):
+ def __init__(self, app: ASGIApp, state: BackendState):
+ super().__init__(app)
+ self.state = state
+
+ async def dispatch(self, request: BackendRequest, call_next):
+ if not self.state.ready and request.url.path != "/health":
+ raise HTTPException(status_code=503, detail="Service is not ready")
+
+ request.state.backend_state = self.state
+ response = await call_next(request)
+ return response
diff --git a/backend/state.py b/backend/state.py
new file mode 100644
index 0000000..8959c8b
--- /dev/null
+++ b/backend/state.py
@@ -0,0 +1,127 @@
+from asyncio import create_task
+from asyncio import gather
+from datetime import datetime
+import os
+from typing import TypedDict
+
+from anchorpy import Wallet
+from backend.utils.vat import load_newest_files
+from backend.utils.waiting_for import waiting_for
+from driftpy.account_subscription_config import AccountSubscriptionConfig
+from driftpy.drift_client import DriftClient
+from driftpy.market_map.market_map import MarketMap
+from driftpy.market_map.market_map_config import (
+ WebsocketConfig as MarketMapWebsocketConfig,
+)
+from driftpy.market_map.market_map_config import MarketMapConfig
+from driftpy.pickle.vat import Vat
+from driftpy.types import MarketType
+from driftpy.user_map.user_map import UserMap
+from driftpy.user_map.user_map_config import (
+ WebsocketConfig as UserMapWebsocketConfig,
+)
+from driftpy.user_map.user_map_config import UserMapConfig
+from driftpy.user_map.user_map_config import UserStatsMapConfig
+from driftpy.user_map.userstats_map import UserStatsMap
+from fastapi import Request
+import pandas as pd
+from solana.rpc.async_api import AsyncClient
+
+
+class BackendState:
+ connection: AsyncClient
+ dc: DriftClient
+ spot_map: MarketMap
+ perp_map: MarketMap
+ user_map: UserMap
+ stats_map: UserStatsMap
+
+ current_pickle_path: str
+ vat: Vat
+ ready: bool
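+
+    # Lifecycle (see app.py's lifespan): initialize(url), then bootstrap() or
+    # load_pickle_snapshot(...), then take_pickle_snapshot() on a timer.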
+
+ def initialize(
+ self, url: str
+ ): # Not using __init__ because we need the rpc url to be passed in
+ self.connection = AsyncClient(url)
+ self.dc = DriftClient(
+ self.connection,
+ Wallet.dummy(),
+ "mainnet",
+ account_subscription=AccountSubscriptionConfig("cached"),
+ )
+ self.perp_map = MarketMap(
+ MarketMapConfig(
+ self.dc.program,
+ MarketType.Perp(),
+ MarketMapWebsocketConfig(),
+ self.dc.connection,
+ )
+ )
+ self.spot_map = MarketMap(
+ MarketMapConfig(
+ self.dc.program,
+ MarketType.Spot(),
+ MarketMapWebsocketConfig(),
+ self.dc.connection,
+ )
+ )
+ self.user_map = UserMap(UserMapConfig(self.dc, UserMapWebsocketConfig()))
+ self.stats_map = UserStatsMap(UserStatsMapConfig(self.dc))
+ self.vat = Vat(
+ self.dc,
+ self.user_map,
+ self.stats_map,
+ self.spot_map,
+ self.perp_map,
+ )
+
+ async def bootstrap(self):
+ with waiting_for("drift client"):
+ await self.dc.subscribe()
+ with waiting_for("subscriptions"):
+ await gather(
+ create_task(self.spot_map.subscribe()),
+ create_task(self.perp_map.subscribe()),
+ create_task(self.user_map.subscribe()),
+ create_task(self.stats_map.subscribe()),
+ )
+ self.current_pickle_path = "bootstrap"
+
+ async def take_pickle_snapshot(self):
+ now = datetime.now()
+ folder_name = now.strftime("vat-%Y-%m-%d-%H-%M-%S")
+ if not os.path.exists("pickles"):
+ os.makedirs("pickles")
+ path = os.path.join("pickles", folder_name, "")
+
+ os.makedirs(path, exist_ok=True)
+ with waiting_for("pickling"):
+ result = await self.vat.pickle(path)
+ with waiting_for("unpickling"):
+ await self.load_pickle_snapshot(path)
+ return result
+
+ async def load_pickle_snapshot(self, directory: str):
+ pickle_map = load_newest_files(directory)
+ self.current_pickle_path = directory
+ with waiting_for("unpickling"):
+ await self.vat.unpickle(
+ users_filename=pickle_map["usermap"],
+ user_stats_filename=pickle_map["userstats"],
+ spot_markets_filename=pickle_map["spot"],
+ perp_markets_filename=pickle_map["perp"],
+ spot_oracles_filename=pickle_map["spotoracles"],
+ perp_oracles_filename=pickle_map["perporacles"],
+ )
+ return pickle_map
+
+
+class BackendRequest(Request):
+ @property
+ def backend_state(self) -> BackendState:
+        # starlette's request.state supports attribute access, not dict-style .get()
+        return self.state.backend_state
+
+ @backend_state.setter
+ def backend_state(self, value: BackendState):
+ self.state["backend_state"] = value
diff --git a/backend/utils/matrix.py b/backend/utils/matrix.py
new file mode 100644
index 0000000..a45d54e
--- /dev/null
+++ b/backend/utils/matrix.py
@@ -0,0 +1,160 @@
+from backend.utils.user_metrics import get_usermap_df
+from driftpy.constants.spot_markets import mainnet_spot_market_configs
+from driftpy.drift_client import DriftClient
+from driftpy.pickle.vat import Vat
+import pandas as pd
+
+
+async def get_matrix(
+ drift_client: DriftClient,
+ vat: Vat,
+ mode=0,
+ perp_market_index=0,
+):
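+    """
+    Build the per-spot-market asset/liability matrix.
+
+    mode selects which users are included (see the match statement below):
+    0 = no filter, 1 = liq within 50% of oracle, 2 = maint. health < 10%,
+    3 = init. health < 10%.
+    """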
+ NUMBER_OF_SPOT = len(mainnet_spot_market_configs)
+
+ res = await get_usermap_df(
+ drift_client,
+ vat.users,
+ "margins",
+ oracle_distortion=0,
+ cov_matrix="ignore stables",
+ n_scenarios=0,
+ )
+ levs_none = res["leverages_none"]
+ levs_init = res["leverages_initial"]
+ levs_maint = res["leverages_maintenance"]
+ user_keys = res["user_keys"]
+
+ levs_maint = [x for x in levs_maint if int(x["health"]) <= 10]
+ levs_init = [x for x in levs_init if int(x["health"]) <= 10]
+
+ df: pd.DataFrame
+ match mode:
+ case 0: # nothing
+ df = pd.DataFrame(levs_none, index=user_keys)
+ case 1: # liq within 50% of oracle
+ df = pd.DataFrame(levs_none, index=user_keys)
+        case 2:  # maint. health < 10%
+            user_keys = [x["user_key"] for x in levs_maint]
+            df = pd.DataFrame(levs_maint, index=user_keys)
+        case 3:  # init. health < 10%
+            user_keys = [x["user_key"] for x in levs_init]
+            df = pd.DataFrame(levs_init, index=user_keys)
+
+ def get_rattt(row):
+ calculations = [
+ (
+ "all_assets",
+ lambda v: v if v > 0 else 0,
+ ), # Simplified from v / row['spot_asset'] * row['spot_asset']
+ (
+ "all",
+ lambda v: (
+ v
+ / row["spot_asset"]
+ * (row["perp_liability"] + row["spot_liability"])
+ if v > 0
+ else 0
+ ),
+ ),
+ (
+ "all_perp",
+ lambda v: v / row["spot_asset"] * row["perp_liability"] if v > 0 else 0,
+ ),
+ (
+ "all_spot",
+ lambda v: v / row["spot_asset"] * row["spot_liability"] if v > 0 else 0,
+ ),
+ (
+ f"perp_{perp_market_index}_long",
+ lambda v: (
+ v / row["spot_asset"] * row["net_p"][perp_market_index]
+ if v > 0 and row["net_p"][0] > 0
+ else 0
+ ),
+ ),
+ (
+ f"perp_{perp_market_index}_short",
+ lambda v: (
+ v / row["spot_asset"] * row["net_p"][perp_market_index]
+ if v > 0 and row["net_p"][perp_market_index] < 0
+ else 0
+ ),
+ ),
+ ]
+
+ series_list = []
+ for suffix, calc_func in calculations:
+ series = pd.Series([calc_func(val) for key, val in row["net_v"].items()])
+ series.index = [f"spot_{x}_{suffix}" for x in series.index]
+ series_list.append(series)
+
+ return pd.concat(series_list)
+
+ df = pd.concat([df, df.apply(get_rattt, axis=1)], axis=1)
+
+ def calculate_effective_leverage(group):
+ assets = group["all_assets"]
+ liabilities = group["all_liabilities"]
+ return liabilities / assets if assets != 0 else 0
+
+ def format_with_checkmark(value, condition, mode, financial=False):
+ if financial:
+ formatted_value = f"{value:,.2f}"
+ else:
+ formatted_value = f"{value:.2f}"
+
+ if condition and mode > 0:
+ return f"{formatted_value} โ
"
+ return formatted_value
+
+ res = pd.DataFrame(
+ {
+ ("spot" + str(i)): (
+ df[f"spot_{i}_all_assets"].sum(),
+ format_with_checkmark(
+ df[f"spot_{i}_all"].sum(),
+ 0 < df[f"spot_{i}_all"].sum() < 1_000_000,
+ mode,
+ financial=True,
+ ),
+ format_with_checkmark(
+ calculate_effective_leverage(
+ {
+ "all_assets": df[f"spot_{i}_all_assets"].sum(),
+ "all_liabilities": df[f"spot_{i}_all"].sum(),
+ }
+ ),
+ 0
+ < calculate_effective_leverage(
+ {
+ "all_assets": df[f"spot_{i}_all_assets"].sum(),
+ "all_liabilities": df[f"spot_{i}_all"].sum(),
+ }
+ )
+ < 2,
+ mode,
+ ),
+ df[f"spot_{i}_all_spot"].sum(),
+ df[f"spot_{i}_all_perp"].sum(),
+ df[f"spot_{i}_perp_{perp_market_index}_long"].sum(),
+ df[f"spot_{i}_perp_{perp_market_index}_short"].sum(),
+ )
+ for i in range(NUMBER_OF_SPOT)
+ },
+ index=[
+ "all_assets",
+ "all_liabilities",
+ "effective_leverage",
+ "all_spot",
+ "all_perp",
+ f"perp_{perp_market_index}_long",
+ f"perp_{perp_market_index}_short",
+ ],
+ ).T
+
+ res["all_liabilities"] = res["all_liabilities"].astype(str)
+ res["effective_leverage"] = res["effective_leverage"].astype(str)
+
+ return res, df
diff --git a/backend/utils/repeat_every.py b/backend/utils/repeat_every.py
new file mode 100644
index 0000000..6d23cb1
--- /dev/null
+++ b/backend/utils/repeat_every.py
@@ -0,0 +1,148 @@
+"""
+Periodic Task Execution Decorator
+
+Provides a `repeat_every` decorator for periodic execution of tasks in asynchronous environments.
+Modified from fastapi_utils library to support passing a state object to the repeated function.
+
+Features:
+- Configurable execution interval and initial delay
+- Exception handling with optional callback
+- Completion callback and maximum repetitions limit
+- Supports both sync and async functions
+
+Usage:
+ @repeat_every(seconds=60)
+ async def my_task(state):
+ # Task logic here
+
+Note: Designed for use with asynchronous frameworks like FastAPI.
+
+Original Source: https://github.com/dmontagu/fastapi-utils (MIT License)
+"""
+
+from __future__ import annotations
+
+import asyncio
+from functools import wraps
+import logging
+from traceback import format_exception
+from typing import Any, Callable, Coroutine, TypeVar, Union
+import warnings
+
+from starlette.concurrency import run_in_threadpool
+
+
+T = TypeVar("T")
+
+ArgsReturnFuncT = Callable[[T], Any]
+ArgsReturnAsyncFuncT = Callable[[T], Coroutine[Any, Any, Any]]
+ExcArgNoReturnFuncT = Callable[[Exception], None]
+ExcArgNoReturnAsyncFuncT = Callable[[Exception], Coroutine[Any, Any, None]]
+ArgsReturnAnyFuncT = Union[ArgsReturnFuncT, ArgsReturnAsyncFuncT]
+ExcArgNoReturnAnyFuncT = Union[ExcArgNoReturnFuncT, ExcArgNoReturnAsyncFuncT]
+ArgsReturnDecorator = Callable[
+ [ArgsReturnAnyFuncT], Callable[[T], Coroutine[Any, Any, None]]
+]
+
+
+async def _handle_func(func: ArgsReturnAnyFuncT, arg: T) -> Any:
+ if asyncio.iscoroutinefunction(func):
+ return await func(arg)
+ else:
+ return await run_in_threadpool(func, arg)
+
+
+async def _handle_exc(
+ exc: Exception, on_exception: ExcArgNoReturnAnyFuncT | None
+) -> None:
+ if on_exception:
+ if asyncio.iscoroutinefunction(on_exception):
+ await on_exception(exc)
+ else:
+ await run_in_threadpool(on_exception, exc)
+
+
+def repeat_every(
+ *,
+ seconds: float,
+ wait_first: float | None = None,
+ logger: logging.Logger | None = None,
+ raise_exceptions: bool = False,
+ max_repetitions: int | None = None,
+ on_complete: ArgsReturnAnyFuncT | None = None,
+ on_exception: ExcArgNoReturnAnyFuncT | None = None,
+) -> ArgsReturnDecorator:
+ """
+ This function returns a decorator that modifies a function so it is periodically re-executed after its first call.
+
+ The function it decorates should accept one argument and can return a value.
+
+ Parameters
+ ----------
+ seconds: float
+ The number of seconds to wait between repeated calls
+ wait_first: float (default None)
+ If not None, the function will wait for the given duration before the first call
+ logger: Optional[logging.Logger] (default None)
+ Warning: This parameter is deprecated and will be removed in the 1.0 release.
+ The logger to use to log any exceptions raised by calls to the decorated function.
+ If not provided, exceptions will not be logged by this function (though they may be handled by the event loop).
+ raise_exceptions: bool (default False)
+ Warning: This parameter is deprecated and will be removed in the 1.0 release.
+ If True, errors raised by the decorated function will be raised to the event loop's exception handler.
+ Note that if an error is raised, the repeated execution will stop.
+ Otherwise, exceptions are just logged and the execution continues to repeat.
+ See https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.loop.set_exception_handler for more info.
+ max_repetitions: Optional[int] (default None)
+ The maximum number of times to call the repeated function. If `None`, the function is repeated forever.
+ on_complete: Optional[Callable[[T], Any]] (default None)
+ A function to call after the final repetition of the decorated function.
+ on_exception: Optional[Callable[[Exception], None]] (default None)
+ A function to call when an exception is raised by the decorated function.
+ """
+
+ def decorator(func: ArgsReturnAnyFuncT) -> Callable[[T], Coroutine[Any, Any, None]]:
+ """
+ Converts the decorated function into a repeated, periodically-called version of itself.
+ """
+
+ @wraps(func)
+ async def wrapped(arg: T) -> None:
+ async def loop() -> None:
+ if wait_first is not None:
+ await asyncio.sleep(wait_first)
+
+ repetitions = 0
+ while max_repetitions is None or repetitions < max_repetitions:
+ try:
+ await _handle_func(func, arg)
+
+ except Exception as exc:
+ if logger is not None:
+ warnings.warn(
+ "'logger' is to be deprecated in favor of 'on_exception' in the 1.0 release.",
+ DeprecationWarning,
+ )
+ formatted_exception = "".join(
+ format_exception(type(exc), exc, exc.__traceback__)
+ )
+ logger.error(formatted_exception)
+ if raise_exceptions:
+ warnings.warn(
+ "'raise_exceptions' is to be deprecated in favor of 'on_exception' in the 1.0 release.",
+ DeprecationWarning,
+ )
+ raise exc
+ await _handle_exc(exc, on_exception)
+
+ repetitions += 1
+ await asyncio.sleep(seconds)
+
+ if on_complete:
+ await _handle_func(on_complete, arg)
+
+ asyncio.ensure_future(loop())
+
+ return wrapped
+
+ return decorator
diff --git a/backend/utils/user_metrics.py b/backend/utils/user_metrics.py
new file mode 100644
index 0000000..8e4b77d
--- /dev/null
+++ b/backend/utils/user_metrics.py
@@ -0,0 +1,202 @@
+import copy
+from typing import List, Optional
+
+from driftpy.constants.numeric_constants import MARGIN_PRECISION
+from driftpy.constants.numeric_constants import QUOTE_PRECISION
+from driftpy.constants.perp_markets import mainnet_perp_market_configs
+from driftpy.constants.spot_markets import mainnet_spot_market_configs
+from driftpy.drift_client import DriftClient
+from driftpy.drift_user import DriftUser
+from driftpy.math.margin import MarginCategory
+from driftpy.types import OraclePriceData
+from driftpy.user_map.user_map import UserMap
+
+
+def get_init_health(user: DriftUser):
+ """
+ Returns the initial health of the user.
+ """
+ if user.is_being_liquidated():
+ return 0
+
+ total_collateral = user.get_total_collateral(MarginCategory.INITIAL)
+ maintenance_margin_req = user.get_margin_requirement(MarginCategory.INITIAL)
+
+ if maintenance_margin_req == 0 and total_collateral >= 0:
+ return 100
+ elif total_collateral <= 0:
+ return 0
+ else:
+ return round(
+ min(100, max(0, (1 - maintenance_margin_req / total_collateral) * 100))
+ )
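+    # Illustrative (made-up numbers): collateral 200, initial margin
+    # requirement 50 -> round((1 - 50/200) * 100) = 75.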
+
+
+def comb_asset_liab(a_l_tup):
+ return a_l_tup[0] - a_l_tup[1]
+
+
+def get_collateral_composition(x: DriftUser, margin_category, n):
+ net_v = {
+ i: comb_asset_liab(
+ x.get_spot_market_asset_and_liability_value(i, margin_category)
+ )
+ / QUOTE_PRECISION
+ for i in range(n)
+ }
+ return net_v
+
+
+def get_perp_liab_composition(x: DriftUser, margin_category, n):
+ net_p = {
+ i: x.get_perp_market_liability(i, margin_category, signed=True)
+ / QUOTE_PRECISION
+ for i in range(n)
+ }
+ return net_p
+
+
+def get_user_metrics(x: DriftUser, margin_category: MarginCategory):
+ """
+ Returns a dictionary of the user's health, leverage, and other metrics.
+ """
+ NUMBER_OF_SPOT = len(mainnet_spot_market_configs)
+ NUMBER_OF_PERP = len(mainnet_perp_market_configs)
+
+ metrics = {
+ "user_key": x.user_public_key,
+ "leverage": x.get_leverage() / MARGIN_PRECISION,
+ "perp_liability": x.get_total_perp_position_liability(margin_category)
+ / QUOTE_PRECISION,
+ "spot_asset": x.get_spot_market_asset_value(None, margin_category)
+ / QUOTE_PRECISION,
+ "spot_liability": x.get_spot_market_liability_value(None, margin_category)
+ / QUOTE_PRECISION,
+ "upnl": x.get_unrealized_pnl(True) / QUOTE_PRECISION,
+ "net_usd_value": (
+ x.get_net_spot_market_value(None) + x.get_unrealized_pnl(True)
+ )
+ / QUOTE_PRECISION,
+ }
+ metrics["health"] = (
+ get_init_health(x)
+ if margin_category == MarginCategory.INITIAL
+ else x.get_health()
+ )
+ metrics["net_v"] = get_collateral_composition(x, margin_category, NUMBER_OF_SPOT)
+ metrics["net_p"] = get_perp_liab_composition(x, margin_category, NUMBER_OF_PERP)
+
+ return metrics
+
+
+def get_skipped_oracles(cov_matrix: Optional[str]) -> List[str]:
+ """
+ Determine which oracles to skip based on the cov_matrix parameter.
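+
+    For example, cov_matrix="sol only" keeps only SOL's oracle (skipping all
+    others), while "ignore stables" skips just the USD-denominated markets.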
+ """
+ groups = {
+ "sol only": ["SOL"],
+ "sol lst only": ["mSOL", "jitoSOL", "bSOL"],
+ "sol ecosystem only": ["PYTH", "JTO", "WIF", "JUP", "TNSR", "DRIFT"],
+ "meme": ["WIF"],
+ "wrapped only": ["wBTC", "wETH"],
+ "stables only": ["USD"],
+ }
+ if cov_matrix in groups:
+ return [
+ str(x.oracle)
+ for x in mainnet_spot_market_configs
+ if x.symbol not in groups[cov_matrix]
+ ]
+ elif cov_matrix == "ignore stables":
+ return [str(x.oracle) for x in mainnet_spot_market_configs if "USD" in x.symbol]
+ else:
+ return []
+
+
+def calculate_leverages(
+ user_vals: list[DriftUser], maintenance_category: MarginCategory
+):
+ """
+ Calculate the leverages for all users at a given maintenance category
+ """
+ return list(get_user_metrics(x, maintenance_category) for x in user_vals)
+
+
+async def get_usermap_df(
+ _drift_client: DriftClient,
+ user_map: UserMap,
+ mode: str,
+ oracle_distortion: float = 0.1,
+ cov_matrix: Optional[str] = None,
+ n_scenarios: int = 5,
+):
+ user_keys = list(user_map.user_map.keys())
+ user_vals = list(user_map.values())
+
+ skipped_oracles = get_skipped_oracles(cov_matrix)
+
+ if mode == "margins":
+ leverages_none = calculate_leverages(user_vals, None)
+ leverages_initial = calculate_leverages(user_vals, MarginCategory.INITIAL)
+ leverages_maintenance = calculate_leverages(
+ user_vals, MarginCategory.MAINTENANCE
+ )
+ return {
+ "leverages_none": leverages_none,
+ "leverages_initial": leverages_initial,
+ "leverages_maintenance": leverages_maintenance,
+ "user_keys": user_keys,
+ }
+ else:
+ num_entrs = n_scenarios
+ new_oracles_dat_up = []
+ new_oracles_dat_down = []
+
+ for i in range(num_entrs):
+ new_oracles_dat_up.append({})
+ new_oracles_dat_down.append({})
+
+ assert len(new_oracles_dat_down) == num_entrs
+ print("skipped oracles:", skipped_oracles)
+ distorted_oracles = []
+ cache_up = copy.deepcopy(_drift_client.account_subscriber.cache)
+ cache_down = copy.deepcopy(_drift_client.account_subscriber.cache)
+    for key, val in _drift_client.account_subscriber.cache[
+        "oracle_price_data"
+    ].items():
+ for i in range(num_entrs):
+ new_oracles_dat_up[i][key] = copy.deepcopy(val)
+ new_oracles_dat_down[i][key] = copy.deepcopy(val)
+ if cov_matrix is not None and key in skipped_oracles:
+ continue
+ distorted_oracles.append(key)
+ for i in range(num_entrs):
+ oracle_distort_up = max(1 + oracle_distortion * (i + 1), 1)
+ oracle_distort_down = max(1 - oracle_distortion * (i + 1), 0)
+
+ if isinstance(new_oracles_dat_up[i][key], OraclePriceData):
+ new_oracles_dat_up[i][key].price *= oracle_distort_up
+ new_oracles_dat_down[i][key].price *= oracle_distort_down
+ else:
+ new_oracles_dat_up[i][key].data.price *= oracle_distort_up
+ new_oracles_dat_down[i][key].data.price *= oracle_distort_down
+
+ levs_none = calculate_leverages(user_vals, None)
+ levs_up = []
+ levs_down = []
+
+    original_cache = _drift_client.account_subscriber.cache
+    for i in range(num_entrs):
+        cache_up["oracle_price_data"] = new_oracles_dat_up[i]
+        cache_down["oracle_price_data"] = new_oracles_dat_down[i]
+        # get_user_metrics takes no cache argument, so swap the distorted
+        # caches into the drift client's subscriber before recomputing.
+        _drift_client.account_subscriber.cache = cache_up
+        levs_up_i = list(get_user_metrics(x, None) for x in user_vals)
+        _drift_client.account_subscriber.cache = cache_down
+        levs_down_i = list(get_user_metrics(x, None) for x in user_vals)
+        levs_up.append(levs_up_i)
+        levs_down.append(levs_down_i)
+    _drift_client.account_subscriber.cache = original_cache
+
+ return {
+ "leverages_none": levs_none,
+ "leverages_up": tuple(levs_up),
+ "leverages_down": tuple(levs_down),
+ "user_keys": user_keys,
+ "distorted_oracles": distorted_oracles,
+ }
diff --git a/backend/utils/vat.py b/backend/utils/vat.py
new file mode 100644
index 0000000..c519773
--- /dev/null
+++ b/backend/utils/vat.py
@@ -0,0 +1,29 @@
+import os
+from typing import Optional
+
+
+def load_newest_files(directory: Optional[str] = None) -> dict[str, str]:
+ directory = directory or os.getcwd()
+
+ newest_files: dict[str, tuple[str, int]] = {}
+
+ prefixes = ["perp", "perporacles", "spot", "spotoracles", "usermap", "userstats"]
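+    # Snapshot filenames look like "usermap_274342298.pkl": prefix before the last "_", slot number after it.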
+
+ for filename in os.listdir(directory):
+ if filename.endswith(".pkl") and any(
+ filename.startswith(prefix + "_") for prefix in prefixes
+ ):
+ print(filename)
+ start = filename.rindex("_") + 1 # Use rindex to find the last underscore
+ prefix = filename[: start - 1]
+ end = filename.index(".")
+ slot = int(filename[start:end])
+            if prefix not in newest_files or slot > newest_files[prefix][1]:
+ newest_files[prefix] = (directory + "/" + filename, slot)
+
+ # mapping e.g { 'spotoracles' : 'spotoracles_272636137.pkl' }
+ prefix_to_filename = {
+ prefix: filename for prefix, (filename, _) in newest_files.items()
+ }
+
+ return prefix_to_filename
diff --git a/backend/utils/waiting_for.py b/backend/utils/waiting_for.py
new file mode 100644
index 0000000..397d81f
--- /dev/null
+++ b/backend/utils/waiting_for.py
@@ -0,0 +1,50 @@
+from contextlib import contextmanager
+import sys
+import threading
+import time
+from typing import Optional
+
+
+class LiveCounter:
+ def __init__(self, action: str, file: Optional[object] = sys.stdout):
+ self.action = action
+ self.file = file
+ self.is_running = False
+ self.start_time = None
+
+ def run(self):
+ self.start_time = time.time()
+ while self.is_running:
+ elapsed_time = time.time() - self.start_time
+ print(
+ f"\rWaiting for {self.action}... ({elapsed_time:.1f}s)",
+ end="",
+ file=self.file,
+ flush=True,
+ )
+ time.sleep(0.1)
+
+ def start(self):
+ self.is_running = True
+ self.thread = threading.Thread(target=self.run)
+ self.thread.start()
+
+ def stop(self):
+ self.is_running = False
+ self.thread.join()
+ elapsed_time = time.time() - self.start_time
+ print(
+ f"\rWaiting for {self.action}... ok ({elapsed_time:.1f}s)",
+ file=self.file,
+ flush=True,
+ )
+
+
+@contextmanager
+def waiting_for(action: str, file: Optional[object] = sys.stdout):
+ counter = LiveCounter(action, file)
+ try:
+ counter.start()
+ yield
+ finally:
+ counter.stop()
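+
+
+# Example (sketch): renders a live "Waiting for pickling... (1.2s)" counter and
+# prints "Waiting for pickling... ok (2.3s)" once the block exits.
+#
+# with waiting_for("pickling"):
+#     do_long_running_work()  # hypothetical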
diff --git a/images/drift.svg b/images/drift.svg
new file mode 100644
index 0000000..eba2e26
--- /dev/null
+++ b/images/drift.svg
@@ -0,0 +1,87 @@
+
diff --git a/pickles/.DS_Store b/pickles/.DS_Store
deleted file mode 100644
index 5008ddf..0000000
Binary files a/pickles/.DS_Store and /dev/null differ
diff --git a/pickles/perp_274342434.pkl b/pickles/perp_274342434.pkl
deleted file mode 100644
index b66ed8f..0000000
Binary files a/pickles/perp_274342434.pkl and /dev/null differ
diff --git a/pickles/perporacles_274342436.pkl b/pickles/perporacles_274342436.pkl
deleted file mode 100644
index 10fefd3..0000000
Binary files a/pickles/perporacles_274342436.pkl and /dev/null differ
diff --git a/pickles/spot_274342432.pkl b/pickles/spot_274342432.pkl
deleted file mode 100644
index 036c367..0000000
Binary files a/pickles/spot_274342432.pkl and /dev/null differ
diff --git a/pickles/spotoracles_274342436.pkl b/pickles/spotoracles_274342436.pkl
deleted file mode 100644
index 916fce8..0000000
Binary files a/pickles/spotoracles_274342436.pkl and /dev/null differ
diff --git a/pickles/usermap_274342298.pkl b/pickles/usermap_274342298.pkl
deleted file mode 100644
index 91bea55..0000000
Binary files a/pickles/usermap_274342298.pkl and /dev/null differ
diff --git a/pickles/userstats_274342347.pkl b/pickles/userstats_274342347.pkl
deleted file mode 100644
index cb2c561..0000000
Binary files a/pickles/userstats_274342347.pkl and /dev/null differ
diff --git a/requirements.txt b/requirements.txt
index cf3e579..d51caca 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,6 +4,7 @@ aiosignal==1.3.1
altair==5.3.0
anchorpy==0.20.1
anchorpy-core==0.2.0
+annotated-types==0.7.0
anyio==3.6.2
apischema==0.17.5
async-timeout==4.0.2
@@ -22,9 +23,12 @@ construct==2.10.68
construct-typing==0.5.3
Deprecated==1.2.14
dnspython==2.2.1
-driftpy==0.7.59
+driftpy==0.7.89
Events==0.5
exceptiongroup==1.0.4
+fastapi==0.115.0
+fastapi-restful==0.6.0
+fastapi-utils==0.7.0
flake8==6.0.0
frozenlist==1.3.3
ghp-import==2.1.0
@@ -34,12 +38,13 @@ grpcio==1.64.1
h11==0.14.0
httpcore==0.16.3
httpx==0.23.1
+humanize==4.10.0
idna==3.4
iniconfig==1.1.1
-Jinja2==3.0.3
+Jinja2==3.1.4
jito_searcher_client==0.1.4
jsonalias==0.1.1
-jsonrpcclient==4.0.2
+jsonrpcclient==4.0.3
jsonrpcserver==5.0.9
jsonschema==4.17.3
loguru==0.6.0
@@ -57,7 +62,7 @@ mypy==1.10.0
mypy-extensions==1.0.0
numpy==1.26.4
OSlash==0.6.3
-packaging==22.0
+packaging==23.1
pandas==2.2.2
pathspec==0.12.1
pillow==10.3.0
@@ -71,6 +76,9 @@ pyarrow==16.1.0
pycares==4.3.0
pycodestyle==2.10.0
pycparser==2.21
+pydantic==2.9.2
+pydantic-settings==2.5.2
+pydantic_core==2.23.4
pydeck==0.9.1
pyflakes==3.0.1
Pygments==2.18.0
@@ -78,6 +86,7 @@ pyheck==0.1.5
pyrsistent==0.19.2
pythclient==0.1.4
python-dateutil==2.9.0.post0
+python-dotenv==1.0.0
pytz==2024.1
PyYAML==6.0.1
pyyaml_env_tag==0.1
@@ -90,7 +99,9 @@ smmap==5.0.1
sniffio==1.3.0
solana==0.34.0
solders==0.21.0
-streamlit==1.35.0
+SQLAlchemy==2.0.35
+starlette==0.38.6
+streamlit==1.38.0
sumtypes==0.1a6
tenacity==8.3.0
toml==0.10.2
@@ -100,9 +111,11 @@ tornado==6.4.1
types-cachetools==4.2.10
types-requests==2.31.0.6
types-urllib3==1.26.25.14
+typing-inspect==0.9.0
typing_extensions==4.12.2
tzdata==2024.1
urllib3==1.26.13
+uvicorn==0.31.0
watchdog==4.0.1
websockets==10.4
wrapt==1.16.0
diff --git a/src/lib/api.py b/src/lib/api.py
new file mode 100644
index 0000000..32637a2
--- /dev/null
+++ b/src/lib/api.py
@@ -0,0 +1,54 @@
+import os
+from typing import Optional
+
+from dotenv import load_dotenv
+import pandas as pd
+import requests
+
+
+load_dotenv()
+
+BASE_URL = os.environ["BACKEND_URL"]
+
+
+def api(
+ section: str,
+ path: str,
+ path_extra_1: Optional[str] = None,
+ path_extra_2: Optional[
+ str
+ ] = None, # TODO: this is pretty silly, but it works for now
+ as_json: bool = False,
+ params: Optional[dict] = None,
+):
+ """
+ Fetches data from the backend API. To find the corresponding
+    path, look at the `backend/api/` directory: the `section` is the name
+    of the file, and the `path` is the name of the function inside it.
+
+ Args:
+ section (str): The section of the API to fetch from.
+ path (str): The path of the API to fetch from.
+        path_extra_1 (Optional[str]): An optional extra path segment to append to the path.
+        path_extra_2 (Optional[str]): A second optional extra path segment.
+ as_json (bool): Whether to return the response as JSON.
+
+ Returns:
+ The response from the API.
+ """
+ if path_extra_1:
+ path = f"{path}/{path_extra_1}"
+ if path_extra_2:
+ path = f"{path}/{path_extra_2}"
+ if params:
+ response = requests.get(f"{BASE_URL}/api/{section}/{path}", params=params)
+ else:
+ response = requests.get(f"{BASE_URL}/api/{section}/{path}")
+
+ if as_json:
+ return response.json()
+
+ try:
+ return pd.DataFrame(response.json())
+ except ValueError:
+ return response.json()
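For reference, typical calls through this helper, using endpoints that appear later in this diff (BACKEND_URL must point at a running backend, per .env.example):

    from lib.api import api

    # GET {BACKEND_URL}/api/health/largest_perp_positions -> DataFrame
    positions = api("health", "largest_perp_positions")

    # extra path segments become .../matrix/0/0; as_json returns the raw dict
    matrix = api("asset-liability", "matrix", "0", "0", as_json=True)

    # query parameters are forwarded verbatim
    shock = api(
        "price-shock",
        "usermap",
        params={"asset_group": "ignore stables", "oracle_distortion": 0.05},
        as_json=True,
    )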
diff --git a/src/lib/page.py b/src/lib/page.py
new file mode 100644
index 0000000..f362073
--- /dev/null
+++ b/src/lib/page.py
@@ -0,0 +1,68 @@
+"""
+Common page functionality
+"""
+
+import asyncio
+from asyncio import AbstractEventLoop
+from datetime import datetime
+import os
+import time
+
+from anchorpy import Wallet
+from driftpy.account_subscription_config import AccountSubscriptionConfig
+from driftpy.drift_client import DriftClient
+import humanize
+from lib.api import api
+from solana.rpc.async_api import AsyncClient
+import streamlit as st
+
+from utils import load_newest_files
+from utils import load_vat
+
+
+RPC_STATE_KEY = "rpc_url"
+NETWORK_STATE_KEY = "network"
+VAT_STATE_KEY = "vat"
+
+
+def header():
+ image_path = os.path.abspath("./images/drift.svg")
+ st.logo(image=image_path)
+
+
+def sidebar():
+ st.sidebar.header("Data Information")
+ try:
+ metadata = api("metadata", "", as_json=True)
+ pickle_file = metadata["pickle_file"]
+ if pickle_file[-1] == "/":
+ pickle_file = pickle_file[:-1]
+ timestamp = pickle_file.split("-")[1:]
+ timestamp = datetime.strptime(" ".join(timestamp), "%Y %m %d %H %M %S")
+ time_ago = datetime.now() - timestamp
+ time_ago_str = humanize.precisedelta(
+ time_ago,
+ minimum_unit="seconds",
+ )
+ st.sidebar.write(f"Last snapshot: {timestamp}")
+ st.sidebar.write(f"Time since last snapshot: {time_ago_str}")
+ except Exception as e:
+ print(e)
+ st.sidebar.error("Unable to reach backend")
+
+
+def needs_backend(page_callable: callable):
+ """
+ Decorator to add a guard to a page function
+ """
+
+ def page_with_guard():
+ try:
+ api("metadata", "", as_json=True)
+ except Exception as e:
+ st.error("Sorry, unable to reach backend")
+ return
+
+ page_callable()
+
+ return page_with_guard
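sidebar() recovers the snapshot time from the pickle directory name. A worked example of that parsing, assuming a name of the form prefix-YYYY-MM-DD-HH-MM-SS/ (the vat- prefix here is illustrative; only the split-and-parse logic is from the code above):

    from datetime import datetime

    pickle_file = "vat-2024-09-30-12-45-10/".rstrip("/")
    parts = pickle_file.split("-")[1:]  # ["2024", "09", "30", "12", "45", "10"]
    timestamp = datetime.strptime(" ".join(parts), "%Y %m %d %H %M %S")
    print(timestamp)  # 2024-09-30 12:45:10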
diff --git a/src/lib/user_metrics.py b/src/lib/user_metrics.py
new file mode 100644
index 0000000..f835353
--- /dev/null
+++ b/src/lib/user_metrics.py
@@ -0,0 +1,199 @@
+import copy
+from typing import List, Optional
+
+from driftpy.constants.numeric_constants import MARGIN_PRECISION
+from driftpy.constants.numeric_constants import QUOTE_PRECISION
+from driftpy.constants.perp_markets import mainnet_perp_market_configs
+from driftpy.constants.spot_markets import mainnet_spot_market_configs
+from driftpy.drift_client import DriftClient
+from driftpy.drift_user import DriftUser
+from driftpy.math.margin import MarginCategory
+from driftpy.types import OraclePriceData
+from driftpy.user_map.user_map import UserMap
+
+
+def get_init_health(user: DriftUser):
+ """
+ Returns the initial health of the user.
+ """
+ if user.is_being_liquidated():
+ return 0
+
+    total_collateral = user.get_total_collateral(MarginCategory.INITIAL)
+    initial_margin_req = user.get_margin_requirement(MarginCategory.INITIAL)
+
+    if initial_margin_req == 0 and total_collateral >= 0:
+        return 100
+    elif total_collateral <= 0:
+        return 0
+    else:
+        return round(
+            min(100, max(0, (1 - initial_margin_req / total_collateral) * 100))
+        )
+
+
+def comb_asset_liab(a_l_tup):
+ return a_l_tup[0] - a_l_tup[1]
+
+
+def get_collateral_composition(x: DriftUser, margin_category, n):
+ net_v = {
+ i: comb_asset_liab(
+ x.get_spot_market_asset_and_liability_value(i, margin_category)
+ )
+ / QUOTE_PRECISION
+ for i in range(n)
+ }
+ return net_v
+
+
+def get_perp_liab_composition(x: DriftUser, margin_category, n):
+ net_p = {
+ i: x.get_perp_market_liability(i, margin_category, signed=True)
+ / QUOTE_PRECISION
+ for i in range(n)
+ }
+ return net_p
+
+
+def get_user_metrics(
+    x: DriftUser,
+    margin_category: MarginCategory,
+    all_fields: bool = False,
+    oracle_cache: Optional[dict] = None,
+):
+    """
+    Returns a dictionary of the user's health, leverage, and other metrics.
+
+    If `oracle_cache` is given, it is installed on the user's drift client
+    first, so scenario (distorted-oracle) caches actually take effect.
+    """
+    if oracle_cache is not None:
+        x.drift_client.account_subscriber.cache = oracle_cache
+
+    NUMBER_OF_SPOT = len(mainnet_spot_market_configs)
+    NUMBER_OF_PERP = len(mainnet_perp_market_configs)
+
+ metrics = {
+ "user_key": x.user_public_key,
+ "leverage": x.get_leverage() / MARGIN_PRECISION,
+ "perp_liability": x.get_total_perp_position_liability(margin_category)
+ / QUOTE_PRECISION,
+ "spot_asset": x.get_spot_market_asset_value(None, margin_category)
+ / QUOTE_PRECISION,
+ "spot_liability": x.get_spot_market_liability_value(None, margin_category)
+ / QUOTE_PRECISION,
+ "upnl": x.get_unrealized_pnl(True) / QUOTE_PRECISION,
+ "net_usd_value": (
+ x.get_net_spot_market_value(None) + x.get_unrealized_pnl(True)
+ )
+ / QUOTE_PRECISION,
+ }
+ metrics["health"] = (
+ get_init_health(x)
+ if margin_category == MarginCategory.INITIAL
+ else x.get_health()
+ )
+
+ if all_fields:
+ metrics["net_v"] = get_collateral_composition(
+ x, margin_category, NUMBER_OF_SPOT
+ )
+ metrics["net_p"] = get_perp_liab_composition(x, margin_category, NUMBER_OF_PERP)
+
+ return metrics
+
+
+def get_skipped_oracles(cov_matrix: Optional[str]) -> List[str]:
+ """
+ Determine which oracles to skip based on the cov_matrix parameter.
+ """
+ groups = {
+ "sol only": ["SOL"],
+ "sol lst only": ["mSOL", "jitoSOL", "bSOL"],
+ "sol ecosystem only": ["PYTH", "JTO", "WIF", "JUP", "TNSR", "DRIFT"],
+ "meme": ["WIF"],
+ "wrapped only": ["wBTC", "wETH"],
+ "stables only": ["USD"],
+ }
+ if cov_matrix in groups:
+ return [
+ str(x.oracle)
+ for x in mainnet_spot_market_configs
+ if x.symbol not in groups[cov_matrix]
+ ]
+ elif cov_matrix == "ignore stables":
+ return [str(x.oracle) for x in mainnet_spot_market_configs if "USD" in x.symbol]
+ else:
+ return []
+
+
+def calculate_leverages(
+    user_vals: list[DriftUser], margin_category: Optional[MarginCategory]
+):
+    """
+    Calculate the metrics for all users at a given margin category.
+    """
+    return [get_user_metrics(x, margin_category) for x in user_vals]
+
+
+async def get_usermap_df(
+ _drift_client: DriftClient,
+ user_map: UserMap,
+ mode: str,
+ oracle_distortion: float = 0.1,
+ cov_matrix: Optional[str] = None,
+ n_scenarios: int = 5,
+):
+ user_keys = list(user_map.user_map.keys())
+ user_vals = list(user_map.values())
+
+ skipped_oracles = get_skipped_oracles(cov_matrix)
+
+ if mode == "margins":
+ leverages_none = calculate_leverages(user_vals, None)
+ leverages_initial = calculate_leverages(user_vals, MarginCategory.INITIAL)
+ leverages_maintenance = calculate_leverages(
+ user_vals, MarginCategory.MAINTENANCE
+ )
+ return (leverages_none, leverages_initial, leverages_maintenance), user_keys
+ else:
+ num_entrs = n_scenarios
+ new_oracles_dat_up = []
+ new_oracles_dat_down = []
+
+ for i in range(num_entrs):
+ new_oracles_dat_up.append({})
+ new_oracles_dat_down.append({})
+
+ assert len(new_oracles_dat_down) == num_entrs
+ print("skipped oracles:", skipped_oracles)
+ distorted_oracles = []
+ cache_up = copy.deepcopy(_drift_client.account_subscriber.cache)
+ cache_down = copy.deepcopy(_drift_client.account_subscriber.cache)
+    # the scenario loop below reuses `i`, so don't enumerate here
+    for key, val in _drift_client.account_subscriber.cache[
+        "oracle_price_data"
+    ].items():
+ for i in range(num_entrs):
+ new_oracles_dat_up[i][key] = copy.deepcopy(val)
+ new_oracles_dat_down[i][key] = copy.deepcopy(val)
+ if cov_matrix is not None and key in skipped_oracles:
+ continue
+ distorted_oracles.append(key)
+ for i in range(num_entrs):
+ oracle_distort_up = max(1 + oracle_distortion * (i + 1), 1)
+ oracle_distort_down = max(1 - oracle_distortion * (i + 1), 0)
+
+ if isinstance(new_oracles_dat_up[i][key], OraclePriceData):
+ new_oracles_dat_up[i][key].price *= oracle_distort_up
+ new_oracles_dat_down[i][key].price *= oracle_distort_down
+ else:
+ new_oracles_dat_up[i][key].data.price *= oracle_distort_up
+ new_oracles_dat_down[i][key].data.price *= oracle_distort_down
+
+ levs_none = calculate_leverages(user_vals, None)
+ levs_up = []
+ levs_down = []
+
+ for i in range(num_entrs):
+ cache_up["oracle_price_data"] = new_oracles_dat_up[i]
+ cache_down["oracle_price_data"] = new_oracles_dat_down[i]
+        # pass the distorted caches by keyword; positionally they would land
+        # in `all_fields` and the scenario prices would never be applied
+        levs_up_i = [
+            get_user_metrics(x, None, oracle_cache=cache_up) for x in user_vals
+        ]
+        levs_down_i = [
+            get_user_metrics(x, None, oracle_cache=cache_down) for x in user_vals
+        ]
+ levs_up.append(levs_up_i)
+ levs_down.append(levs_down_i)
+
+ return (
+ (levs_none, tuple(levs_up), tuple(levs_down)),
+ user_keys,
+ distorted_oracles,
+ )
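A worked example of the scenario schedule above: with the defaults oracle_distortion=0.1 and n_scenarios=5, prices are scaled by roughly 1.1 through 1.5 on the way up and 0.9 down to 0.5 on the way down; the max() clamps keep the up factor at least 1 and the down factor non-negative.

    oracle_distortion = 0.1
    n_scenarios = 5
    ups = [max(1 + oracle_distortion * (i + 1), 1) for i in range(n_scenarios)]
    downs = [max(1 - oracle_distortion * (i + 1), 0) for i in range(n_scenarios)]
    print(ups)    # ~[1.1, 1.2, 1.3, 1.4, 1.5] (up to float rounding)
    print(downs)  # ~[0.9, 0.8, 0.7, 0.6, 0.5]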
diff --git a/src/lib/vat.py b/src/lib/vat.py
new file mode 100644
index 0000000..cdd2b71
--- /dev/null
+++ b/src/lib/vat.py
@@ -0,0 +1,29 @@
+"""
+VAT (Virtual Account Table) Loading
+"""
+
+import asyncio
+import os
+import time
+
+import streamlit as st
+from driftpy.drift_client import DriftClient
+
+from utils import load_newest_files, load_vat
+
+
+@st.cache_data()
+def cached_load_vat(_dc: DriftClient):
+    # leading underscore: st.cache_data cannot hash DriftClient, so the
+    # client is excluded from the cache key
+    loop = asyncio.new_event_loop()
+    asyncio.set_event_loop(loop)
+    newest_snapshot = load_newest_files(os.getcwd() + "/pickles")
+    vat = loop.run_until_complete(load_vat(_dc, newest_snapshot))
+    loop.close()
+    return vat
+
+
+def get_vat(dc: DriftClient):
+ start_load_vat = time.time()
+ vat = cached_load_vat(dc)
+ print(f"loaded vat in {time.time() - start_load_vat}")
+ return vat
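One note on the caching above: st.cache_data builds its cache key by hashing the function's arguments, and DriftClient is not hashable, hence the leading underscore on _dc, which tells Streamlit to exclude that argument from the key. A sketch of the same convention with an extra hashable parameter, should per-snapshot invalidation ever be wanted (the names here are illustrative):

    import streamlit as st


    @st.cache_data()
    def cached_load(_dc, snapshot_name: str):
        # snapshot_name participates in the cache key; _dc does not
        ...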
diff --git a/src/main.py b/src/main.py
index 3957605..69d1572 100644
--- a/src/main.py
+++ b/src/main.py
@@ -1,136 +1,73 @@
-import asyncio
-import heapq
-import time
import os
-from asyncio import AbstractEventLoop
-import plotly.express as px # type: ignore
-import pandas as pd # type: ignore
-
-from typing import Any
-
-from solana.rpc.async_api import AsyncClient
-
-from anchorpy import Wallet
-
-import streamlit as st
-from driftpy.drift_user import DriftUser
-from driftpy.drift_client import DriftClient
-from driftpy.account_subscription_config import AccountSubscriptionConfig
-from driftpy.constants.numeric_constants import (
- BASE_PRECISION,
- SPOT_BALANCE_PRECISION,
- PRICE_PRECISION,
-)
-from driftpy.types import is_variant
-from driftpy.pickle.vat import Vat
-from driftpy.constants.spot_markets import mainnet_spot_market_configs, devnet_spot_market_configs
-from driftpy.constants.perp_markets import mainnet_perp_market_configs, devnet_perp_market_configs
-
-from utils import load_newest_files, load_vat, to_financial
-from sections.asset_liab_matrix import asset_liab_matrix_page
-from sections.ob import ob_cmp_page
-from sections.scenario import plot_page
+from dotenv import load_dotenv
+from lib.page import header
+from lib.page import needs_backend
+from lib.page import sidebar
+from page.asset_liability import asset_liab_matrix_page
+from page.backend import backend_page
+from page.health import health_page
+from page.orderbook import orderbook_page
+from page.price_shock import price_shock_page
from sections.liquidation_curves import plot_liquidation_curve
+from sections.welcome import welcome_page
+import streamlit as st
-from health_utils import *
-
-@st.cache(allow_output_mutation=True)
-def cached_load_vat(dc: DriftClient):
- loop = asyncio.new_event_loop()
- asyncio.set_event_loop(loop)
- newest_snapshot = load_newest_files(os.getcwd() + "/pickles")
- vat = loop.run_until_complete(load_vat(dc, newest_snapshot))
- loop.close()
- return vat
-def get_vat(dc: DriftClient):
- start_load_vat = time.time()
- vat = cached_load_vat(dc)
- print(f"loaded vat in {time.time() - start_load_vat}")
- return vat
+load_dotenv()
-def main():
+if __name__ == "__main__":
st.set_page_config(layout="wide")
-
-    url = os.getenv("RPC_URL", "🤫")
- env = st.sidebar.radio('env', ('mainnet-beta', 'devnet'))
- rpc = st.sidebar.text_input("RPC URL", value=url)
-    if env == 'mainnet-beta' and (rpc == '🤫' or rpc == ''):
- rpc = os.environ['ANCHOR_PROVIDER_URL']
-
- query_index = 0
- def query_string_callback():
- st.query_params['tab'] = st.session_state.query_key
- query_tab = st.query_params.get('tab', ['Welcome'])[0]
- tab_options = ('Welcome', 'Health', 'Price-Shock', 'Asset-Liab-Matrix', 'Orderbook', 'Liquidations')
- for idx, x in enumerate(tab_options):
- if x.lower() == query_tab.lower():
- query_index = idx
-
- tab = st.sidebar.radio(
- "Select Tab:",
- tab_options,
- query_index,
- on_change=query_string_callback,
- key='query_key'
+ header()
+ sidebar()
+
+ pages = [
+ st.Page(
+ welcome_page,
+ url_path="welcome",
+ title="Welcome",
+ icon=":material/home:",
+ ),
+ st.Page(
+ orderbook_page,
+ url_path="orderbook",
+ title="Orderbook",
+ icon="๐",
+ ),
+ st.Page(
+ needs_backend(health_page),
+ url_path="health",
+ title="Health",
+ icon="๐ฅ",
+ ),
+ st.Page(
+ needs_backend(price_shock_page),
+ url_path="price-shock",
+ title="Price Shock",
+ icon="๐ธ",
+ ),
+ st.Page(
+ needs_backend(asset_liab_matrix_page),
+ url_path="asset-liab-matrix",
+ title="Asset-Liab Matrix",
+ icon="๐",
+ ),
+ st.Page(
+ needs_backend(plot_liquidation_curve),
+ url_path="liquidation-curves",
+ title="Liquidation Curves",
+ icon="๐",
+ ),
+ ]
+ if os.getenv("DEV"):
+ pages.append(
+ st.Page(
+ needs_backend(backend_page),
+ url_path="backend",
+ title="Control Backend",
+ icon="๐งช",
+ )
)
- if rpc == "๐คซ" or rpc == "":
- st.warning("Please enter a Solana RPC URL")
- else:
- drift_client = DriftClient(
- AsyncClient(rpc),
- Wallet.dummy(),
- account_subscription=AccountSubscriptionConfig("cached"),
- )
-
- loop: AbstractEventLoop = asyncio.new_event_loop()
- if tab.lower() in ['health', 'price-shock', 'asset-liab-matrix', 'liquidations'] and 'vat' not in st.session_state:
- # start_sub = time.time()
- # loop.run_until_complete(dc.subscribe())
- # print(f"subscribed in {time.time() - start_sub}")
-
- newest_snapshot = load_newest_files(os.getcwd() + "/pickles")
-
- start_load_vat = time.time()
- vat = loop.run_until_complete(load_vat(drift_client, newest_snapshot))
- st.session_state["vat"] = vat
- print(f"loaded vat in {time.time() - start_load_vat}")
- elif tab.lower() in ['health', 'price-shock', 'asset-liab-matrix', 'liquidations']:
- vat = st.session_state["vat"]
-
- if tab.lower() == 'health':
- health_distribution = get_account_health_distribution(vat)
-
- with st.container():
- st.plotly_chart(health_distribution, use_container_width=True)
-
- perp_col, spot_col = st.columns([1, 1])
-
- with perp_col:
- largest_perp_positions = get_largest_perp_positions(vat)
- st.markdown("### **Largest perp positions:**")
- st.table(largest_perp_positions)
- most_levered_positions = get_most_levered_perp_positions_above_1m(vat)
- st.markdown("### **Most levered perp positions > $1m:**")
- st.table(most_levered_positions)
-
- with spot_col:
- largest_spot_borrows = get_largest_spot_borrows(vat)
- st.markdown("### **Largest spot borrows:**")
- st.table(largest_spot_borrows)
- most_levered_borrows = get_most_levered_spot_borrows_above_1m(vat)
- st.markdown("### **Most levered spot borrows > $750k:**")
- st.table(most_levered_borrows)
-
- elif tab.lower() == 'price-shock':
- plot_page(loop, vat, drift_client)
- elif tab.lower() == 'asset-liab-matrix':
- asset_liab_matrix_page(loop, vat, drift_client)
- elif tab.lower() == 'orderbook':
- ob_cmp_page()
- elif tab.lower() == 'liquidations':
- plot_liquidation_curve(vat)
-
-main()
+ pg = st.navigation(pages)
+ pg.run()
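One caveat in the DEV gate above: os.getenv("DEV") is truthy for any non-empty string, including "false", so the backend page appears whenever the variable is set at all. If strict parsing is ever wanted, a sketch:

    import os

    DEV = os.getenv("DEV", "").strip().lower() in ("1", "true", "yes")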
diff --git a/src/page/asset_liability.py b/src/page/asset_liability.py
new file mode 100644
index 0000000..15ffed8
--- /dev/null
+++ b/src/page/asset_liability.py
@@ -0,0 +1,51 @@
+from driftpy.constants.perp_markets import mainnet_perp_market_configs
+from driftpy.constants.spot_markets import mainnet_spot_market_configs
+from lib.api import api
+import pandas as pd
+import streamlit as st
+
+
+options = [0, 1, 2, 3]
+labels = [
+ "none",
+ "liq within 50% of oracle",
+ "maint. health < 10%",
+ "init. health < 10%",
+]
+
+
+def asset_liab_matrix_page():
+ mode = st.selectbox("Options", options, format_func=lambda x: labels[x])
+ if mode is None:
+ mode = 0
+
+ perp_market_index = st.selectbox(
+ "Market index", [x.market_index for x in mainnet_perp_market_configs]
+ )
+ if perp_market_index is None:
+ perp_market_index = 0
+
+ result = api(
+ "asset-liability",
+ "matrix",
+ "0" if mode is None else str(mode),
+ "0" if perp_market_index is None else str(perp_market_index),
+ as_json=True,
+ )
+ res = pd.DataFrame(result["res"])
+ df = pd.DataFrame(result["df"])
+
+ st.write(f"{df.shape[0]} users for scenario")
+ st.write(res)
+
+ tabs = st.tabs(["FULL"] + [x.symbol for x in mainnet_spot_market_configs])
+ tabs[0].dataframe(df, hide_index=True)
+
+ for idx, tab in enumerate(tabs[1:]):
+ important_cols = [x for x in df.columns if "spot_" + str(idx) in x]
+ toshow = df[["spot_asset", "net_usd_value"] + important_cols]
+ toshow = toshow[toshow[important_cols].abs().sum(axis=1) != 0].sort_values(
+ by="spot_" + str(idx) + "_all", ascending=False
+ )
+ tab.write(f"{ len(toshow)} users with this asset to cover liabilities")
+ tab.dataframe(toshow, hide_index=True)
diff --git a/src/page/backend.py b/src/page/backend.py
new file mode 100644
index 0000000..167014d
--- /dev/null
+++ b/src/page/backend.py
@@ -0,0 +1,21 @@
+from lib.api import api
+import streamlit as st
+
+
+def backend_page():
+    if st.button("Load Pickle"):
+ result = api("snapshot", "pickle", as_json=True)
+ st.write(result)
+
+ st.write(
+ """
+ ## Backend API
+
+ - [swagger](http://localhost:8000/docs)
+
+ - [redoc](http://localhost:8000/redoc)
+ """
+ )
+
+ st.title("Health")
diff --git a/src/page/health.py b/src/page/health.py
new file mode 100644
index 0000000..0860d6a
--- /dev/null
+++ b/src/page/health.py
@@ -0,0 +1,41 @@
+from lib.api import api
+import plotly.express as px
+import streamlit as st
+
+
+def health_page():
+ health_distribution = api("health", "health_distribution")
+ largest_perp_positions = api("health", "largest_perp_positions")
+ most_levered_positions = api("health", "most_levered_perp_positions_above_1m")
+ largest_spot_borrows = api("health", "largest_spot_borrows")
+ most_levered_borrows = api("health", "most_levered_spot_borrows_above_1m")
+
+ fig = px.bar(
+ health_distribution,
+ x="Health Range",
+ y="Counts",
+ title="Health Distribution",
+ hover_data={"Notional Values": ":,"}, # Custom format for notional values
+ labels={"Counts": "Num Users", "Notional Values": "Notional Value ($)"},
+ )
+
+ fig.update_traces(
+ hovertemplate="Health Range: %{x}
Count: %{y}
Notional Value: $%{customdata[0]:,.0f}"
+ )
+
+ with st.container():
+ st.plotly_chart(fig, use_container_width=True)
+
+ perp_col, spot_col = st.columns([1, 1])
+
+ with perp_col:
+ st.markdown("### **Largest perp positions:**")
+ st.dataframe(largest_perp_positions, hide_index=True)
+ st.markdown("### **Most levered perp positions > $1m:**")
+ st.dataframe(most_levered_positions, hide_index=True)
+
+ with spot_col:
+ st.markdown("### **Largest spot borrows:**")
+ st.dataframe(largest_spot_borrows, hide_index=True)
+ st.markdown("### **Most levered spot borrows > $750k:**")
+ st.dataframe(most_levered_borrows, hide_index=True)
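The hovertemplate above works because Plotly Express routes the extra hover_data column into the trace's customdata, which the template reads back as %{customdata[0]}. A self-contained sketch of the same wiring, with invented numbers:

    import pandas as pd
    import plotly.express as px

    df = pd.DataFrame(
        {
            "Health Range": ["0-10", "10-20"],
            "Counts": [42, 17],
            "Notional Values": [1_250_000, 300_000],
        }
    )
    fig = px.bar(df, x="Health Range", y="Counts", hover_data={"Notional Values": ":,"})
    fig.update_traces(
        hovertemplate="Health Range: %{x}<br>Count: %{y}"
        "<br>Notional Value: $%{customdata[0]:,.0f}"
    )
    fig.show()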
diff --git a/src/page/orderbook.py b/src/page/orderbook.py
new file mode 100644
index 0000000..6b5fb68
--- /dev/null
+++ b/src/page/orderbook.py
@@ -0,0 +1,141 @@
+import requests
+import streamlit as st
+
+from lib.page import RPC_STATE_KEY
+
+
+def fetch_orderbook_data(coin, size):
+ post_url = "https://api.hyperliquid.xyz/info"
+ payload = {"type": "metaAndAssetCtxs"}
+ payload2 = {"type": "l2Book", "coin": coin}
+
+ post_headers = {"Content-Type": "application/json"}
+
+ results = {}
+
+ for nom, pay in [("hl_cxt", payload), ("hl_book", payload2)]:
+ post_response = requests.post(post_url, json=pay, headers=post_headers)
+ if post_response.status_code == 200:
+ results[nom] = post_response.json()
+ else:
+ print("Error:", post_response.text, "\n")
+
+ get_url = "https://dlob.drift.trade/l2"
+ get_params = {
+ "marketName": coin + "-PERP",
+ "depth": 5,
+ "includeOracle": "true",
+ "includeVamm": "true",
+ }
+
+ get_response = requests.get(get_url, params=get_params)
+ if not get_response.status_code == 200:
+ print("Error:", get_response.text)
+
+ results["dr_book"] = get_response.json()
+
+    def calculate_average_fill_price_dr(order_book, volume):
+        # Drift levels: price is 1e6 precision, size is 1e9 precision
+
+ bids = order_book["bids"]
+ asks = order_book["asks"]
+
+ print(f'{float(bids[0]["price"])/1e6}/{float(asks[0]["price"])/1e6}')
+
+ def average_price(levels, volume, is_buy):
+ total_volume = 0
+ total_cost = 0.0
+
+ for level in levels:
+ # Price is in 1e6 precision, size is in 1e9 precision
+ price = float(level["price"]) / 1e6
+ size = float(level["size"]) / 1e9
+
+ if total_volume + size >= volume:
+ # Only take the remaining required volume at this level
+ remaining_volume = volume - total_volume
+ total_cost += remaining_volume * price
+ total_volume += remaining_volume
+ break
+ else:
+ # Take the whole size at this level
+ total_cost += size * price
+ total_volume += size
+
+ if total_volume < volume:
+ raise ValueError(
+ "Insufficient volume in the order book to fill the order"
+ )
+
+ return total_cost / volume
+
+ try:
+ buy_price = average_price(asks, volume, is_buy=True)
+ sell_price = average_price(bids, volume, is_buy=False)
+ except ValueError as e:
+ return str(e)
+
+ return {"average_buy_price": buy_price, "average_sell_price": sell_price}
+
+ def calculate_average_fill_price_hl(order_book, volume):
+ buy_levels = order_book["levels"][1] # Bids (lower prices first)
+ sell_levels = order_book["levels"][0] # Asks (higher prices first)
+
+ def average_price(levels, volume):
+ total_volume = 0
+ total_cost = 0.0
+
+ for level in levels:
+ px = float(level["px"])
+ sz = float(level["sz"])
+
+ if total_volume + sz >= volume:
+ # Only take the remaining required volume at this level
+ remaining_volume = volume - total_volume
+ total_cost += remaining_volume * px
+ total_volume += remaining_volume
+ break
+ else:
+ # Take the whole size at this level
+ total_cost += sz * px
+ total_volume += sz
+
+ if total_volume < volume:
+ raise ValueError(
+ "Insufficient volume in the order book to fill the order"
+ )
+
+ return total_cost / volume
+
+ try:
+ buy_price = average_price(buy_levels, volume)
+ sell_price = average_price(sell_levels, volume)
+ except ValueError as e:
+ return str(e)
+
+ return {"average_buy_price": buy_price, "average_sell_price": sell_price}
+
+ r = calculate_average_fill_price_hl(results["hl_book"], size)
+ d = calculate_average_fill_price_dr(results["dr_book"], size)
+ return (r, d, results["dr_book"]["oracle"] / 1e6, results["hl_cxt"])
+
+
+def orderbook_page():
+ if st.button("Refresh"):
+ st.cache_data.clear()
+ s1, s2 = st.columns(2)
+
+ coin = s1.selectbox("coin:", ["SOL", "BTC", "ETH"])
+ size = s2.number_input("size:", min_value=0.1, value=1.0, help="in base units")
+ hl, dr, dr_oracle, hl_ctx = fetch_orderbook_data(coin, size)
+
+ uni_id = [i for (i, x) in enumerate(hl_ctx[0]["universe"]) if coin == x["name"]]
+
+ o1, o2 = st.columns(2)
+ o1.header("hyperliquid")
+ o1.write(float(hl_ctx[1][uni_id[0]]["oraclePx"]))
+ o1.write(hl)
+
+ o2.header("drift")
+ o2.write(dr_oracle)
+ o2.write(dr)
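A worked example of the book walk inside average_price: buying 1.5 units against asks of 1.0 @ 100 and 1.0 @ 102 takes the whole first level and half the second, so the average fill is (1.0 * 100 + 0.5 * 102) / 1.5:

    levels = [{"px": "100", "sz": "1.0"}, {"px": "102", "sz": "1.0"}]

    total_volume, total_cost, volume = 0.0, 0.0, 1.5
    for level in levels:
        px, sz = float(level["px"]), float(level["sz"])
        if total_volume + sz >= volume:
            remaining = volume - total_volume
            total_cost += remaining * px
            total_volume += remaining
            break
        total_cost += sz * px
        total_volume += sz

    print(total_cost / volume)  # 100.666..., i.e. (100 + 51) / 1.5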
diff --git a/src/page/price_shock.py b/src/page/price_shock.py
new file mode 100644
index 0000000..920ba85
--- /dev/null
+++ b/src/page/price_shock.py
@@ -0,0 +1,110 @@
+import asyncio
+from asyncio import AbstractEventLoop
+import os
+import time
+from typing import Any
+
+from anchorpy import Wallet
+from driftpy.account_subscription_config import AccountSubscriptionConfig
+from driftpy.drift_client import DriftClient
+from driftpy.pickle.vat import Vat
+from lib.api import api
+from lib.page import RPC_STATE_KEY
+from lib.page import VAT_STATE_KEY
+from lib.user_metrics import get_usermap_df
+import pandas as pd
+from solana.rpc.async_api import AsyncClient
+import streamlit as st
+
+# DataFrame.plot() below must return a plotly figure (update_layout is
+# called on the result), so select the plotly backend, as the old
+# src/scenario.py did.
+pd.options.plotting.backend = "plotly"
+
+
+def price_shock_plot(price_scenario_users: list[Any], oracle_distort: float):
+ levs = price_scenario_users
+ dfs = (
+ [pd.DataFrame(levs[2][i]) for i in range(len(levs[2]))]
+ + [pd.DataFrame(levs[0])]
+ + [pd.DataFrame(levs[1][i]) for i in range(len(levs[1]))]
+ )
+
+ st.write(dfs)
+
+ spot_bankrs = []
+ for df in dfs:
+ spot_b_t1 = df[
+ (df["spot_asset"] < df["spot_liability"]) & (df["net_usd_value"] < 0)
+ ]
+ spot_bankrs.append(
+ (spot_b_t1["spot_liability"] - spot_b_t1["spot_asset"]).sum()
+ )
+
+ xdf = [
+ [-df[df["net_usd_value"] < 0]["net_usd_value"].sum() for df in dfs],
+ spot_bankrs,
+ ]
+ toplt_fig = pd.DataFrame(
+ xdf,
+ index=["bankruptcy", "spot bankrupt"],
+ columns=[oracle_distort * (i + 1) * -100 for i in range(len(levs[2]))]
+ + [0]
+ + [oracle_distort * (i + 1) * 100 for i in range(len(levs[1]))],
+ ).T
+ toplt_fig["perp bankrupt"] = toplt_fig["bankruptcy"] - toplt_fig["spot bankrupt"]
+ toplt_fig = toplt_fig.sort_index()
+ toplt_fig = toplt_fig.plot()
+
+ toplt_fig.update_layout(
+ title="Bankruptcies in crypto price scenarios",
+ xaxis_title="Oracle Move (%)",
+ yaxis_title="Bankruptcy ($)",
+ )
+ st.plotly_chart(toplt_fig)
+
+
+def price_shock_page():
+ cov_col, distort_col = st.columns(2)
+ cov = cov_col.selectbox(
+ "covariance:",
+ [
+ "ignore stables",
+ "sol + lst only",
+ "meme",
+ ],
+ index=0,
+ )
+
+ oracle_distort = distort_col.selectbox(
+ "oracle distortion:",
+ [0.05, 0.1, 0.2, 0.5, 1],
+ index=0,
+ help="step size of oracle distortions",
+ )
+
+ result = api(
+ "price-shock",
+ "usermap",
+ params={
+ "asset_group": cov,
+ "oracle_distortion": oracle_distort,
+ "n_scenarios": 5,
+ },
+ as_json=True,
+ )
+ st.write(result)
+
+ # price_shock_plot(price_scenario_users, oracle_distort)
+
+ # oracle_down_max = pd.DataFrame(price_scenario_users[-1][-1], index=user_keys)
+ # with st.expander(
+ # str("oracle down max bankrupt count=")
+ # + str(len(oracle_down_max[oracle_down_max.net_usd_value < 0]))
+ # ):
+ # st.dataframe(oracle_down_max)
+
+ # oracle_up_max = pd.DataFrame(price_scenario_users[1][-1], index=user_keys)
+ # with st.expander(
+ # str("oracle up max bankrupt count=")
+ # + str(len(oracle_up_max[oracle_up_max.net_usd_value < 0]))
+ # ):
+ # st.dataframe(oracle_up_max)
+
+ # with st.expander("distorted oracle keys"):
+ # st.write(distorted_oracles)
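price_shock_plot above is currently unused (its call is commented out), but for reference it expects the scenario-mode shape produced by get_usermap_df: (levs_none, levs_up, levs_down), where levs_none is a list of per-user metric dicts and levs_up / levs_down hold one such list per scenario. A minimal dummy invocation inside a Streamlit page, with invented numbers:

    healthy = {"spot_asset": 100.0, "spot_liability": 40.0, "net_usd_value": 60.0}
    bankrupt = {"spot_asset": 10.0, "spot_liability": 50.0, "net_usd_value": -40.0}

    levs_none = [healthy, bankrupt]
    levs_up = tuple([healthy, healthy] for _ in range(5))     # 5 upside scenarios
    levs_down = tuple([healthy, bankrupt] for _ in range(5))  # 5 downside scenarios

    price_shock_plot((levs_none, levs_up, levs_down), oracle_distort=0.05)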
diff --git a/src/scenario.py b/src/scenario.py
deleted file mode 100644
index 8054eb8..0000000
--- a/src/scenario.py
+++ /dev/null
@@ -1,207 +0,0 @@
-import sys
-from tokenize import tabsize
-import driftpy
-import pandas as pd
-import numpy as np
-import copy
-import plotly.express as px
-pd.options.plotting.backend = "plotly"
-# from datafetch.transaction_fetch import load_token_balance
-# from driftpy.constants.config import configs
-from anchorpy import Provider, Wallet, AccountClient
-from solders.keypair import Keypair
-from solana.rpc.async_api import AsyncClient
-from solana.rpc.types import MemcmpOpts
-from driftpy.drift_client import DriftClient
-from driftpy.accounts import get_perp_market_account, get_spot_market_account, get_user_account, get_state_account
-from driftpy.constants.numeric_constants import *
-from driftpy.drift_user import DriftUser, get_token_amount
-# from datafetch.transaction_fetch import transaction_history_for_account, load_token_balance
-import pickle
-
-from driftpy.constants.perp_markets import mainnet_perp_market_configs
-from driftpy.constants.spot_markets import mainnet_spot_market_configs
-
-import os
-import json
-import streamlit as st
-from driftpy.constants.spot_markets import devnet_spot_market_configs, SpotMarketConfig
-from driftpy.constants.perp_markets import devnet_perp_market_configs, PerpMarketConfig
-from dataclasses import dataclass
-from solders.pubkey import Pubkey
-# from helpers import serialize_perp_market_2, serialize_spot_market, all_user_stats, DRIFT_WHALE_LIST_SNAP
-from anchorpy import EventParser
-import asyncio
-from driftpy.math.margin import MarginCategory
-import requests
-from driftpy.types import InsuranceFundStakeAccount, SpotMarketAccount, OraclePriceData
-from driftpy.addresses import *
-import time
-from driftpy.market_map.market_map_config import WebsocketConfig
-from driftpy.user_map.user_map import UserMap, UserMapConfig, PollingConfig
-import datetime
-import csv
-from utils import get_init_health
-
-NUMBER_OF_SPOT = 20
-NUMBER_OF_PERP = 33
-
-def comb_asset_liab(a_l_tup):
- return a_l_tup[0] - a_l_tup[1]
-
-def get_collateral_composition(x: DriftUser, margin_category, n):
- # ua = x.get_user_account()
- net_v = {i: comb_asset_liab(x.get_spot_market_asset_and_liability_value(i, margin_category))/QUOTE_PRECISION for i in range(n)}
- return net_v
-
-def get_perp_liab_composition(x: DriftUser, margin_category, n):
- # ua = x.get_user_account()
- net_p = {i: x.get_perp_market_liability(i, margin_category, signed=True)/QUOTE_PRECISION for i in range(n)}
- return net_p
-
-def get_perp_lp_share_composition(x: DriftUser, n):
- # ua = x.get_user_account()
- def get_lp_shares(x, i):
- res = x.get_perp_position(i)
- if res is not None:
- return res.lp_shares/1e9
- else:
- return 0
- net_p = {i: get_lp_shares(x, i) for i in range(n)}
- return net_p
-
-async def get_usermap_df(_drift_client: DriftClient, user_map: UserMap, mode: str, oracle_distor=.1,
- only_one_index=None, cov_matrix=None, n_scenarios=5, all_fields=False):
- perp_n = NUMBER_OF_PERP
- spot_n = NUMBER_OF_SPOT
-
- def do_dict(x: DriftUser, margin_category: MarginCategory, oracle_cache=None):
- if oracle_cache is not None:
- x.drift_client.account_subscriber.cache = oracle_cache
-
- if margin_category == MarginCategory.INITIAL:
- health_func = lambda x: get_init_health(x)
- else:
- health_func = lambda x: x.get_health()
-
- # user_account = x.get_user_account()
- levs0 = {
- # 'tokens': [x.get_token_amount(i) for i in range(spot_n)],
- 'user_key': x.user_public_key,
- 'leverage': x.get_leverage() / MARGIN_PRECISION,
- 'health': health_func(x),
- 'perp_liability': x.get_perp_market_liability(None, margin_category) / QUOTE_PRECISION,
- 'spot_asset': x.get_spot_market_asset_value(None, margin_category) / QUOTE_PRECISION,
- 'spot_liability': x.get_spot_market_liability_value(None, margin_category) / QUOTE_PRECISION,
- 'upnl': x.get_unrealized_pnl(True) / QUOTE_PRECISION,
- # 'funding_upnl': x.get_unrealized_funding_pnl() / QUOTE_PRECISION,
- # 'total_collateral': x.get_total_collateral(margin_category or MarginCategory.INITIAL) / QUOTE_PRECISION,
- # 'margin_req': x.get_margin_requirement(margin_category or MarginCategory.INITIAL) / QUOTE_PRECISION,
- # 'net_v': get_collateral_composition(x, margin_category, spot_n),
- # 'net_p': get_perp_liab_composition(x, margin_category, perp_n),
- # 'net_lp': get_perp_lp_share_composition(x, perp_n),
- # 'last_active_slot': user_account.last_active_slot,
- # 'cumulative_perp_funding': user_account.cumulative_perp_funding/QUOTE_PRECISION,
- # 'settled_perp_pnl': user_account.settled_perp_pnl/QUOTE_PRECISION,
- # 'name': bytes(user_account.name).decode('utf-8', errors='ignore').strip(),
- # 'authority': str(user_account.authority),
- # 'has_open_order': user_account.has_open_order,
- # 'sub_account_id': user_account.sub_account_id,
- # 'next_liquidation_id': user_account.next_liquidation_id,
- # 'cumulative_spot_fees': user_account.cumulative_spot_fees,
- # 'total_deposits': user_account.total_deposits,
- # 'total_withdraws': user_account.total_withdraws,
- # 'total_social_loss': user_account.total_social_loss,
- # 'unsettled_pnl_perp_x': x.get_unrealized_pnl(True, market_index=24) / QUOTE_PRECISION,
- }
- levs0['net_usd_value'] = levs0['spot_asset'] + levs0['upnl'] - levs0['spot_liability']
-
- if all_fields:
- levs0['net_v'] = get_collateral_composition(x, margin_category, spot_n)
- levs0['net_p'] = get_perp_liab_composition(x, margin_category, spot_n)
-
- return levs0
- user_map_result: UserMap = user_map
-
- user_keys = list(user_map_result.user_map.keys())
- user_vals = list(user_map_result.values())
- if cov_matrix == 'ignore stables':
- skipped_oracles = [str(x.oracle) for x in mainnet_spot_market_configs if 'USD' in x.symbol]
- elif cov_matrix == 'sol + lst only':
- skipped_oracles = [str(x.oracle) for x in mainnet_spot_market_configs if 'SOL' not in x.symbol]
- elif cov_matrix == 'sol lst only':
- skipped_oracles = [str(x.oracle) for x in mainnet_spot_market_configs if x.symbol not in ['mSOL', 'jitoSOL', 'bSOL']]
- elif cov_matrix == 'sol ecosystem only':
- skipped_oracles = [str(x.oracle) for x in mainnet_spot_market_configs if x.symbol not in ['PYTH', 'JTO', 'WIF', 'JUP', 'TNSR', 'DRIFT']]
- elif cov_matrix == 'meme':
- skipped_oracles = [str(x.oracle) for x in mainnet_spot_market_configs if x.symbol not in ['WIF']]
- elif cov_matrix == 'wrapped only':
- skipped_oracles = [str(x.oracle) for x in mainnet_spot_market_configs if x.symbol not in ['wBTC', 'wETH']]
- elif cov_matrix == 'stables only':
- skipped_oracles = [str(x.oracle) for x in mainnet_spot_market_configs if 'USD' not in x.symbol]
-
- if only_one_index is None or len(only_one_index) > 12:
- only_one_index_key = only_one_index
- else:
- only_one_index_key = ([str(x.oracle) for x in mainnet_perp_market_configs if x.base_asset_symbol == only_one_index] \
- +[str(x.oracle) for x in mainnet_spot_market_configs if x.symbol == only_one_index])[0]
-
- if mode == 'margins':
- levs_none = list(do_dict(x, None) for x in user_vals)
- levs_init = list(do_dict(x, MarginCategory.INITIAL) for x in user_vals)
- levs_maint = list(do_dict(x, MarginCategory.MAINTENANCE) for x in user_vals)
- return (levs_none, levs_init, levs_maint), user_keys
- else:
- num_entrs = n_scenarios # increment to get more steps
- new_oracles_dat_up = []
- new_oracles_dat_down = []
-
- for i in range(num_entrs):
- new_oracles_dat_up.append({})
- new_oracles_dat_down.append({})
-
-
- assert(len(new_oracles_dat_down) == num_entrs)
- print('skipped oracles:', skipped_oracles)
- distorted_oracles = []
- cache_up = copy.deepcopy(_drift_client.account_subscriber.cache)
- cache_down = copy.deepcopy(_drift_client.account_subscriber.cache)
- for i,(key, val) in enumerate(_drift_client.account_subscriber.cache['oracle_price_data'].items()):
- for i in range(num_entrs):
- new_oracles_dat_up[i][key] = copy.deepcopy(val)
- new_oracles_dat_down[i][key] = copy.deepcopy(val)
- if cov_matrix is not None and key in skipped_oracles:
- continue
- if only_one_index is None or only_one_index_key == key:
- distorted_oracles.append(key)
- for i in range(num_entrs):
- oracle_distort_up = max(1 + oracle_distor * (i+1), 1)
- oracle_distort_down = max(1 - oracle_distor * (i+1), 0)
-
- # weird pickle artifact
- if isinstance(new_oracles_dat_up[i][key], OraclePriceData):
- new_oracles_dat_up[i][key].price *= oracle_distort_up
- new_oracles_dat_down[i][key].price *= oracle_distort_down
- else:
- new_oracles_dat_up[i][key].data.price *= oracle_distort_up
- new_oracles_dat_down[i][key].data.price *= oracle_distort_down
-
- levs_none = list(do_dict(x, None, None) for x in user_vals)
- levs_up = []
- levs_down = []
-
- for i in range(num_entrs):
- cache_up['oracle_price_data'] = new_oracles_dat_up[i]
- cache_down['oracle_price_data'] = new_oracles_dat_down[i]
- levs_up_i = list(do_dict(x, None, cache_up) for x in user_vals)
- levs_down_i = list(do_dict(x, None, cache_down) for x in user_vals)
- levs_up.append(levs_up_i)
- levs_down.append(levs_down_i)
-
- return (levs_none, tuple(levs_up), tuple(levs_down)), user_keys, distorted_oracles
-
-
-
-async def get_new_ff(usermap):
- await usermap.sync()
- usermap.dump()
\ No newline at end of file
diff --git a/src/sections/asset_liab_matrix.py b/src/sections/asset_liab_matrix.py
deleted file mode 100644
index 9bc16b4..0000000
--- a/src/sections/asset_liab_matrix.py
+++ /dev/null
@@ -1,145 +0,0 @@
-from asyncio import AbstractEventLoop
-import pandas as pd # type: ignore
-
-import streamlit as st
-from driftpy.drift_client import DriftClient
-from driftpy.pickle.vat import Vat
-
-from driftpy.constants.spot_markets import mainnet_spot_market_configs
-from driftpy.constants.perp_markets import mainnet_perp_market_configs
-
-from scenario import get_usermap_df
-
-options = [0, 1, 2, 3]
-labels = ["none", "liq within 50% of oracle", "maint. health < 10%", "init. health < 10%"]
-
-def get_matrix(loop: AbstractEventLoop, vat: Vat, drift_client: DriftClient, env='mainnet', mode=0, perp_market_inspect=0):
- NUMBER_OF_SPOT = 20
- NUMBER_OF_PERP = 33
-
- oracle_distort = 0
- if "margin" not in st.session_state:
- (levs_none, levs_init, levs_maint), user_keys = loop.run_until_complete(get_usermap_df(drift_client, vat.users,
- 'margins', oracle_distort,
- None, 'ignore stables', n_scenarios=0, all_fields=True))
- levs_maint = [x for x in levs_maint if int(x['health']) <= 10]
- levs_init = [x for x in levs_init if int(x['health']) <= 10]
- st.session_state["margin"] = (levs_none, levs_init, levs_maint), user_keys
- else:
- (levs_none, levs_init, levs_maint), user_keys = st.session_state["margin"]
-
- df: pd.DataFrame
- match mode:
- case 0: # nothing
- df = pd.DataFrame(levs_none, index=user_keys)
- case 1: # liq within 50% of oracle
- df = pd.DataFrame(levs_none, index=user_keys)
- case 2: # maint. health < 10%
- user_keys = [x['user_key'] for x in levs_init]
- df = pd.DataFrame(levs_init, index=user_keys)
- case 3: # init. health < 10%
- user_keys = [x['user_key'] for x in levs_maint]
- df = pd.DataFrame(levs_maint, index=user_keys)
-
- def get_rattt(row):
- calculations = [
- ('all_assets', lambda v: v if v > 0 else 0), # Simplified from v / row['spot_asset'] * row['spot_asset']
- ('all', lambda v: v / row['spot_asset'] * (row['perp_liability'] + row['spot_liability']) if v > 0 else 0),
- ('all_perp', lambda v: v / row['spot_asset'] * row['perp_liability'] if v > 0 else 0),
- ('all_spot', lambda v: v / row['spot_asset'] * row['spot_liability'] if v > 0 else 0),
- (f'perp_{perp_market_inspect}_long', lambda v: v / row['spot_asset'] * row['net_p'][perp_market_inspect] if v > 0 and row['net_p'][0] > 0 else 0),
- (f'perp_{perp_market_inspect}_short', lambda v: v / row['spot_asset'] * row['net_p'][perp_market_inspect] if v > 0 and row['net_p'][perp_market_inspect] < 0 else 0),
- ]
-
- series_list = []
- for suffix, calc_func in calculations:
- series = pd.Series([calc_func(val) for key, val in row['net_v'].items()])
- series.index = [f'spot_{x}_{suffix}' for x in series.index]
- series_list.append(series)
-
- return pd.concat(series_list)
-
- df = pd.concat([df, df.apply(get_rattt, axis=1)], axis=1)
-
- def calculate_effective_leverage(group):
- assets = group['all_assets']
- liabilities = group['all_liabilities']
- return liabilities / assets if assets != 0 else 0
-
- def format_with_checkmark(value, condition, mode, financial=False):
- if financial:
- formatted_value = f"{value:,.2f}"
- else:
- formatted_value = f"{value:.2f}"
-
- if condition and mode > 0:
- return f"{formatted_value} โ
"
- return formatted_value
-
- res = pd.DataFrame({
- ('spot' + str(i)): (
- df[f"spot_{i}_all_assets"].sum(),
- format_with_checkmark(
- df[f"spot_{i}_all"].sum(),
- 0 < df[f"spot_{i}_all"].sum() < 1_000_000,
- mode,
- financial=True
- ),
- format_with_checkmark(
- calculate_effective_leverage({
- 'all_assets': df[f"spot_{i}_all_assets"].sum(),
- 'all_liabilities': df[f"spot_{i}_all"].sum()
- }),
- 0 < calculate_effective_leverage({
- 'all_assets': df[f"spot_{i}_all_assets"].sum(),
- 'all_liabilities': df[f"spot_{i}_all"].sum()
- }) < 2,
- mode
- ),
- df[f"spot_{i}_all_spot"].sum(),
- df[f"spot_{i}_all_perp"].sum(),
- df[f"spot_{i}_perp_{perp_market_inspect}_long"].sum(),
- df[f"spot_{i}_perp_{perp_market_inspect}_short"].sum()
- ) for i in range(NUMBER_OF_SPOT)
- }, index=['all_assets', 'all_liabilities', 'effective_leverage', 'all_spot', 'all_perp',
- f'perp_{perp_market_inspect}_long',
- f'perp_{perp_market_inspect}_short']).T
-
- res['all_liabilities'] = res['all_liabilities'].astype(str)
- res['effective_leverage'] = res['effective_leverage'].astype(str)
-
- if env == 'mainnet': #mainnet_spot_market_configs
- res.index = [x.symbol for x in mainnet_spot_market_configs]
- res.index.name = 'spot assets' # type: ignore
-
- return res, df
-
-def asset_liab_matrix_page(loop: AbstractEventLoop, vat: Vat, drift_client: DriftClient, env='mainnet'):
- mode = st.selectbox("Options", options, format_func=lambda x: labels[x])
-
- if mode is None:
- mode = 0
-
- perp_market_inspect = st.selectbox("Market index", [x.market_index for x in mainnet_perp_market_configs])
-
- if perp_market_inspect is None:
- perp_market_inspect = 0
-
- res, df = get_matrix(loop, vat, drift_client, env, mode, perp_market_inspect)
-
- st.write(f"{df.shape[0]} users for scenario")
-
-
- st.write(res)
-
- tabs = st.tabs(['FULL'] + [x.symbol for x in mainnet_spot_market_configs])
-
- tabs[0].dataframe(df)
-
- for idx, tab in enumerate(tabs[1:]):
- important_cols = [x for x in df.columns if 'spot_'+str(idx) in x]
- toshow = df[['spot_asset', 'net_usd_value']+important_cols]
- toshow = toshow[toshow[important_cols].abs().sum(axis=1)!=0].sort_values(by="spot_"+str(idx)+'_all', ascending=False)
- tab.write(f'{ len(toshow)} users with this asset to cover liabilities')
- tab.dataframe(toshow)
-
diff --git a/src/sections/liquidation_curves.py b/src/sections/liquidation_curves.py
index 2de971b..91513b9 100644
--- a/src/sections/liquidation_curves.py
+++ b/src/sections/liquidation_curves.py
@@ -1,58 +1,31 @@
from collections import defaultdict
-from driftpy.pickle.vat import Vat
-from driftpy.constants.numeric_constants import (
- BASE_PRECISION,
- PRICE_PRECISION,
-)
+
+from lib.api import api
import numpy as np
-import plotly.graph_objects as go # type: ignore
+import plotly.graph_objects as go
import streamlit as st
+
options = [0, 1, 2]
labels = ["SOL-PERP", "BTC-PERP", "ETH-PERP"]
-def get_liquidation_curve(vat: Vat, market_index: int):
- liquidations_long: list[tuple[float, float]] = []
- liquidations_short: list[tuple[float, float]] = []
- market_price = vat.perp_oracles.get(market_index)
- market_price_ui = market_price.price / PRICE_PRECISION # type: ignore
- for user in vat.users.user_map.values():
- perp_position = user.get_perp_position(market_index)
- if perp_position is not None:
- liquidation_price = user.get_perp_liq_price(market_index)
- if liquidation_price is not None:
- liquidation_price_ui = liquidation_price / PRICE_PRECISION
- position_size = abs(perp_position.base_asset_amount) / BASE_PRECISION
- position_notional = position_size * market_price_ui
- is_zero = round(position_notional) == 0
- is_short = perp_position.base_asset_amount < 0
- is_long = perp_position.base_asset_amount > 0
- if is_zero:
- continue
- if is_short and liquidation_price_ui > market_price_ui:
- liquidations_short.append((liquidation_price_ui, position_notional))
- elif is_long and liquidation_price_ui < market_price_ui:
- liquidations_long.append((liquidation_price_ui, position_notional))
- else:
- pass
- # print(f"liquidation price for user {user.user_public_key} is {liquidation_price_ui} and market price is {market_price_ui} - is_short: {is_short} - size {position_size} - notional {position_notional}")
-
- liquidations_long.sort(key=lambda x: x[0])
- liquidations_short.sort(key=lambda x: x[0])
-
- # for (price, size) in liquidations_long:
- # print(f"Long liquidation for {size} @ {price}")
-
- # for (price, size) in liquidations_short:
- # print(f"Short liquidation for {size} @ {price}")
-
- return plot_liquidation_curves(liquidations_long, liquidations_short, market_price_ui)
-
-def plot_liquidation_curves(liquidations_long, liquidations_short, market_price_ui):
- def filter_outliers(liquidations, upper_bound_multiplier=2.0, lower_bound_multiplier=0.5):
+
+def plot_liquidation_curves(liquidation_data):
+ liquidations_long = liquidation_data["liquidations_long"]
+ liquidations_short = liquidation_data["liquidations_short"]
+ market_price_ui = liquidation_data["market_price_ui"]
+
+ def filter_outliers(
+ liquidations, upper_bound_multiplier=2.0, lower_bound_multiplier=0.5
+ ):
"""Filter out liquidations based on a range multiplier of the market price."""
- return [(price, notional) for price, notional in liquidations
- if lower_bound_multiplier * market_price_ui <= price <= upper_bound_multiplier * market_price_ui]
+ return [
+ (price, notional)
+ for price, notional in liquidations
+ if lower_bound_multiplier * market_price_ui
+ <= price
+ <= upper_bound_multiplier * market_price_ui
+ ]
def aggregate_liquidations(liquidations):
"""Aggregate liquidations to calculate cumulative notional amounts."""
@@ -64,27 +37,29 @@ def aggregate_liquidations(liquidations):
def prepare_data_for_plot(aggregated_data, reverse=False):
"""Prepare and sort data for plotting, optionally reversing the cumulative sum for descending plots."""
sorted_prices = sorted(aggregated_data.keys(), reverse=reverse)
- cumulative_notional = np.cumsum([aggregated_data[price] for price in sorted_prices])
- # if reverse:
- # cumulative_notional = cumulative_notional[::-1] # Reverse cumulative sum for descending plots
+ cumulative_notional = np.cumsum(
+ [aggregated_data[price] for price in sorted_prices]
+ )
return sorted_prices, cumulative_notional
# Filter outliers based on defined criteria
- liquidations_long = filter_outliers(liquidations_long, 2, 0.2) # Example multipliers for long positions
- liquidations_short = filter_outliers(liquidations_short, 5, 0.5) # Example multipliers for short positions
+ liquidations_long = filter_outliers(
+ liquidations_long, 2, 0.2
+ ) # Example multipliers for long positions
+ liquidations_short = filter_outliers(
+ liquidations_short, 5, 0.5
+ ) # Example multipliers for short positions
# Aggregate and prepare data
aggregated_long = aggregate_liquidations(liquidations_long)
aggregated_short = aggregate_liquidations(liquidations_short)
- long_prices, long_cum_notional = prepare_data_for_plot(aggregated_long, reverse=True)
+ long_prices, long_cum_notional = prepare_data_for_plot(
+ aggregated_long, reverse=True
+ )
short_prices, short_cum_notional = prepare_data_for_plot(aggregated_short)
- print(sum(long_cum_notional))
- print(sum(short_cum_notional))
-
if not long_prices or not short_prices:
- print("No data available for plotting.")
return None
# Create Plotly figures
@@ -92,28 +67,46 @@ def prepare_data_for_plot(aggregated_data, reverse=False):
short_fig = go.Figure()
# Add traces for long and short positions
- long_fig.add_trace(go.Scatter(x=long_prices, y=long_cum_notional, mode='lines', name='Long Positions',
- line=dict(color='purple', width=2)))
- short_fig.add_trace(go.Scatter(x=short_prices, y=short_cum_notional, mode='lines', name='Short Positions',
- line=dict(color='turquoise', width=2)))
+ long_fig.add_trace(
+ go.Scatter(
+ x=long_prices,
+ y=long_cum_notional,
+ mode="lines",
+ name="Long Positions",
+ line=dict(color="purple", width=2),
+ )
+ )
+ short_fig.add_trace(
+ go.Scatter(
+ x=short_prices,
+ y=short_cum_notional,
+ mode="lines",
+ name="Short Positions",
+ line=dict(color="turquoise", width=2),
+ )
+ )
# Update layout with axis titles and grid settings
- long_fig.update_layout(title='Long Liquidation Curve',
- xaxis_title='Asset Price',
- yaxis_title='Liquidations (Notional)',
- xaxis=dict(showgrid=True),
- yaxis=dict(showgrid=True))
-
- short_fig.update_layout(title='Short Liquidation Curve',
- xaxis_title='Asset Price',
- yaxis_title='Liquidations (Notional)',
- xaxis=dict(showgrid=True),
- yaxis=dict(showgrid=True))
+ long_fig.update_layout(
+ title="Long Liquidation Curve",
+ xaxis_title="Asset Price",
+ yaxis_title="Liquidations (Notional)",
+ xaxis=dict(showgrid=True),
+ yaxis=dict(showgrid=True),
+ )
+
+ short_fig.update_layout(
+ title="Short Liquidation Curve",
+ xaxis_title="Asset Price",
+ yaxis_title="Liquidations (Notional)",
+ xaxis=dict(showgrid=True),
+ yaxis=dict(showgrid=True),
+ )
return long_fig, short_fig
-
-def plot_liquidation_curve(vat: Vat):
+
+def plot_liquidation_curve():
st.write("Liquidation Curves")
market_index = st.selectbox(
@@ -125,7 +118,10 @@ def plot_liquidation_curve(vat: Vat):
if market_index is None:
market_index = 0
- (long_fig, short_fig) = get_liquidation_curve(vat, market_index)
+ liquidation_data = api(
+ "liquidation", "liquidation-curve", str(market_index), as_json=True
+ )
+ (long_fig, short_fig) = plot_liquidation_curves(liquidation_data)
long_col, short_col = st.columns([1, 1])
@@ -134,4 +130,3 @@ def plot_liquidation_curve(vat: Vat):
with short_col:
st.plotly_chart(short_fig, use_container_width=True)
-
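A worked example of the aggregation pipeline above, mirroring what aggregate_liquidations and prepare_data_for_plot do: per-price notionals are summed, then accumulated along the (here descending, as for longs) price axis.

    from collections import defaultdict

    import numpy as np

    liquidations = [(95.0, 10.0), (95.0, 5.0), (90.0, 20.0)]

    aggregated = defaultdict(float)
    for price, notional in liquidations:
        aggregated[price] += notional

    sorted_prices = sorted(aggregated.keys(), reverse=True)
    cumulative = np.cumsum([aggregated[p] for p in sorted_prices])
    print(sorted_prices)        # [95.0, 90.0]
    print(cumulative.tolist())  # [15.0, 35.0]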
diff --git a/src/sections/ob.py b/src/sections/ob.py
index 781d3c7..563467f 100644
--- a/src/sections/ob.py
+++ b/src/sections/ob.py
@@ -1,43 +1,37 @@
-
import asyncio
import heapq
-import time
import os
-
+import time
from asyncio import AbstractEventLoop
-import plotly.express as px # type: ignore
-import pandas as pd # type: ignore
-
from typing import Any
+
+import pandas as pd # type: ignore
+import plotly.express as px # type: ignore
+import requests
import streamlit as st
+from driftpy.constants.perp_markets import (
+ devnet_perp_market_configs,
+ mainnet_perp_market_configs,
+)
+from driftpy.constants.spot_markets import (
+ devnet_spot_market_configs,
+ mainnet_spot_market_configs,
+)
from driftpy.drift_client import DriftClient
from driftpy.pickle.vat import Vat
-from driftpy.constants.spot_markets import mainnet_spot_market_configs, devnet_spot_market_configs
-from driftpy.constants.perp_markets import mainnet_perp_market_configs, devnet_perp_market_configs
-
-from scenario import get_usermap_df
-import requests
-
def fetch_ob_data(coin, size):
# Define the POST request details
post_url = "https://api.hyperliquid.xyz/info"
- payload = {
- 'type': 'metaAndAssetCtxs'
- }
- payload2 = {
- "type": 'l2Book',
- "coin": coin
- }
+ payload = {"type": "metaAndAssetCtxs"}
+ payload2 = {"type": "l2Book", "coin": coin}
- post_headers = {
- "Content-Type": "application/json"
- }
+ post_headers = {"Content-Type": "application/json"}
results = {}
- for nom, pay in [('hl_cxt', payload), ('hl_book', payload2)]:
+ for nom, pay in [("hl_cxt", payload), ("hl_book", payload2)]:
# Send the POST request
post_response = requests.post(post_url, json=pay, headers=post_headers)
# Print the POST request response
@@ -52,10 +46,10 @@ def fetch_ob_data(coin, size):
# Define the GET request URL
get_url = "https://dlob.drift.trade/l2"
get_params = {
- "marketName": coin+"-PERP",
+ "marketName": coin + "-PERP",
"depth": 5,
"includeOracle": "true",
- "includeVamm": "true"
+ "includeVamm": "true",
}
# Send the GET request
@@ -69,27 +63,26 @@ def fetch_ob_data(coin, size):
else:
print("Error:", get_response.text)
-
- results['dr_book'] = get_response.json()
+ results["dr_book"] = get_response.json()
def calculate_average_fill_price_dr(order_book, volume):
# Adjust volume to match size precision (1e9)
volume = volume
-
- bids = order_book['bids']
- asks = order_book['asks']
+
+ bids = order_book["bids"]
+ asks = order_book["asks"]
print(f'{float(bids[0]["price"])/1e6}/{float(asks[0]["price"])/1e6}')
-
+
def average_price(levels, volume, is_buy):
total_volume = 0
total_cost = 0.0
-
+
for level in levels:
# Price is in 1e6 precision, size is in 1e9 precision
- price = float(level['price']) / 1e6
- size = float(level['size']) / 1e9
-
+ price = float(level["price"]) / 1e6
+ size = float(level["size"]) / 1e9
+
if total_volume + size >= volume:
# Only take the remaining required volume at this level
remaining_volume = volume - total_volume
@@ -100,35 +93,33 @@ def average_price(levels, volume, is_buy):
# Take the whole size at this level
total_cost += size * price
total_volume += size
-
+
if total_volume < volume:
- raise ValueError("Insufficient volume in the order book to fill the order")
-
+ raise ValueError(
+ "Insufficient volume in the order book to fill the order"
+ )
+
return total_cost / volume
-
+
try:
buy_price = average_price(asks, volume, is_buy=True)
sell_price = average_price(bids, volume, is_buy=False)
except ValueError as e:
return str(e)
-
- return {
- "average_buy_price": buy_price,
- "average_sell_price": sell_price
- }
+ return {"average_buy_price": buy_price, "average_sell_price": sell_price}
def calculate_average_fill_price_hl(order_book, volume):
- buy_levels = order_book['levels'][1] # Bids (lower prices first)
- sell_levels = order_book['levels'][0] # Asks (higher prices first)
+ buy_levels = order_book["levels"][1] # Bids (lower prices first)
+ sell_levels = order_book["levels"][0] # Asks (higher prices first)
def average_price(levels, volume):
total_volume = 0
total_cost = 0.0
for level in levels:
- px = float(level['px'])
- sz = float(level['sz'])
+ px = float(level["px"])
+ sz = float(level["sz"])
if total_volume + sz >= volume:
# Only take the remaining required volume at this level
@@ -142,7 +133,9 @@ def average_price(levels, volume):
total_volume += sz
if total_volume < volume:
- raise ValueError("Insufficient volume in the order book to fill the order")
+ raise ValueError(
+ "Insufficient volume in the order book to fill the order"
+ )
return total_cost / volume
@@ -152,34 +145,30 @@ def average_price(levels, volume):
except ValueError as e:
return str(e)
- return {
- "average_buy_price": buy_price,
- "average_sell_price": sell_price
- }
+ return {"average_buy_price": buy_price, "average_sell_price": sell_price}
+ r = calculate_average_fill_price_hl(results["hl_book"], size)
+ d = calculate_average_fill_price_dr(results["dr_book"], size)
+ return (r, d, results["dr_book"]["oracle"] / 1e6, results["hl_cxt"])
- r = calculate_average_fill_price_hl(results['hl_book'], size)
- d = calculate_average_fill_price_dr(results['dr_book'], size)
- return (r,d, results['dr_book']['oracle']/1e6, results['hl_cxt'])
def ob_cmp_page():
- if st.button('Refresh'):
+ if st.button("Refresh"):
st.cache_data.clear()
s1, s2 = st.columns(2)
- coin = s1.selectbox('coin:', ['SOL','BTC','ETH'])
- size = s2.number_input('size:', min_value=.1, value=1.0, help='in base units')
+ coin = s1.selectbox("coin:", ["SOL", "BTC", "ETH"])
+ size = s2.number_input("size:", min_value=0.1, value=1.0, help="in base units")
hl, dr, dr_oracle, hl_ctx = fetch_ob_data(coin, size)
-
- uni_id = [i for (i,x) in enumerate(hl_ctx[0]['universe']) if coin==x['name']]
+ uni_id = [i for (i, x) in enumerate(hl_ctx[0]["universe"]) if coin == x["name"]]
# hl_oracle = hl_ctx
o1, o2 = st.columns(2)
- o1.header('hyperliquid')
- o1.write(float(hl_ctx[1][uni_id[0]]['oraclePx']))
+ o1.header("hyperliquid")
+ o1.write(float(hl_ctx[1][uni_id[0]]["oraclePx"]))
o1.write(hl)
- o2.header('drift')
+ o2.header("drift")
o2.write(dr_oracle)
- o2.write(dr)
\ No newline at end of file
+ o2.write(dr)
diff --git a/src/sections/scenario.py b/src/sections/scenario.py
index 2dd91e7..fe288ad 100644
--- a/src/sections/scenario.py
+++ b/src/sections/scenario.py
@@ -1,84 +1,105 @@
-import asyncio
-import heapq
-import time
-import os
-
from asyncio import AbstractEventLoop
-import plotly.express as px # type: ignore
-import pandas as pd # type: ignore
-
+import time
from typing import Any
-import streamlit as st
+
from driftpy.drift_client import DriftClient
from driftpy.pickle.vat import Vat
+from lib.user_metrics import get_usermap_df
+import pandas as pd
+import streamlit as st
-from scenario import get_usermap_df
-
def price_shock_plot(price_scenario_users: list[Any], oracle_distort: float):
levs = price_scenario_users
- dfs = [pd.DataFrame(levs[2][i]) for i in range(len(levs[2]))] \
- + [pd.DataFrame(levs[0])] \
- + [pd.DataFrame(levs[1][i]) for i in range(len(levs[1]))]
-
+ dfs = (
+ [pd.DataFrame(levs[2][i]) for i in range(len(levs[2]))]
+ + [pd.DataFrame(levs[0])]
+ + [pd.DataFrame(levs[1][i]) for i in range(len(levs[1]))]
+ )
+
spot_bankrs = []
for df in dfs:
-        spot_b_t1 = (df[(df['spot_asset'] < df['spot_liability']) & (df['net_usd_value'] < 0)])
- return 100
- elif total_collateral <= 0:
- return 0
- else:
- return round(
- min(100, max(0, (1 - maintenance_margin_req / total_collateral) * 100))
- )
def to_financial(num):
num_str = str(num)