Skip to content

Commit

Permalink
Add cached versions
Browse files Browse the repository at this point in the history
  • Loading branch information
SinaKhalili committed Oct 30, 2024
1 parent 9f5c6d5 commit 0298a56
Show file tree
Hide file tree
Showing 7 changed files with 390 additions and 1 deletion.
13 changes: 13 additions & 0 deletions backend/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ def clean_cache(state: BackendState) -> None:
except Exception as e:
print(f"Error deleting {pickle}: {e}")

# Clean regular cache
cache_files = glob.glob("cache/*")
if len(cache_files) > 35:
print("cache folder has more than 35 files, deleting old ones")
Expand All @@ -71,6 +72,18 @@ def clean_cache(state: BackendState) -> None:
except Exception as e:
print(f"Error deleting {cache_file}: {e}")

# Clean ucache
ucache_files = glob.glob("ucache/*")
if len(ucache_files) > 35:
print("ucache folder has more than 35 files, deleting old ones")
ucache_files.sort(key=os.path.getmtime)
for ucache_file in ucache_files[:-35]:
print(f"deleting {ucache_file}")
try:
os.remove(ucache_file)
except Exception as e:
print(f"Error deleting {ucache_file}: {e}")


@repeat_every(seconds=60 * 8, wait_first=True)
async def repeatedly_clean_cache(state: BackendState) -> None:
Expand Down
20 changes: 19 additions & 1 deletion backend/middleware/cache_middleware.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,9 +18,12 @@ def __init__(self, app: ASGIApp, state: BackendState, cache_dir: str = "cache"):
super().__init__(app)
self.state = state
self.cache_dir = cache_dir
self.ucache_dir = "ucache"
self.revalidation_locks: Dict[str, asyncio.Lock] = {}
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
if not os.path.exists(self.ucache_dir):
os.makedirs(self.ucache_dir)

async def dispatch(self, request: BackendRequest, call_next: Callable):
if not request.url.path.startswith("/api"):
Expand Down Expand Up @@ -155,7 +158,22 @@ async def _fetch_and_cache(
os.makedirs(os.path.dirname(cache_file), exist_ok=True)
with open(cache_file, "w") as f:
json.dump(response_data, f)
print(f"Cached fresh data for {request.url.path}")

ucache_key = f"{request.method}{request.url.path}"
if request.url.query:
safe_query = request.url.query.replace("&", "_").replace(
"=", "-"
)
ucache_key = f"{ucache_key}__{safe_query}"
ucache_key = ucache_key.replace("/", "_")

ucache_file = os.path.join(self.ucache_dir, f"{ucache_key}.json")
with open(ucache_file, "w") as f:
json.dump(response_data, f)

print(
f"Cached fresh data for {request.url.path} with query {request.url.query}"
)
else:
print(
f"Failed to cache data for {request.url.path}. Status code: {response.status_code}"
Expand Down
40 changes: 40 additions & 0 deletions src/lib/api.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
import json
import os
from typing import Optional

Expand All @@ -9,6 +10,7 @@
load_dotenv()

BASE_URL = os.environ["BACKEND_URL"]
R2_PREFIX = "https://pub" + "-7dc8852b9fd5407a92614093e1f73280.r" + "2.dev"


def api(
Expand Down Expand Up @@ -52,3 +54,41 @@ def api(
return pd.DataFrame(response.json())
except ValueError:
return response.json()


def api2(url: str, params: Optional[dict] = None) -> dict:
    """
    Fetch a cached API response from R2 storage.

    The URL path (and optional query params) are flattened into the R2 object
    name used by the backend's cache writer, e.g.
    ``/api/health/health_distribution`` ->
    ``GET_api_health_health_distribution.json`` and
    ``/api/price-shock/usermap?asset_group=ignore+stables&oracle_distortion=0.05``
    -> ``GET_api_price-shock_usermap__asset_group-ignore+stables_oracle_distortion-0.05.json``.

    Raises on any fetch/parse failure after logging the error.
    """
    print("SERVING FROM R2")

    try:
        # Build the flattened object key: "GET/api/<path>" with "/" -> "_".
        object_key = f"GET/api/{url}".replace("/", "_")

        if params:
            # Encode each param as "key-value", joined by "_", mirroring the
            # backend cache writer's naming scheme.
            encoded_pairs = []
            for param_name, param_value in params.items():
                if isinstance(param_value, str):
                    # Spaces become "%2B" to match how the stored key was
                    # written server-side (presumably the raw URL encoding of
                    # "+" — confirm against the cache writer if keys drift).
                    param_value = param_value.replace(" ", "%2B")
                encoded_pairs.append(f"{param_name}-{param_value}")
            object_key = f"{object_key}__{'_'.join(encoded_pairs)}"

        r2_url = f"{R2_PREFIX}/{object_key}.json"
        print(f"Fetching from R2: {r2_url}")

        response = requests.get(r2_url)
        if response.status_code != 200:
            raise Exception(f"Failed to fetch from R2: {response.status_code}")

        # Stored objects wrap the payload under a "content" key.
        return response.json()["content"]
    except Exception as e:
        print(f"Error fetching from R2: {str(e)}")
        raise
21 changes: 21 additions & 0 deletions src/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,11 +5,14 @@
from lib.page import needs_backend
from lib.page import sidebar
from page.asset_liability import asset_liab_matrix_page
from page.asset_liability_cached import asset_liab_matrix_cached_page
from page.backend import backend_page
from page.health import health_page
from page.health_cached import health_cached_page
from page.liquidation_curves import liquidation_curves_page
from page.orderbook import orderbook_page
from page.price_shock import price_shock_page
from page.price_shock_cached import price_shock_cached_page
from sections.welcome import welcome_page
import streamlit as st

Expand Down Expand Up @@ -58,6 +61,24 @@
title="Liquidation Curves",
icon="🌊",
),
st.Page(
health_cached_page,
url_path="health-cached",
title="Health (Cached)",
icon="🏥",
),
st.Page(
price_shock_cached_page,
url_path="price-shock-cached",
title="Price Shock (Cached)",
icon="💸",
),
st.Page(
asset_liab_matrix_cached_page,
url_path="asset-liab-matrix-cached",
title="Asset-Liab Matrix (Cached)",
icon="📊",
),
]
if os.getenv("DEV"):
pages.append(
Expand Down
79 changes: 79 additions & 0 deletions src/page/asset_liability_cached.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,79 @@
import json

from driftpy.constants.perp_markets import mainnet_perp_market_configs
from driftpy.constants.spot_markets import mainnet_spot_market_configs
from lib.api import api2
import pandas as pd
from requests.exceptions import JSONDecodeError
import streamlit as st


# Liquidation-scenario filter choices for the asset/liability matrix page.
# `options` holds the selectbox values; `labels[i]` is the human-readable
# name shown for options[i].
options = [0, 1, 2, 3]
labels = [
    "none",
    "liq within 50% of oracle",
    "maint. health < 10%",
    "init. health < 10%",
]


def asset_liab_matrix_cached_page():
    """
    Render the cached asset/liability matrix page.

    Reads `mode` and `perp_market_index` from the query string (defaulting to
    0), lets the user adjust them via selectboxes (persisted back to the query
    string), then fetches the precomputed matrix from R2 via `api2` and renders
    the summary plus one tab per spot market.
    """
    params = st.query_params
    mode = int(params.get("mode", 0))
    perp_market_index = int(params.get("perp_market_index", 0))

    mode = st.selectbox(
        "Options", options, format_func=lambda x: labels[x], index=options.index(mode)
    )
    st.query_params.update({"mode": mode})

    perp_market_index = st.selectbox(
        "Market index",
        [x.market_index for x in mainnet_perp_market_configs],
        index=[x.market_index for x in mainnet_perp_market_configs].index(
            perp_market_index
        ),
    )
    st.query_params.update({"perp_market_index": perp_market_index})

    url = f"asset-liability/matrix/{0 if mode is None else mode}/{0 if perp_market_index is None else perp_market_index}"
    # Keep the try narrow: only the fetch can raise here. Previously the
    # "miss" branch (and its st.stop(), which raises a StopException — an
    # Exception subclass) sat inside the try and was swallowed by the broad
    # handler below.
    try:
        result = api2(url)
    except JSONDecodeError as e:
        # Cache object not written yet — R2 returns a non-JSON body.
        print("HIT A JSONDecodeError...", e)
        st.write("Fetching data for the first time...")
        st.image(
            "https://i.gifer.com/origin/8a/8a47f769c400b0b7d81a8f6f8e09a44a_w200.gif"
        )
        st.write("Check again in one minute!")
        st.stop()
    except Exception as e:
        st.write(e)
        st.stop()

    if result.get("result") == "miss":
        # Backend is still computing this scenario; ask the user to retry.
        st.write("Fetching data for the first time...")
        st.image(
            "https://i.gifer.com/origin/8a/8a47f769c400b0b7d81a8f6f8e09a44a_w200.gif"
        )
        st.write("Check again in one minute!")
        st.stop()

    res = pd.DataFrame(result["res"])
    df = pd.DataFrame(result["df"])

    st.write(f"{df.shape[0]} users for scenario")
    st.write(res)

    # One tab for the full matrix plus one per spot market symbol.
    tabs = st.tabs(["FULL"] + [x.symbol for x in mainnet_spot_market_configs])
    tabs[0].dataframe(df, hide_index=True)

    for idx, tab in enumerate(tabs[1:]):
        important_cols = [x for x in df.columns if "spot_" + str(idx) in x]
        toshow = df[["spot_asset", "net_usd_value"] + important_cols]
        # Hide users with zero exposure to this asset, largest holders first.
        toshow = toshow[toshow[important_cols].abs().sum(axis=1) != 0].sort_values(
            by="spot_" + str(idx) + "_all", ascending=False
        )
        tab.write(f"{len(toshow)} users with this asset to cover liabilities")
        tab.dataframe(toshow, hide_index=True)
45 changes: 45 additions & 0 deletions src/page/health_cached.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
from lib.api import api2
import plotly.express as px
import streamlit as st

from utils import fetch_result_with_retry


def health_cached_page():
    """
    Render the cached protocol-health dashboard.

    Pulls five precomputed datasets from R2 via `api2`, draws the health
    distribution as a bar chart, and lists the largest/most-levered perp
    positions and spot borrows in two columns.
    """
    health_distribution = api2("health/health_distribution")
    largest_perp_positions = api2("health/largest_perp_positions")
    most_levered_positions = api2("health/most_levered_perp_positions_above_1m")
    largest_spot_borrows = api2("health/largest_spot_borrows")
    most_levered_borrows = api2("health/most_levered_spot_borrows_above_1m")

    fig = px.bar(
        health_distribution,
        x="Health Range",
        y="Counts",
        title="Health Distribution",
        hover_data={"Notional Values": ":,"},  # Custom format for notional values
        labels={"Counts": "Num Users", "Notional Values": "Notional Value ($)"},
    )

    # Show count and formatted notional value on hover; suppress the default
    # trace-name box with <extra></extra>.
    fig.update_traces(
        hovertemplate="<b>Health Range: %{x}</b><br>Count: %{y}<br>Notional Value: $%{customdata[0]:,.0f}<extra></extra>"
    )

    with st.container():
        st.plotly_chart(fig, use_container_width=True)

    perp_col, spot_col = st.columns([1, 1])

    with perp_col:
        st.markdown("### **Largest perp positions:**")
        st.dataframe(largest_perp_positions, hide_index=True)
        st.markdown("### **Most levered perp positions > $1m:**")
        st.dataframe(most_levered_positions, hide_index=True)

    with spot_col:
        st.markdown("### **Largest spot borrows:**")
        st.dataframe(largest_spot_borrows, hide_index=True)
        # Was "> $750k", which contradicted the *_above_1m endpoint feeding
        # this table.
        st.markdown("### **Most levered spot borrows > $1m:**")
        st.dataframe(most_levered_borrows, hide_index=True)
Loading

0 comments on commit 0298a56

Please sign in to comment.