
Commit 38a385f

feat: add form data parsing support for multipart clients in raptor endpoint
1 parent 1367540 commit 38a385f
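Background: the raptor endpoint binds its tuning parameters through input_data: RaptorInput = Depends(), which FastAPI resolves from the query string. Clients that upload the JSON file as multipart/form-data and pass llm_model, embedder_model, temperature, context_window, threshold_tokens, or custom_prompt as form fields previously had those values fall through to the settings defaults. This commit injects the raw Request, reads request.form() as a fallback for any parameter still unset, coerces the numeric fields (form values arrive as strings), and only then applies the defaults.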

File tree

1 file changed: +60 -6 lines changed

raptor_api.py

Lines changed: 60 additions & 6 deletions
@@ -22,7 +22,7 @@
 from numba.core.errors import NumbaWarning
 from sklearn.mixture import GaussianMixture
 from sentence_transformers import SentenceTransformer
-from fastapi import FastAPI, UploadFile, File, HTTPException, Depends
+from fastapi import FastAPI, UploadFile, File, HTTPException, Depends, Request
 from fastapi.responses import JSONResponse
 from tqdm import tqdm
 from contextlib import asynccontextmanager
@@ -465,7 +465,7 @@ async def lifespan(app: FastAPI):
 app = FastAPI(
     title="RAPTOR API",
     description="API for Recursive Abstraction and Processing for Text Organization and Reduction",
-    version="0.5.3",
+    version="0.5.4",
     lifespan=lifespan,
 )
 
@@ -1616,6 +1616,7 @@ async def health_check():
 async def raptor(
     file: UploadFile = File(..., description="JSON file (.json) containing chunks to process with a 'chunks' array"),
     input_data: RaptorInput = Depends(),
+    request: Request = None,
 ):
     """Process semantic chunks from an uploaded JSON file for hierarchical clustering.
 
@@ -1640,12 +1641,65 @@ async def raptor(
 
     # Get settings and apply defaults
     settings = get_settings()
-    llm_model = input_data.llm_model or settings.llm_model
-    embedder_model = input_data.embedder_model or settings.embedder_model
-    temperature = input_data.temperature if input_data.temperature is not None else settings.temperature
-    context_window = input_data.context_window if input_data.context_window is not None else settings.context_window
+
+    llm_model = input_data.llm_model
+    embedder_model = input_data.embedder_model
+    temperature = input_data.temperature
+    context_window = input_data.context_window
     threshold_tokens = input_data.threshold_tokens
     custom_prompt = input_data.custom_prompt
+
+    # For multipart clients sending parameters as form fields, extract values manually
+    if any(
+        value is None
+        for value in (llm_model, embedder_model, temperature, context_window)
+    ) or (threshold_tokens is None and request is not None):
+        try:
+            form_data = await request.form() if request is not None else None
+        except Exception:
+            form_data = None
+
+        if form_data is not None:
+            def _get_form_value(key: str):
+                raw = form_data.get(key)
+                return raw if raw not in (None, "") else None
+
+            if llm_model is None:
+                llm_model = _get_form_value("llm_model")
+            if embedder_model is None:
+                embedder_model = _get_form_value("embedder_model")
+            if temperature is None:
+                temp_value = _get_form_value("temperature")
+                if temp_value is not None:
+                    try:
+                        temperature = float(temp_value)
+                    except ValueError:
+                        logger.warning("Invalid temperature value received in form data; falling back to settings default.")
+            if context_window is None:
+                context_value = _get_form_value("context_window")
+                if context_value is not None:
+                    try:
+                        context_window = int(context_value)
+                    except ValueError:
+                        logger.warning("Invalid context_window value received in form data; falling back to settings default.")
+            if threshold_tokens is None:
+                threshold_value = _get_form_value("threshold_tokens")
+                if threshold_value is not None:
+                    try:
+                        threshold_tokens = int(threshold_value)
+                    except ValueError:
+                        logger.warning("Invalid threshold_tokens value received in form data; keeping original value.")
+            if custom_prompt is None:
+                custom_prompt = _get_form_value("custom_prompt")
+
+    llm_model = llm_model or settings.llm_model
+    embedder_model = embedder_model or settings.embedder_model
+    temperature = (
+        temperature if temperature is not None else settings.temperature
+    )
+    context_window = (
+        context_window if context_window is not None else settings.context_window
+    )
 
     # Verify model availability before processing
     # This handles the case where models might be deleted from Ollama after the app has started
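For reference, the client shape this change serves is a multipart upload that carries the tuning parameters as form fields rather than query parameters. Below is a minimal sketch using the requests library; the host, route path, and field values are illustrative assumptions, not taken from this commit.

# Hypothetical client sketch: host/route and parameter values are assumed.
# Form values travel as strings; the handler above coerces temperature,
# context_window, and threshold_tokens with float()/int() before falling
# back to the settings defaults.
import requests

with open("chunks.json", "rb") as f:
    resp = requests.post(
        "http://localhost:8000/raptor",  # assumed host and route
        files={"file": ("chunks.json", f, "application/json")},
        data={  # sent as multipart form fields, the case this commit handles
            "llm_model": "example-model",  # hypothetical model name
            "temperature": "0.2",
            "context_window": "8192",
            "threshold_tokens": "512",
        },
    )
print(resp.status_code, resp.json())

Query-string clients are unaffected: Depends() still populates RaptorInput first, and the form fallback only fills parameters that are still None.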

0 commit comments