16 changes: 16 additions & 0 deletions .dockerignore
@@ -0,0 +1,16 @@
.git
__pycache__/
*.pyc
*.pyo
*.pyd
*.db
*.sqlite3
.ipynb_checkpoints
BehaviorAnalysis/
*.mp4
*.avi
*.mkv
*.mov
sam2_final.ipynb
resources/example.jpeg

19 changes: 19 additions & 0 deletions Dockerfile
@@ -0,0 +1,19 @@
FROM python:3.11-slim

ENV PYTHONDONTWRITEBYTECODE=1
ENV PYTHONUNBUFFERED=1
WORKDIR /app

RUN apt-get update && apt-get install -y --no-install-recommends \
ffmpeg \
&& rm -rf /var/lib/apt/lists/*

COPY requirements-web.txt ./
RUN pip install --no-cache-dir -r requirements-web.txt

COPY app ./app
COPY resources ./resources

EXPOSE 8000
CMD ["sh", "-c", "uvicorn app.main:app --host 0.0.0.0 --port ${PORT:-8000}"]

41 changes: 41 additions & 0 deletions README.md
@@ -242,3 +242,44 @@ AnimalCare is released under the [MIT License](https://opensource.org/licenses/M


---

## Quickstart (Web App)

Local dev (Python 3.11+):

```sh
pip install -r requirements-web.txt
export OPENAI_API_KEY=your_key_here
uvicorn app.main:app --host 0.0.0.0 --port 8000
```

Open http://localhost:8000 and upload a short animal video.

API:
- POST `/api/analyze` with a `multipart/form-data` field named `video` (accepted extensions: `.mp4`, `.mov`, `.avi`, `.mkv`). Returns JSON with a `report` field; see the example below.
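
A minimal request with `curl`, assuming the server is running locally on port 8000 and `clip.mp4` is a short test video in the current directory:

```sh
curl -X POST http://localhost:8000/api/analyze \
  -F "video=@clip.mp4"
```

The response is a JSON object of the form `{"report": "..."}`.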

## Deployment

### Docker
```sh
docker build -t animalcare .
docker run -e OPENAI_API_KEY=your_key -p 8000:8000 animalcare
```

### Render (Docker)
1. Push this repo to GitHub
2. Create a Web Service on Render using this repo (Render will read `render.yaml`)
3. Add environment variable `OPENAI_API_KEY`
4. Deploy and visit the service URL

### Vercel (Next.js)

- App lives in `web/` (Next.js 14, Tailwind, Framer Motion, Edge API).
- Import the repo in Vercel and set Root Directory to `web`.
- Add env var `OPENAI_API_KEY`.

Deploy link (replace `<YOUR_REPO_URL>` with your repository's URL):

```
https://vercel.com/new/clone?repository-url=<YOUR_REPO_URL>&root-directory=web&env=OPENAI_API_KEY
```
1 change: 1 addition & 0 deletions app/__init__.py
@@ -0,0 +1 @@
# Makes app a package
Binary file added app/__pycache__/__init__.cpython-313.pyc
Binary file not shown.
Binary file added app/__pycache__/main.cpython-313.pyc
Binary file not shown.
66 changes: 66 additions & 0 deletions app/main.py
@@ -0,0 +1,66 @@
import os
import uvicorn
from fastapi import FastAPI, UploadFile, File, HTTPException
from fastapi.responses import JSONResponse
from fastapi.middleware.cors import CORSMiddleware
from starlette.staticfiles import StaticFiles
from starlette.responses import RedirectResponse

from .services.analyze import analyze_video_and_summarize


app = FastAPI(title="AnimalCare API", version="0.1.0")

app.add_middleware(
CORSMiddleware,
allow_origins=["*"],
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)

static_dir = os.path.join(os.path.dirname(__file__), "static")
os.makedirs(static_dir, exist_ok=True)
app.mount("/static", StaticFiles(directory=static_dir), name="static")


@app.get("/", response_class=HTMLResponse)
async def index():
return RedirectResponse(url="/static/index.html")


@app.post("/api/analyze")
async def analyze(video: UploadFile = File(...)):
if not video.filename:
raise HTTPException(status_code=400, detail="No file uploaded")
suffix = os.path.splitext(video.filename)[1].lower()
if suffix not in [".mp4", ".mov", ".avi", ".mkv"]:
raise HTTPException(status_code=400, detail="Unsupported file type")

temp_dir = os.path.join("/tmp", "animalcare")
os.makedirs(temp_dir, exist_ok=True)
temp_path = os.path.join(temp_dir, video.filename)
with open(temp_path, "wb") as f:
f.write(await video.read())

try:
result = await analyze_video_and_summarize(temp_path)
except Exception as exc:
raise HTTPException(status_code=500, detail=str(exc))
finally:
try:
os.remove(temp_path)
except Exception:
pass

return JSONResponse(result)


def start():
port = int(os.environ.get("PORT", "8000"))
uvicorn.run("app.main:app", host="0.0.0.0", port=port, reload=False)


if __name__ == "__main__":
start()

Binary file added app/services/__pycache__/analyze.cpython-313.pyc
Binary file not shown.
58 changes: 58 additions & 0 deletions app/services/analyze.py
@@ -0,0 +1,58 @@
import asyncio
import base64
from typing import Dict, List

import cv2
from openai import OpenAI


def _extract_sampled_frames_as_base64(video_path: str, sample_every_n_frames: int = 50, max_frames: int = 30) -> List[str]:
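    """Decode the video with OpenCV, keep every `sample_every_n_frames`-th frame
    (up to `max_frames`), and return the kept frames as base64-encoded JPEGs."""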
capture = cv2.VideoCapture(video_path)
if not capture.isOpened():
raise RuntimeError("Failed to open video")

base64_frames: List[str] = []
frame_index = 0
while True:
success, frame = capture.read()
if not success:
break
if frame_index % sample_every_n_frames == 0:
success_jpg, buffer = cv2.imencode(".jpg", frame)
if success_jpg:
base64_frames.append(base64.b64encode(buffer).decode("utf-8"))
if len(base64_frames) >= max_frames:
break
frame_index += 1
capture.release()
return base64_frames


async def analyze_video_and_summarize(video_path: str) -> Dict[str, str]:
# Extract representative frames
frames = await asyncio.to_thread(_extract_sampled_frames_as_base64, video_path)

if len(frames) == 0:
raise RuntimeError("No frames extracted from video")

# Prepare prompt
prompt = {
"role": "user",
"content": [
"You are an animal behaviorist observing the animal in the following frames. Provide: 1) Activity observations, 2) Signs of illness or discomfort, 3) Health assessment (movement, social interaction, physical form, coat/skin), 4) Recommendations for care, 5) Write as a cohesive narrative suited for a zoo care report. Do not mention frames or numbers.",
*map(lambda x: {"image": x, "resize": 768}, frames),
],
}

client = OpenAI()
result = await asyncio.to_thread(
client.chat.completions.create,
model="gpt-4o",
messages=[prompt],
max_tokens=1200,
)

report = result.choices[0].message.content
return {"report": report}
