Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 3 additions & 0 deletions backend/alembic.ini
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[alembic]
script_location = migrations/alembic
sqlalchemy.url = postgresql+asyncpg://postgres:postgres@localhost/solfoundry
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Hardcoded database credentials in configuration file.

The sqlalchemy.url contains hardcoded credentials (postgres:postgres). This poses security risks if the file is committed to version control, and creates inconsistency with backend/app/database.py which uses os.getenv("DATABASE_URL", ...).

Alembic supports environment variable interpolation via env.py. The standard approach is to leave sqlalchemy.url empty or use a placeholder, then override it in env.py using the same DATABASE_URL environment variable.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/alembic.ini` at line 3, The alembic.ini currently hardcodes
credentials in the sqlalchemy.url setting; remove the sensitive value and
replace it with an empty string or a placeholder (e.g. sqlalchemy.url = ) and
update Alembic's env.py to load the actual URL from the same environment
variable used by backend/app/database.py (os.getenv("DATABASE_URL", ...)) so
Alembic uses DATABASE_URL at runtime; ensure env.py sets
config.set_main_option("sqlalchemy.url", database_url) before running
migrations.

7 changes: 7 additions & 0 deletions backend/app/database.py
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,13 @@ async def init_db() -> None:
from app.models.user import User # noqa: F401
from app.models.bounty_table import BountyTable # noqa: F401
from app.models.agent import Agent # noqa: F401
from app.models.contributor import ContributorDB # noqa: F401
from app.models.submission import SubmissionDB # noqa: F401
from app.models.tables import ( # noqa: F401
PayoutTable,
BuybackTable,
ReputationHistoryTable,
)

await conn.run_sync(Base.metadata.create_all)

Expand Down
15 changes: 15 additions & 0 deletions backend/app/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,21 @@ async def lifespan(app: FastAPI):
await init_db()
await ws_manager.init()

# Hydrate in-memory caches from PostgreSQL (source of truth)
try:
from app.services.payout_service import hydrate_from_database as hydrate_payouts
from app.services.reputation_service import hydrate_from_database as hydrate_reputation

await hydrate_payouts()
await hydrate_reputation()
logger.info("PostgreSQL hydration complete")
except ImportError as exc:
logger.error("Hydration import failed: %s", exc)
except ConnectionRefusedError as exc:
logger.warning("PostgreSQL unavailable during hydration: %s — starting with empty caches", exc)
except Exception as exc:
logger.error("PostgreSQL hydration failed: %s — starting with empty caches", exc, exc_info=True)

# Sync bounties + contributors from GitHub Issues (replaces static seeds)
try:
result = await sync_all()
Expand Down
8 changes: 3 additions & 5 deletions backend/app/models/contributor.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
"""Contributor database and Pydantic models."""
"""Contributor database and Pydantic models (Issue #162: shared Base)."""

import uuid
from datetime import datetime, timezone
Expand All @@ -7,14 +7,12 @@
from pydantic import BaseModel, Field
from sqlalchemy import Column, String, DateTime, JSON, Float, Integer, Text
from sqlalchemy.dialects.postgresql import UUID
from sqlalchemy.orm import DeclarativeBase


class Base(DeclarativeBase):
pass
from app.database import Base


class ContributorDB(Base):
"""SQLAlchemy ORM model for the ``contributors`` table."""
__tablename__ = "contributors"

id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
Expand Down
63 changes: 63 additions & 0 deletions backend/app/models/tables.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
"""SQLAlchemy ORM tables for payouts, buybacks, reputation (Issue #162).

Registered with the shared ``Base`` from ``app.database`` so
``Base.metadata.create_all`` creates them during ``init_db()``.
"""

import uuid
from datetime import datetime, timezone

from sqlalchemy import Boolean, Column, DateTime, Float, Index, Integer, String
from sqlalchemy.dialects.postgresql import UUID

from app.database import Base


class PayoutTable(Base):
    """On-chain payout records with tx_hash uniqueness constraint."""

    __tablename__ = "payouts"
    # Surrogate primary key, generated client-side.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    # Recipient identifier; indexed for per-recipient history queries.
    recipient = Column(String(100), nullable=False, index=True)
    # Wallet address of the recipient — presumably a Solana base58 address
    # (max 44 chars), so 64 leaves headroom. TODO confirm expected format.
    recipient_wallet = Column(String(64))
    # Payout amount denominated in `token`.
    amount = Column(Float, nullable=False)
    # Token symbol; DB-side default is the project token "FNDRY".
    token = Column(String(20), nullable=False, server_default="FNDRY")
    bounty_id = Column(String(64), index=True)
    bounty_title = Column(String(200))
    # UNIQUE prevents the same on-chain transaction being recorded twice.
    tx_hash = Column(String(128), unique=True, index=True)
    # Lifecycle status; new rows default to "pending" at the DB level.
    status = Column(String(20), nullable=False, server_default="pending")
    # Explorer link for the transaction (see SOLSCAN_TX_BASE in services).
    solscan_url = Column(String(256))
    # Timezone-aware creation timestamp set client-side in UTC; indexed
    # so time-ordered listings don't require a full scan.
    created_at = Column(DateTime(timezone=True), nullable=False,
                        default=lambda: datetime.now(timezone.utc), index=True)


class BuybackTable(Base):
    """Treasury buyback records (SOL exchanged for $FNDRY)."""

    __tablename__ = "buybacks"
    # Surrogate primary key, generated client-side.
    id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
    # SOL spent in the buyback.
    amount_sol = Column(Float, nullable=False)
    # $FNDRY received in exchange.
    amount_fndry = Column(Float, nullable=False)
    # Effective exchange rate at execution time — presumably
    # amount_sol / amount_fndry; TODO confirm against the writer.
    price_per_fndry = Column(Float, nullable=False)
    # UNIQUE prevents double-recording the same on-chain transaction.
    tx_hash = Column(String(128), unique=True, index=True)
    # Explorer link for the transaction.
    solscan_url = Column(String(256))
    # Timezone-aware creation timestamp set client-side in UTC; indexed
    # for time-ordered queries.
    created_at = Column(DateTime(timezone=True), nullable=False,
                        default=lambda: datetime.now(timezone.utc), index=True)


class ReputationHistoryTable(Base):
"""Per-bounty reputation entries with contributor+bounty uniqueness."""

__tablename__ = "reputation_history"
id = Column(UUID(as_uuid=True), primary_key=True, default=uuid.uuid4)
contributor_id = Column(String(64), nullable=False, index=True)
bounty_id = Column(String(64), nullable=False, index=True)
bounty_title = Column(String(200), nullable=False)
bounty_tier = Column(Integer, nullable=False)
review_score = Column(Float, nullable=False)
earned_reputation = Column(Float, nullable=False, server_default="0")
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

🧩 Analysis chain

🌐 Web query:

SQLAlchemy server_default string for Float column PostgreSQL

💡 Result:

In SQLAlchemy, server_default must be SQL text / an expression, not a Python float. Use text() with a numeric literal:

from sqlalchemy import Column, Float, text

value = Column(Float, nullable=False, server_default=text("0.0"))

If you want to be explicit for PostgreSQL, you can cast:

  • default for Float (typically double precision on PostgreSQL): server_default=text("0.0::double precision")
  • if you’re using Float(precision=24) (maps to real): server_default=text("0.0::real")

SQLAlchemy documents server_default=text("0") for server-side constants. (docs.sqlalchemy.org) PostgreSQL treats a fractional literal like 1.2 as double precision by default, which is why 0.0 is usually sufficient without an explicit cast. (postgresql.org)

Citations:


Use text("0.0") wrapper for server_default on Float column.

Line 58 uses server_default="0" (plain string), which bypasses SQLAlchemy's expression handling. According to SQLAlchemy documentation, server_default must use a text expression. Change to:

earned_reputation = Column(Float, nullable=False, server_default=text("0.0"))

This is consistent with SQLAlchemy best practices and ensures proper SQL type coercion for the Float column.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/app/models/tables.py` at line 58, The earned_reputation Column uses a
plain string server_default which bypasses SQLAlchemy expression handling;
update the Column definition for earned_reputation to use a text() SQL
expression (e.g., server_default=text("0.0")) and ensure the SQLAlchemy text
function is imported (from sqlalchemy import text) so the Float default is
properly treated as a SQL expression and type-coerced.

anti_farming_applied = Column(Boolean, nullable=False, server_default="false")
created_at = Column(DateTime(timezone=True), nullable=False,
default=lambda: datetime.now(timezone.utc), index=True)
__table_args__ = (
Index("ix_rep_cid_bid", "contributor_id", "bounty_id", unique=True),)
52 changes: 43 additions & 9 deletions backend/app/services/bounty_service.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,17 @@
"""In-memory bounty service for MVP (Issue #3).
"""Bounty service with PostgreSQL write-through persistence (Issue #162).

Provides CRUD operations and solution submission.
Claim lifecycle is out of scope (see Issue #16).
In-memory cache is the hot read path; every mutation is written through
to PostgreSQL via ``pg_store`` so the database is the durable source of
truth. Write errors are logged but do not block the API response
(fire-and-forget via ``asyncio.create_task``).
"""

import asyncio
import logging
from datetime import datetime, timezone
from typing import Optional
from app.core.audit import audit_event

from app.core.audit import audit_event
from app.models.bounty import (
BountyCreate,
BountyDB,
Expand All @@ -24,19 +28,37 @@
VALID_STATUS_TRANSITIONS,
)

# ---------------------------------------------------------------------------
# In-memory store (replaced by a database in production)
# ---------------------------------------------------------------------------
logger = logging.getLogger(__name__)

# In-memory cache (write-through to PostgreSQL)
_bounty_store: dict[str, BountyDB] = {}


def _fire_and_forget(coro) -> None:
"""Schedule an async coroutine as a background task.

Attaches a done-callback that logs exceptions so write failures
are never silently swallowed. No-ops when called outside an
async context (e.g. synchronous tests).
"""
try:
loop = asyncio.get_running_loop()
task = loop.create_task(coro)
task.add_done_callback(
lambda t: logger.error("pg_store background write failed", exc_info=t.exception())
if t.exception() else None
)
except RuntimeError:
pass # No event loop (sync tests)


# ---------------------------------------------------------------------------
# Internal helpers
# ---------------------------------------------------------------------------


def _to_submission_response(s: SubmissionRecord) -> SubmissionResponse:
"""Convert a SubmissionRecord to the public API response schema."""
return SubmissionResponse(
id=s.id,
bounty_id=s.bounty_id,
Expand All @@ -50,6 +72,7 @@ def _to_submission_response(s: SubmissionRecord) -> SubmissionResponse:


def _to_bounty_response(b: BountyDB) -> BountyResponse:
"""Convert a BountyDB record to the full API response schema."""
subs = [_to_submission_response(s) for s in b.submissions]
return BountyResponse(
id=b.id,
Expand All @@ -70,6 +93,7 @@ def _to_bounty_response(b: BountyDB) -> BountyResponse:


def _to_list_item(b: BountyDB) -> BountyListItem:
"""Convert a BountyDB record to a lightweight list item."""
subs = [_to_submission_response(s) for s in b.submissions]
return BountyListItem(
id=b.id,
Expand Down Expand Up @@ -105,6 +129,8 @@ def create_bounty(data: BountyCreate) -> BountyResponse:
created_by=data.created_by,
)
_bounty_store[bounty.id] = bounty
from app.services.pg_store import persist_bounty
_fire_and_forget(persist_bounty(bounty))
return _to_bounty_response(bounty)
Comment on lines +132 to 134
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Inline imports for pg_store are acceptable but create repetition.

The imports inside each function (from app.services.pg_store import persist_bounty) avoid circular import issues but create code repetition across 5 locations (lines 132, 215, 225, 263, 307).

♻️ Alternative: lazy import at module level
# At module level
_pg_store = None

def _get_pg_store():
    global _pg_store
    if _pg_store is None:
        from app.services import pg_store as _pg_store
    return _pg_store

# Then in functions:
_fire_and_forget(_get_pg_store().persist_bounty(bounty))
🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/app/services/bounty_service.py` around lines 132 - 134, Multiple
functions repeatedly perform an inline import "from app.services.pg_store import
persist_bounty" which avoids circular imports but duplicates code; replace these
inline imports with a single lazy module-level accessor to pg_store (e.g., add a
module-level _pg_store = None and a _get_pg_store() function that imports and
caches app.services.pg_store) and update call sites to use
_get_pg_store().persist_bounty(...) inside the existing _fire_and_forget(...)
calls (and remove the per-function import statements), leaving return
_to_bounty_response(bounty) and related logic unchanged.



Expand Down Expand Up @@ -186,14 +212,18 @@ def update_bounty(
updated_by=bounty.created_by # In a real app, this would be the current user
)

from app.services.pg_store import persist_bounty
_fire_and_forget(persist_bounty(bounty))
return _to_bounty_response(bounty), None


def delete_bounty(bounty_id: str) -> bool:
    """Delete a bounty by ID. Removes from cache and PostgreSQL.

    Returns ``True`` when a bounty was removed, ``False`` if the ID was
    unknown.
    """
    # Guard clause: nothing to do for an unknown ID.
    if _bounty_store.pop(bounty_id, None) is None:
        return False

    audit_event("bounty_deleted", bounty_id=bounty_id)
    # Write-through delete; inline import avoids a circular dependency.
    from app.services.pg_store import delete_bounty as pg_delete
    _fire_and_forget(pg_delete(bounty_id))
    return True


Expand Down Expand Up @@ -230,6 +260,8 @@ def submit_solution(
)
bounty.submissions.append(submission)
bounty.updated_at = datetime.now(timezone.utc)
from app.services.pg_store import persist_bounty
_fire_and_forget(persist_bounty(bounty))
return _to_submission_response(submission), None


Expand Down Expand Up @@ -271,7 +303,9 @@ def update_submission(
submission_id=submission_id,
new_status=status
)


from app.services.pg_store import persist_bounty
_fire_and_forget(persist_bounty(bounty))
return _to_submission_response(sub), None

return None, "Submission not found"
2 changes: 1 addition & 1 deletion backend/app/services/contributor_service.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
"""In-memory contributor service for MVP."""
"""Contributor service with in-memory store (Issue #162: shared Base)."""
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Contributors not persisted to PostgreSQL despite PR objectives.

The docstring references Issue #162 (shared Base), but unlike bounty_service.py, this service has no write-through persistence to PostgreSQL. The PR objectives state: "Migrate all in-memory data stores (bounties, contributors, payouts, submissions, reputation) to PostgreSQL so data persists across restarts."

Contributors created/updated via this service will be lost on restart, while contributor data from GitHub sync (in sync_contributors()) also lacks PostgreSQL persistence.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/app/services/contributor_service.py` at line 1, The contributor
service currently keeps contributors only in-memory and does not write changes
to PostgreSQL; update create_contributor, update_contributor, and
sync_contributors in contributor_service.py to perform write-through persistence
by using the shared SQLAlchemy Base/Session (same pattern as bounty_service.py):
inject or import the DB session, map in-memory Contributor objects to the
persistent Contributor model, call session.add() or session.merge(), commit on
success, and handle/rollback on exceptions; ensure new contributors created in
sync_contributors() are upserted into the DB (use unique keys like github_id or
email) so all created/updated contributors persist across restarts.


import uuid
from datetime import datetime, timezone
Expand Down
9 changes: 9 additions & 0 deletions backend/app/services/github_sync.py
Original file line number Diff line number Diff line change
Expand Up @@ -327,6 +327,15 @@ async def sync_bounties() -> int:
_bounty_store.clear()
_bounty_store.update(new_store)

# Persist synced bounties to PostgreSQL (write-through)
try:
from app.services.pg_store import persist_bounty

for bounty in new_store.values():
await persist_bounty(bounty)
except Exception as exc:
logger.warning("DB persistence during sync failed: %s", exc)
Comment on lines +330 to +337
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Sequential persistence is inefficient; consider parallel execution.

Persisting bounties sequentially with for bounty in new_store.values(): await persist_bounty(bounty) can be slow for large bounty counts. Consider using asyncio.gather for parallel persistence:

♻️ Suggested improvement
         try:
             from app.services.pg_store import persist_bounty

-            for bounty in new_store.values():
-                await persist_bounty(bounty)
+            await asyncio.gather(
+                *(persist_bounty(bounty) for bounty in new_store.values()),
+                return_exceptions=True
+            )
         except Exception as exc:
             logger.warning("DB persistence during sync failed: %s", exc)

Additionally, sync_contributors() (lines 382-510) lacks similar PostgreSQL persistence, creating inconsistency with the stated PR objective to migrate all in-memory stores.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/app/services/github_sync.py` around lines 330 - 337, The current
persistence loop writes bounties sequentially using for bounty in
new_store.values(): await persist_bounty(bounty), which is slow and inconsistent
with other migration goals; replace it with parallel async persistence using
asyncio.gather (or an async concurrency limiter like asyncio.Semaphore /
asyncio.BoundedSemaphore if you need to cap concurrency) to await all
persist_bounty coroutines concurrently, and wrap the gather call in the existing
try/except to log failures; also add equivalent PostgreSQL persistence calls
inside sync_contributors() to persist contributor records (use the same
persist_* pattern or new persist_contributor function) so contributor state is
migrated consistently with new_store and persist_bounty.


_last_sync = datetime.now(timezone.utc)
logger.info("Synced %d bounties from GitHub Issues", len(new_store))
return len(new_store)
Expand Down
52 changes: 50 additions & 2 deletions backend/app/services/payout_service.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,18 @@
"""In-memory payout service (MVP -- data lost on restart, DB coming later)."""
"""Payout service with PostgreSQL write-through persistence (Issue #162).

In-memory cache is the hot read path. On startup ``hydrate_from_database``
loads all rows from PostgreSQL so the database is the durable source of
truth. Every create operation fires a background write to PostgreSQL.
"""

from __future__ import annotations

import asyncio
import logging
import threading
from typing import Optional
from app.core.audit import audit_event

from app.core.audit import audit_event
from app.models.payout import (
BuybackCreate,
BuybackRecord,
Expand All @@ -18,13 +25,52 @@
PayoutStatus,
)

logger = logging.getLogger(__name__)

_lock = threading.Lock()
_payout_store: dict[str, PayoutRecord] = {}
_buyback_store: dict[str, BuybackRecord] = {}

SOLSCAN_TX_BASE = "https://solscan.io/tx"


async def hydrate_from_database() -> None:
    """Populate the in-memory payout/buyback caches from PostgreSQL.

    Runs once during application startup. Database errors are allowed to
    propagate so the lifespan handler can log them and pick a fallback
    strategy.
    """
    # Inline import avoids a circular dependency at module load time.
    from app.services import pg_store

    payout_rows = await pg_store.load_payouts()
    buyback_rows = await pg_store.load_buybacks()

    with _lock:
        _payout_store.update(payout_rows)
        _buyback_store.update(buyback_rows)


def _fire_db(record, kind: str) -> None:
"""Schedule an async DB write as a background task.

Logs errors via a done-callback so failures are never silent.
"""
try:
loop = asyncio.get_running_loop()
if kind == "payout":
from app.services.pg_store import insert_payout
coro = insert_payout(record)
else:
from app.services.pg_store import insert_buyback
coro = insert_buyback(record)
task = loop.create_task(coro)
task.add_done_callback(
lambda t: logger.error("pg_store %s write failed", kind, exc_info=t.exception())
if t.exception() else None
)
except RuntimeError:
pass # No event loop (sync tests)
Comment on lines +52 to +71
Copy link
Copy Markdown

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

🧹 Nitpick | 🔵 Trivial

Missing type annotation for record parameter.

The record parameter lacks a type annotation. While the function handles both PayoutRecord and BuybackRecord, adding a Union type or Any annotation would improve code clarity.

💡 Suggested improvement
-def _fire_db(record, kind: str) -> None:
+def _fire_db(record: Any, kind: str) -> None:

Add Any import from typing if not already present.

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@backend/app/services/payout_service.py` around lines 52 - 71, The _fire_db
function's record parameter is missing a type annotation; update its signature
to annotate record as the union of expected record types (e.g.,
Union[PayoutRecord, BuybackRecord]) or as Any if concrete types aren't
importable, and add the necessary import from typing (Union or Any). Locate
_fire_db and the associated imports, then adjust the parameter annotation and
imports so usages with insert_payout and insert_buyback remain type-checked.



def _solscan_url(tx_hash: Optional[str]) -> Optional[str]:
"""Return a Solscan explorer link for *tx_hash*, or ``None``."""
if not tx_hash:
Expand Down Expand Up @@ -92,6 +138,7 @@ def create_payout(data: PayoutCreate) -> PayoutResponse:
token=record.token,
tx_hash=record.tx_hash
)
_fire_db(record, "payout")
return _payout_to_response(record)


Expand Down Expand Up @@ -174,6 +221,7 @@ def create_buyback(data: BuybackCreate) -> BuybackResponse:
amount_fndry=record.amount_fndry,
tx_hash=record.tx_hash
)
_fire_db(record, "buyback")
return _buyback_to_response(record)


Expand Down
Loading
Loading