Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
42 changes: 32 additions & 10 deletions public/index.html
Original file line number Diff line number Diff line change
Expand Up @@ -1919,21 +1919,43 @@ <h3 class="text-lg font-semibold text-slate-900 dark:text-slate-100 mb-2">Error<

/**
* Check for PR updates by fetching lightweight timestamp data
* Compare with local cache to detect changes
* Compare with local cache to detect changes.
*
* The /api/prs/updates endpoint is paginated (cursor-based, after_id)
* to avoid D1 storage-operation timeouts on large datasets.
* We fetch all pages here before processing changes so that removal
* detection remains accurate across the whole PR set.
*/
async function checkForPrUpdates() {
if (!autoRefreshEnabled) return;

try {
const response = await fetch('/api/prs/updates');
const data = await response.json();
// Collect all pages from the paginated updates endpoint.
const allUpdates = [];
let afterId = null;
let hasMore = true;
while (hasMore) {
const url = afterId
? `/api/prs/updates?after_id=${afterId}`
: '/api/prs/updates';
const response = await fetch(url);
const data = await response.json();

if (data.error) {
console.error('Error checking for updates:', data.error);
return;
}

if (data.error) {
console.error('Error checking for updates:', data.error);
return;
const page = data.updates || [];
allUpdates.push(...page);
hasMore = data.has_more === true;
if (hasMore && page.length > 0) {
afterId = page[page.length - 1].id;
} else {
hasMore = false;
}
}

const updates = data.updates || [];
const changedPrIds = [];
const newPrIds = [];
const removedPrIds = [];
Expand All @@ -1942,7 +1964,7 @@ <h3 class="text-lg font-semibold text-slate-900 dark:text-slate-100 mb-2">Error<
const isFirstCheck = Object.keys(prUpdateTimestamps).length === 0;

// Detect changes and new PRs
updates.forEach(update => {
allUpdates.forEach(update => {
const oldTimestamp = prUpdateTimestamps[update.id];
if (oldTimestamp === undefined) {
// PR not seen before - it's newly added
Expand All @@ -1955,7 +1977,7 @@ <h3 class="text-lg font-semibold text-slate-900 dark:text-slate-100 mb-2">Error<

// Detect removals (PRs that were in the cache but are absent from the fetched updates)
// Use Set for O(1) lookup instead of O(n) find operation
const currentPrIds = new Set(updates.map(u => u.id));
const currentPrIds = new Set(allUpdates.map(u => u.id));
Object.keys(prUpdateTimestamps).forEach(prId => {
const prIdNum = parseInt(prId);
if (!currentPrIds.has(prIdNum)) {
Expand Down Expand Up @@ -2001,7 +2023,7 @@ <h3 class="text-lg font-semibold text-slate-900 dark:text-slate-100 mb-2">Error<
// were detected, the view may be stuck in an empty state due to a prior bug.
// Trigger a reload (skipped on the first check to avoid conflicting with
// the initial loadPrs() call that runs concurrently at page startup).
if (!isFirstCheck && allPrs.length === 0 && updates.length > 0 &&
if (!isFirstCheck && allPrs.length === 0 && allUpdates.length > 0 &&
changedPrIds.length === 0 && removedPrIds.length === 0) {
await loadPrs(true);
}
Expand Down
50 changes: 37 additions & 13 deletions src/handlers.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,10 @@
# Maximum PRs to import/discover per bulk operation to prevent timeouts on large orgs
_MAX_PRS_PER_BULK_OP = 1000

# Page size for the lightweight /api/prs/updates endpoint.
# Keeps each D1 query well within the storage-operation timeout limit.
_PR_UPDATES_PAGE_SIZE = 500


def _is_caller_scoped_token(token_info):
"""Return True when the request uses a caller-provided token."""
Expand Down Expand Up @@ -960,38 +964,58 @@ def _row_to_dict(r):
}
}), {'headers': {'Content-Type': 'application/json'}})

async def handle_pr_updates_check(env):
async def handle_pr_updates_check(env, after_id=None):
"""
GET /api/prs/updates
GET /api/prs/updates[?after_id=<int>]
Lightweight endpoint to check for PR updates.
Returns only PR IDs and their updated_at timestamps for change detection.


Results are paginated using cursor-based pagination (after_id) to keep
each D1 query within the storage-operation timeout limit. When the
response includes ``"has_more": true`` the caller should repeat the
request with ``after_id`` set to the last id in the returned list.

This allows the frontend to poll efficiently without fetching full PR data.
"""
try:
db = get_db(env)

# Fetch only IDs and timestamps - minimal data transfer
stmt = db.prepare('SELECT id, updated_at FROM prs ORDER BY id')

# Cursor-based pagination: fetch the next page starting after after_id.
# We request one extra row (page size + 1) so we can reliably detect
# whether another page exists, without issuing a separate COUNT query.
fetch_limit = _PR_UPDATES_PAGE_SIZE + 1
if after_id is not None:
stmt = db.prepare(
'SELECT id, updated_at FROM prs WHERE id > ? ORDER BY id LIMIT ?'
).bind(after_id, fetch_limit)
else:
stmt = db.prepare(
'SELECT id, updated_at FROM prs ORDER BY id LIMIT ?'
).bind(fetch_limit)

result = await stmt.all()

if not result or not result.results:
return Response.new(
json.dumps({'updates': []}),
json.dumps({'updates': [], 'has_more': False}),
{'headers': {'Content-Type': 'application/json'}}
)

# Convert to lightweight format

# Convert to lightweight format; trim to the real page size
rows = list(result.results)
has_more = len(rows) > _PR_UPDATES_PAGE_SIZE
rows = rows[:_PR_UPDATES_PAGE_SIZE]

updates = []
for row in result.results:
for row in rows:
row_dict = row.to_py()
updates.append({
'id': row_dict.get('id'),
'updated_at': row_dict.get('updated_at')
})

return Response.new(
json.dumps({'updates': updates}),
json.dumps({'updates': updates, 'has_more': has_more}),
{'headers': {'Content-Type': 'application/json'}}
)
except Exception as e:
Expand Down
9 changes: 8 additions & 1 deletion src/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,14 @@ async def on_fetch(request, env):
response = None

if path == '/api/prs/updates' and request.method == 'GET':
response = await handle_pr_updates_check(env)
after_id_str = url.searchParams.get('after_id')
after_id = None
if after_id_str:
try:
after_id = int(after_id_str)
except (ValueError, TypeError):
after_id = None
response = await handle_pr_updates_check(env, after_id=after_id)
elif path == '/api/prs':
if request.method == 'GET':
repo = url.searchParams.get('repo')
Expand Down
Loading