diff --git a/.gitignore b/.gitignore
index 43e994e15c..546ab11930 100644
--- a/.gitignore
+++ b/.gitignore
@@ -174,4 +174,3 @@ OPUS_ANALYSIS_AND_IDEAS.md
/shared_docs
logs/security/
Agents.md
-packages/
diff --git a/apps/backend/agents/base.py b/apps/backend/agents/base.py
index badcb22efb..3bb914213b 100644
--- a/apps/backend/agents/base.py
+++ b/apps/backend/agents/base.py
@@ -14,9 +14,7 @@
AUTO_CONTINUE_DELAY_SECONDS = 3
HUMAN_INTERVENTION_FILE = "PAUSE"
-# Retry configuration for 400 tool concurrency errors
-MAX_CONCURRENCY_RETRIES = 5 # Maximum number of retries for tool concurrency errors
-INITIAL_RETRY_DELAY_SECONDS = (
- 2 # Initial retry delay (doubles each retry: 2s, 4s, 8s, 16s, 32s)
-)
-MAX_RETRY_DELAY_SECONDS = 32 # Cap retry delay at 32 seconds
+# Retry configuration for 400 tool concurrency errors
+MAX_CONCURRENCY_RETRIES = 5
+INITIAL_RETRY_DELAY_SECONDS = 2
+MAX_RETRY_DELAY_SECONDS = 32
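
The trimmed comment no longer spells out the backoff schedule these constants encode (the delay doubles per retry and is capped at 32s). A minimal sketch of that schedule, shown in TypeScript for consistency with the other examples in this review; the helper name and zero-based attempt counter are assumptions, not code from this patch:

```ts
// Sketch (not from the patch) of the delay schedule the constants imply:
// the delay doubles per attempt and is capped, i.e. 2s, 4s, 8s, 16s, 32s.
const INITIAL_RETRY_DELAY_SECONDS = 2;
const MAX_RETRY_DELAY_SECONDS = 32;
const MAX_CONCURRENCY_RETRIES = 5;

function retryDelaySeconds(attempt: number): number {
  // attempt is zero-based: 0 -> 2s, 1 -> 4s, 2 -> 8s, 3 -> 16s, 4 -> 32s
  return Math.min(INITIAL_RETRY_DELAY_SECONDS * 2 ** attempt, MAX_RETRY_DELAY_SECONDS);
}

// All five attempts stay within the cap:
for (let attempt = 0; attempt < MAX_CONCURRENCY_RETRIES; attempt++) {
  console.log(`attempt ${attempt + 1}: wait ${retryDelaySeconds(attempt)}s`);
}
```
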
diff --git a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
index 32f0cc5226..385fa9dfae 100644
--- a/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
+++ b/apps/backend/runners/github/services/parallel_orchestrator_reviewer.py
@@ -1116,85 +1116,6 @@ async def review(self, context: PRContext) -> PRReviewResult:
f"{len(findings)} findings from {len(agents_invoked)} agents"
)
- # Skip the old orchestrator session code - findings come from parallel specialists
- # The code below (structured output parsing, retries, etc.) is no longer needed
- # as _run_parallel_specialists handles everything
-
- # NOTE: The following block is kept but skipped via this marker
- if False: # DISABLED: Old orchestrator + Task tool approach
- # Old code for reference - to be removed after testing
- prompt = self._build_orchestrator_prompt(context)
- agent_defs = self._define_specialist_agents(project_root)
- client = self._create_sdk_client(project_root, model, thinking_budget)
-
- MAX_RETRIES = 3
- RETRY_DELAY = 2.0
-
- result_text = ""
- structured_output = None
- msg_count = 0
- last_error = None
-
- for attempt in range(MAX_RETRIES):
- if attempt > 0:
- logger.info(
- f"[ParallelOrchestrator] Retry attempt {attempt}/{MAX_RETRIES - 1} "
- f"after tool concurrency error"
- )
- safe_print(
- f"[ParallelOrchestrator] Retry {attempt}/{MAX_RETRIES - 1} "
- f"(tool concurrency error detected)"
- )
- await asyncio.sleep(RETRY_DELAY)
- client = self._create_sdk_client(
- project_root, model, thinking_budget
- )
-
- try:
- async with client:
- await client.query(prompt)
-
- safe_print(
- f"[ParallelOrchestrator] Running orchestrator ({model})...",
- flush=True,
- )
-
- stream_result = await process_sdk_stream(
- client=client,
- context_name="ParallelOrchestrator",
- model=model,
- system_prompt=prompt,
- agent_definitions=agent_defs,
- )
-
- error = stream_result.get("error")
-
- if (
- error == "tool_use_concurrency_error"
- and attempt < MAX_RETRIES - 1
- ):
- last_error = error
- continue
- if error:
- raise RuntimeError(
- f"SDK stream processing failed: {error}"
- )
- result_text = stream_result["result_text"]
- structured_output = stream_result["structured_output"]
- agents_invoked = stream_result["agents_invoked"]
- break
- except Exception as e:
- if attempt < MAX_RETRIES - 1:
- last_error = str(e)
- continue
- raise
- else:
- raise RuntimeError(
- f"Orchestrator failed after {MAX_RETRIES} attempts"
- )
-
- # END DISABLED BLOCK
-
self._report_progress(
"finalizing",
50,
diff --git a/apps/frontend/src/renderer/components/AuthStatusIndicator.tsx b/apps/frontend/src/renderer/components/AuthStatusIndicator.tsx
index 8123eadec9..502d139cfd 100644
--- a/apps/frontend/src/renderer/components/AuthStatusIndicator.tsx
+++ b/apps/frontend/src/renderer/components/AuthStatusIndicator.tsx
@@ -23,7 +23,6 @@ import {
} from './ui/tooltip';
import { useTranslation } from 'react-i18next';
import { useSettingsStore } from '../stores/settings-store';
-import { useClaudeProfileStore } from '../stores/claude-profile-store';
import { detectProvider, getProviderLabel, getProviderBadgeColor, type ApiProvider } from '../../shared/utils/provider-detection';
import { formatTimeRemaining, localizeUsageWindowLabel, hasHardcodedText } from '../../shared/utils/format-time';
import type { ClaudeUsageSnapshot } from '../../shared/types/agent';
@@ -50,13 +49,8 @@ const OAUTH_FALLBACK = {
} as const;
export function AuthStatusIndicator() {
- // Subscribe to profile state from settings store (API profiles)
+ // Subscribe to profile state from settings store
const { profiles, activeProfileId } = useSettingsStore();
-
- // Subscribe to Claude OAuth profile state
- const claudeProfiles = useClaudeProfileStore((state) => state.profiles);
- const activeClaudeProfileId = useClaudeProfileStore((state) => state.activeProfileId);
-
const { t } = useTranslation(['common']);
// Track usage data for warning badge
@@ -108,7 +102,6 @@ export function AuthStatusIndicator() {
// Compute auth status and provider detection using useMemo to avoid unnecessary re-renders
const authStatus = useMemo(() => {
- // First check if user is using API profile auth (has active API profile)
if (activeProfileId) {
const activeProfile = profiles.find(p => p.id === activeProfileId);
if (activeProfile) {
@@ -126,36 +119,12 @@ export function AuthStatusIndicator() {
badgeColor: getProviderBadgeColor(provider)
};
}
+        // Profile ID set but profile not found - fall back to OAuth
+ return OAUTH_FALLBACK;
}
-
- // No active API profile - check Claude OAuth profiles directly
- if (activeClaudeProfileId && claudeProfiles.length > 0) {
- const activeClaudeProfile = claudeProfiles.find(p => p.id === activeClaudeProfileId);
- if (activeClaudeProfile) {
- return {
- type: 'oauth' as const,
- name: activeClaudeProfile.email || activeClaudeProfile.name,
- provider: 'anthropic' as const,
- providerLabel: 'Anthropic',
- badgeColor: 'bg-orange-500/10 text-orange-500 border-orange-500/20 hover:bg-orange-500/15'
- };
- }
- }
-
- // Fallback to usage data if Claude profiles aren't loaded yet
- if (usage && (usage.profileName || usage.profileEmail)) {
- return {
- type: 'oauth' as const,
- name: usage.profileEmail || usage.profileName,
- provider: 'anthropic' as const,
- providerLabel: 'Anthropic',
- badgeColor: 'bg-orange-500/10 text-orange-500 border-orange-500/20 hover:bg-orange-500/15'
- };
- }
-
- // No auth info available - fallback to generic OAuth
+ // No active profile - using OAuth
return OAUTH_FALLBACK;
- }, [activeProfileId, profiles, activeClaudeProfileId, claudeProfiles, usage]);
+ }, [activeProfileId, profiles]);
// Helper function to truncate ID for display
const truncateId = (id: string): string => {
@@ -305,22 +274,6 @@ export function AuthStatusIndicator() {
>
)}
-
- {/* Account details for OAuth profiles */}
- {isOAuth && authStatus.name && authStatus.name !== 'OAuth' && (
- <>
-
- {/* Account name/email with icon */}
-
-
-
- {t('common:usage.account')}
-
-
-                    {authStatus.name}
-
-
-                  </>
- )}
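
With the Claude-profile and usage fallbacks removed, the useMemo reduces to a single decision: use the active API profile if it resolves, otherwise the OAuth fallback. A condensed sketch of that flow with simplified types; the real hook also derives provider, label, and badge color via detectProvider/getProviderLabel/getProviderBadgeColor:

```ts
// Simplified sketch of the new resolution order; Profile and AuthStatus are
// illustrative shapes, not the component's actual types.
interface Profile { id: string; name: string }
type AuthStatus = { type: 'api'; name: string } | { type: 'oauth' };

function resolveAuthStatus(profiles: Profile[], activeProfileId: string | null): AuthStatus {
  if (activeProfileId) {
    const activeProfile = profiles.find((p) => p.id === activeProfileId);
    if (activeProfile) {
      // Active API profile wins; the real code also runs provider detection here
      return { type: 'api', name: activeProfile.name };
    }
    // Profile ID set but profile not found - fall back to OAuth
    return { type: 'oauth' };
  }
  // No active profile - the user is on an OAuth session
  return { type: 'oauth' };
}
```
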
diff --git a/apps/frontend/src/renderer/components/KanbanBoard.tsx b/apps/frontend/src/renderer/components/KanbanBoard.tsx
index a682a8ba74..3912d244bf 100644
--- a/apps/frontend/src/renderer/components/KanbanBoard.tsx
+++ b/apps/frontend/src/renderer/components/KanbanBoard.tsx
@@ -28,7 +28,6 @@ import { TaskCard } from './TaskCard';
import { SortableTaskCard } from './SortableTaskCard';
import { QueueSettingsModal } from './QueueSettingsModal';
import { TASK_STATUS_COLUMNS, TASK_STATUS_LABELS } from '../../shared/constants';
-import { debugLog } from '../../shared/utils/debug-logger';
import { cn } from '../lib/utils';
import { persistTaskStatus, forceCompleteTask, archiveTasks, deleteTasks, useTaskStore } from '../stores/task-store';
import { updateProjectSettings, useProjectStore } from '../stores/project-store';
@@ -1068,74 +1067,29 @@ export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick, onRefresh, isR
isProcessingQueueRef.current = true;
try {
- // Track tasks we've already processed in this call to prevent duplicates
- // This is critical because store updates happen synchronously but we need to ensure
- // we never process the same task twice, even if there are timing issues
-      const processedTaskIds = new Set<string>();
+ // Track tasks we've already attempted to promote (to avoid infinite retries)
+      const attemptedTaskIds = new Set<string>();
let consecutiveFailures = 0;
const MAX_CONSECUTIVE_FAILURES = 10; // Safety limit to prevent infinite loop
- // Track promotions in this call to enforce max parallel tasks limit
- let promotedInThisCall = 0;
-
- // Log initial state
- const initialTasks = useTaskStore.getState().tasks;
- const initialInProgress = initialTasks.filter((t) => t.status === 'in_progress' && !t.metadata?.archivedAt);
- const initialQueued = initialTasks.filter((t) => t.status === 'queue' && !t.metadata?.archivedAt);
- debugLog(`[Queue] === PROCESS QUEUE START ===`, {
- maxParallelTasks,
- initialInProgressCount: initialInProgress.length,
- initialInProgressIds: initialInProgress.map(t => t.id),
- initialQueuedCount: initialQueued.length,
- initialQueuedIds: initialQueued.map(t => t.id),
- projectId
- });
-
// Loop until capacity is full or queue is empty
- let iteration = 0;
while (true) {
- iteration++;
- // Calculate total in-progress count: tasks that were already in progress + tasks promoted in this call
- const totalInProgressCount = initialInProgress.length + promotedInThisCall;
-
- debugLog(`[Queue] --- Iteration ${iteration} ---`, {
- initialInProgressCount: initialInProgress.length,
- promotedInThisCall,
- totalInProgressCount,
- capacityCheck: totalInProgressCount >= maxParallelTasks,
- processedCount: processedTaskIds.size
- });
-
- // Stop if no capacity (initial in-progress + promoted in this call)
- if (totalInProgressCount >= maxParallelTasks) {
- debugLog(`[Queue] Capacity reached (${totalInProgressCount}/${maxParallelTasks}), stopping queue processing`);
- break;
- }
-
- // Get CURRENT state from store to find queued tasks
- const latestTasks = useTaskStore.getState().tasks;
- const latestInProgress = latestTasks.filter((t) => t.status === 'in_progress' && !t.metadata?.archivedAt);
- const queuedTasks = latestTasks.filter((t) =>
- t.status === 'queue' && !t.metadata?.archivedAt && !processedTaskIds.has(t.id)
+ // Get CURRENT state from store to ensure accuracy
+ const currentTasks = useTaskStore.getState().tasks;
+ const inProgressCount = currentTasks.filter((t) =>
+ t.status === 'in_progress' && !t.metadata?.archivedAt
+ ).length;
+ const queuedTasks = currentTasks.filter((t) =>
+ t.status === 'queue' && !t.metadata?.archivedAt && !attemptedTaskIds.has(t.id)
);
- debugLog(`[Queue] Current store state:`, {
- totalTasks: latestTasks.length,
- inProgressCount: latestInProgress.length,
- inProgressIds: latestInProgress.map(t => t.id),
- queuedCount: queuedTasks.length,
- queuedIds: queuedTasks.map(t => t.id),
- processedIds: Array.from(processedTaskIds)
- });
-
- // Stop if no queued tasks or too many consecutive failures
- if (queuedTasks.length === 0) {
- debugLog('[Queue] No more queued tasks to process');
+        // Stop if no capacity or no queued tasks (the consecutive-failure limit is checked below)
+ if (inProgressCount >= maxParallelTasks || queuedTasks.length === 0) {
break;
}
if (consecutiveFailures >= MAX_CONSECUTIVE_FAILURES) {
- debugLog(`[Queue] Stopping queue processing after ${MAX_CONSECUTIVE_FAILURES} consecutive failures`);
+ console.warn(`[Queue] Stopping queue processing after ${MAX_CONSECUTIVE_FAILURES} consecutive failures`);
break;
}
@@ -1146,62 +1100,28 @@ export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick, onRefresh, isR
return dateA - dateB; // Ascending order (oldest first)
})[0];
- debugLog(`[Queue] Selected task for promotion:`, {
- id: nextTask.id,
- currentStatus: nextTask.status,
- title: nextTask.title?.substring(0, 50)
- });
-
- // Mark task as processed BEFORE attempting promotion to prevent duplicates
- processedTaskIds.add(nextTask.id);
-
- debugLog(`[Queue] Promoting task ${nextTask.id} (${promotedInThisCall + 1}/${maxParallelTasks})`);
+ console.log(`[Queue] Auto-promoting task ${nextTask.id} from Queue to In Progress (${inProgressCount + 1}/${maxParallelTasks})`);
const result = await persistTaskStatus(nextTask.id, 'in_progress');
- // Check store state after promotion
- const afterPromoteTasks = useTaskStore.getState().tasks;
- const afterPromoteInProgress = afterPromoteTasks.filter((t) => t.status === 'in_progress' && !t.metadata?.archivedAt);
- const afterPromoteQueued = afterPromoteTasks.filter((t) => t.status === 'queue' && !t.metadata?.archivedAt);
-
- debugLog(`[Queue] After promotion attempt:`, {
- resultSuccess: result.success,
- promotedInThisCall,
- inProgressCount: afterPromoteInProgress.length,
- inProgressIds: afterPromoteInProgress.map(t => t.id),
- queuedCount: afterPromoteQueued.length,
- queuedIds: afterPromoteQueued.map(t => t.id)
- });
-
if (result.success) {
- // Increment our local promotion counter
- promotedInThisCall++;
// Reset consecutive failures on success
consecutiveFailures = 0;
} else {
- // If promotion failed, log error and continue to next task
+ // If promotion failed, log error, mark as attempted, and skip to next task
console.error(`[Queue] Failed to promote task ${nextTask.id} to In Progress:`, result.error);
+ attemptedTaskIds.add(nextTask.id);
consecutiveFailures++;
}
}
- // Log summary
- debugLog(`[Queue] === PROCESS QUEUE COMPLETE ===`, {
- totalIterations: iteration,
- tasksProcessed: processedTaskIds.size,
- tasksPromoted: promotedInThisCall,
- processedIds: Array.from(processedTaskIds)
- });
-
- // Trigger UI refresh if tasks were promoted to ensure UI reflects all changes
- // This handles the case where store updates are batched/delayed via IPC events
- if (promotedInThisCall > 0 && onRefresh) {
- debugLog('[Queue] Triggering UI refresh after queue promotion');
- onRefresh();
+ // Log if we had failed tasks
+ if (attemptedTaskIds.size > 0) {
+ console.warn(`[Queue] Skipped ${attemptedTaskIds.size} task(s) that failed to promote`);
}
} finally {
isProcessingQueueRef.current = false;
}
- }, [maxParallelTasks, projectId, onRefresh]);
+ }, [maxParallelTasks]);
// Register task status change listener for queue auto-promotion
// This ensures processQueue() is called whenever a task leaves in_progress
@@ -1210,7 +1130,7 @@ export function KanbanBoard({ tasks, onTaskClick, onNewTaskClick, onRefresh, isR
(taskId, oldStatus, newStatus) => {
// When a task leaves in_progress (e.g., goes to human_review), process the queue
if (oldStatus === 'in_progress' && newStatus !== 'in_progress') {
- debugLog(`[Queue] Task ${taskId} left in_progress, processing queue to fill slot`);
+ console.log(`[Queue] Task ${taskId} left in_progress, processing queue to fill slot`);
processQueue();
}
}
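
The rewritten loop re-reads store state on every iteration instead of tracking a local promotion counter, so the capacity check always reflects promotions that have already landed. A condensed sketch of the control flow; getTasks and promote are stand-ins for useTaskStore.getState() and persistTaskStatus, and the task shape is reduced to the fields the loop reads:

```ts
// Condensed sketch of the simplified promotion loop after this change.
interface QueueTask { id: string; status: string; archivedAt?: string; createdAt: string }

async function processQueueOnce(
  getTasks: () => QueueTask[],
  promote: (id: string) => Promise<{ success: boolean; error?: string }>,
  maxParallelTasks: number,
): Promise<void> {
  const attemptedTaskIds = new Set<string>(); // only failed promotions are recorded
  let consecutiveFailures = 0;
  const MAX_CONSECUTIVE_FAILURES = 10;

  while (true) {
    // Always re-read current state so capacity reflects promotions made so far
    const tasks = getTasks();
    const inProgressCount = tasks.filter((t) => t.status === 'in_progress' && !t.archivedAt).length;
    const queuedTasks = tasks.filter(
      (t) => t.status === 'queue' && !t.archivedAt && !attemptedTaskIds.has(t.id),
    );

    if (inProgressCount >= maxParallelTasks || queuedTasks.length === 0) break;
    if (consecutiveFailures >= MAX_CONSECUTIVE_FAILURES) break;

    // Oldest queued task first, as in the component
    const nextTask = [...queuedTasks].sort(
      (a, b) => Date.parse(a.createdAt) - Date.parse(b.createdAt),
    )[0];

    const result = await promote(nextTask.id);
    if (result.success) {
      consecutiveFailures = 0; // the next iteration's capacity check sees the promotion
    } else {
      attemptedTaskIds.add(nextTask.id); // never retry a failed task in this pass
      consecutiveFailures++;
    }
  }
}
```
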
diff --git a/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx b/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx
index c94a772ebd..e1b3ff8c24 100644
--- a/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx
+++ b/apps/frontend/src/renderer/components/task-detail/TaskMetadata.tsx
@@ -1,3 +1,4 @@
+import { useState, useRef, useLayoutEffect, useId } from 'react';
import { useTranslation } from 'react-i18next';
import {
Target,
@@ -13,11 +14,14 @@ import {
GitPullRequest,
ListChecks,
Clock,
- ExternalLink
+ ExternalLink,
+ ChevronDown,
+ ChevronUp
} from 'lucide-react';
import ReactMarkdown from 'react-markdown';
import remarkGfm from 'remark-gfm';
import { Badge } from '../ui/badge';
+import { Button } from '../ui/button';
import { Tooltip, TooltipContent, TooltipTrigger } from '../ui/tooltip';
import { cn, formatRelativeTime } from '../../lib/utils';
import {
@@ -51,8 +55,15 @@ interface TaskMetadataProps {
task: Task;
}
+// Height threshold for collapsing long descriptions (~8 lines)
+const COLLAPSED_HEIGHT = 200;
+
export function TaskMetadata({ task }: TaskMetadataProps) {
const { t } = useTranslation(['tasks', 'errors']);
+ const [isExpanded, setIsExpanded] = useState(false);
+ const [hasOverflow, setHasOverflow] = useState(false);
+  const contentRef = useRef<HTMLDivElement>(null);
+ const contentId = useId();
// Handle JSON error description with i18n
const displayDescription = (() => {
@@ -64,6 +75,19 @@ export function TaskMetadata({ task }: TaskMetadataProps) {
return task.description;
})();
+ // Detect if content overflows the collapsed height
+ // Re-check when description changes (content height depends on rendered description)
+ // Reset expand state when switching tasks to avoid stale expanded state
+ // biome-ignore lint/correctness/useExhaustiveDependencies: task.description triggers re-render which changes content height
+ useLayoutEffect(() => {
+ setIsExpanded(false);
+ const element = contentRef.current;
+ if (element) {
+ const hasContentOverflow = element.scrollHeight > COLLAPSED_HEIGHT;
+ setHasOverflow(hasContentOverflow);
+ }
+ }, [task.id, task.description]);
+
const hasClassification = task.metadata && (
task.metadata.category ||
task.metadata.priority ||
@@ -155,14 +179,53 @@ export function TaskMetadata({ task }: TaskMetadataProps) {
{/* Description - Primary Content */}
{displayDescription && (
-
-
- {displayDescription}
-
+ {/* Content container with conditional max-height */}
+
+
+
+ {displayDescription}
+
+
+
+ {/* Gradient overlay when collapsed and has overflow */}
+ {!isExpanded && hasOverflow && (
+
+ )}
+
+ {/* Expand/Collapse button */}
+ {hasOverflow && (
+
+
+
+ )}
)}
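
The added JSX does not survive intact in this diff, so here is a hedged sketch of the collapse/expand pattern the hunk implements. The hook wiring mirrors the state introduced above; the markup, class names, and Button props are illustrative assumptions only:

```tsx
// Hedged sketch of the collapsible description; not the PR's literal JSX.
import { useId, useLayoutEffect, useRef, useState } from 'react';
import { useTranslation } from 'react-i18next';
import ReactMarkdown from 'react-markdown';
import remarkGfm from 'remark-gfm';
import { ChevronDown, ChevronUp } from 'lucide-react';
import { Button } from '../ui/button';

const COLLAPSED_HEIGHT = 200; // ~8 lines, matching the constant above

function CollapsibleDescription({ description }: { description: string }) {
  const { t } = useTranslation(['tasks']);
  const [isExpanded, setIsExpanded] = useState(false);
  const [hasOverflow, setHasOverflow] = useState(false);
  const contentRef = useRef<HTMLDivElement>(null);
  const contentId = useId();

  // Re-measure whenever the description changes; collapse by default
  useLayoutEffect(() => {
    setIsExpanded(false);
    if (contentRef.current) {
      setHasOverflow(contentRef.current.scrollHeight > COLLAPSED_HEIGHT);
    }
  }, [description]);

  return (
    <div className="relative">
      <div
        id={contentId}
        ref={contentRef}
        className={isExpanded ? undefined : 'overflow-hidden'}
        style={isExpanded ? undefined : { maxHeight: COLLAPSED_HEIGHT }}
      >
        <ReactMarkdown remarkPlugins={[remarkGfm]}>{description}</ReactMarkdown>
      </div>
      {/* Gradient hint that more content is hidden */}
      {!isExpanded && hasOverflow && (
        <div className="pointer-events-none absolute inset-x-0 bottom-0 h-8 bg-gradient-to-t from-background to-transparent" />
      )}
      {/* Toggle only appears when the content actually overflows */}
      {hasOverflow && (
        <Button
          variant="ghost"
          size="sm"
          aria-expanded={isExpanded}
          aria-controls={contentId}
          onClick={() => setIsExpanded((v) => !v)}
        >
          {isExpanded ? t('tasks:metadata.showLess') : t('tasks:metadata.showMore')}
          {isExpanded ? <ChevronUp className="ml-1 h-4 w-4" /> : <ChevronDown className="ml-1 h-4 w-4" />}
        </Button>
      )}
    </div>
  );
}
```
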
diff --git a/apps/frontend/src/renderer/stores/task-store.ts b/apps/frontend/src/renderer/stores/task-store.ts
index 57dea5c912..9743f124ef 100644
--- a/apps/frontend/src/renderer/stores/task-store.ts
+++ b/apps/frontend/src/renderer/stores/task-store.ts
@@ -241,7 +241,7 @@ export const useTaskStore = create<TaskStore>((set, get) => ({
const state = get();
const index = findTaskIndex(state.tasks, taskId);
if (index === -1) {
- console.warn('[updateTaskStatus] Task not found:', taskId);
+ debugLog('[updateTaskStatus] Task not found:', taskId);
return;
}
const oldTask = state.tasks[index];
@@ -262,29 +262,38 @@ export const useTaskStore = create<TaskStore>((set, get) => ({
// Perform the state update
set((state) => {
- const updatedTasks = updateTaskAtIndex(state.tasks, index, (t) => {
- // Determine execution progress based on status transition
- let executionProgress = t.executionProgress;
-
- if (status === 'backlog') {
- // When status goes to backlog, reset execution progress to idle
- // This ensures the planning/coding animation stops when task is stopped
- executionProgress = { phase: 'idle' as ExecutionPhase, phaseProgress: 0, overallProgress: 0 };
- } else if (status === 'in_progress' && !t.executionProgress?.phase) {
- // When starting a task and no phase is set yet, default to planning
- // This prevents the "no active phase" UI state during startup race condition
- executionProgress = { phase: 'planning' as ExecutionPhase, phaseProgress: 0, overallProgress: 0 };
- }
-
- return { ...t, status, reviewReason, executionProgress, updatedAt: new Date() };
- });
+ return {
+ tasks: updateTaskAtIndex(state.tasks, index, (t) => {
+ // Determine execution progress based on status transition
+ let executionProgress = t.executionProgress;
+
+ // Track status transition for debugging flip-flop issues
+ const previousStatus = t.status;
+ const statusChanged = previousStatus !== status;
+
+ if (status === 'backlog') {
+ // When status goes to backlog, reset execution progress to idle
+ // This ensures the planning/coding animation stops when task is stopped
+ executionProgress = { phase: 'idle' as ExecutionPhase, phaseProgress: 0, overallProgress: 0 };
+ } else if (status === 'in_progress' && !t.executionProgress?.phase) {
+ // When starting a task and no phase is set yet, default to planning
+ // This prevents the "no active phase" UI state during startup race condition
+ executionProgress = { phase: 'planning' as ExecutionPhase, phaseProgress: 0, overallProgress: 0 };
+ }
- debugLog('[updateTaskStatus] AFTER set():', {
- taskId,
- allInProgress: updatedTasks.filter((t: Task) => t.status === 'in_progress' && !t.metadata?.archivedAt).map(t => t.id)
- });
+ // Log status transitions to help diagnose flip-flop issues
+ debugLog('[updateTaskStatus] Status transition:', {
+ taskId,
+ previousStatus,
+ newStatus: status,
+ statusChanged,
+ currentPhase: t.executionProgress?.phase,
+ newPhase: executionProgress?.phase
+ });
- return { tasks: updatedTasks };
+ return { ...t, status, reviewReason, executionProgress, updatedAt: new Date() };
+ })
+ };
});
// Notify listeners after state update (schedule after current tick)
@@ -723,17 +732,7 @@ export async function persistTaskStatus(
}
// Only update local state after backend confirms success
- debugLog(`[persistTaskStatus] BEFORE store.updateTaskStatus:`, {
- taskId,
- newStatus: status,
- currentStoreStatus: store.tasks.find(t => t.id === taskId)?.status
- });
store.updateTaskStatus(taskId, status);
- debugLog(`[persistTaskStatus] AFTER store.updateTaskStatus:`, {
- taskId,
- updatedStoreStatus: store.tasks.find(t => t.id === taskId)?.status,
- allInProgress: store.tasks.filter(t => t.status === 'in_progress' && !t.metadata?.archivedAt).map(t => t.id)
- });
return { success: true };
} catch (error) {
console.error('Error persisting task status:', error);
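
The execution-progress handling inside the rewritten set() callback reduces to two rules: backlog resets progress to idle, and in_progress with no phase defaults to planning. A pure-function sketch with simplified types (not the store's actual ExecutionPhase type):

```ts
// Simplified sketch of the execution-progress rules inside updateTaskStatus;
// types are narrowed for illustration and do not match the store's exactly.
type Phase = 'idle' | 'planning' | 'coding';
interface ExecutionProgress { phase: Phase; phaseProgress: number; overallProgress: number }

function nextExecutionProgress(
  status: string,
  current: ExecutionProgress | undefined,
): ExecutionProgress | undefined {
  if (status === 'backlog') {
    // Stopping a task resets the planning/coding animation to idle
    return { phase: 'idle', phaseProgress: 0, overallProgress: 0 };
  }
  if (status === 'in_progress' && !current?.phase) {
    // Starting a task with no phase yet defaults to planning, avoiding the
    // "no active phase" UI state during the startup race
    return { phase: 'planning', phaseProgress: 0, overallProgress: 0 };
  }
  return current; // all other transitions keep the existing progress
}
```
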
diff --git a/apps/frontend/src/shared/i18n/locales/en/common.json b/apps/frontend/src/shared/i18n/locales/en/common.json
index 3187d29ba5..b97b1f452b 100644
--- a/apps/frontend/src/shared/i18n/locales/en/common.json
+++ b/apps/frontend/src/shared/i18n/locales/en/common.json
@@ -478,8 +478,7 @@
"clickToOpenSettings": "Click to open Settings →",
"sessionShort": "5-hour session usage",
"weeklyShort": "7-day weekly usage",
- "swap": "Swap",
- "account": "Account"
+ "swap": "Swap"
},
"oauth": {
"enterCode": "Manual Code Entry (Fallback)",
diff --git a/apps/frontend/src/shared/i18n/locales/en/tasks.json b/apps/frontend/src/shared/i18n/locales/en/tasks.json
index 24cc90f7ae..eaecc5c505 100644
--- a/apps/frontend/src/shared/i18n/locales/en/tasks.json
+++ b/apps/frontend/src/shared/i18n/locales/en/tasks.json
@@ -174,7 +174,9 @@
},
"metadata": {
"severity": "severity",
- "pullRequest": "Pull Request"
+ "pullRequest": "Pull Request",
+ "showMore": "Show more",
+ "showLess": "Show less"
},
"images": {
"removeImageAriaLabel": "Remove image {{filename}}",
diff --git a/apps/frontend/src/shared/i18n/locales/fr/common.json b/apps/frontend/src/shared/i18n/locales/fr/common.json
index f5cc8f6183..f441245134 100644
--- a/apps/frontend/src/shared/i18n/locales/fr/common.json
+++ b/apps/frontend/src/shared/i18n/locales/fr/common.json
@@ -478,8 +478,7 @@
"clickToOpenSettings": "Cliquez pour ouvrir les Paramètres →",
"sessionShort": "Utilisation session 5 heures",
"weeklyShort": "Utilisation hebdomadaire 7 jours",
- "swap": "Changer",
- "account": "Compte"
+ "swap": "Changer"
},
"oauth": {
"enterCode": "Saisie manuelle du code (secours)",
diff --git a/apps/frontend/src/shared/i18n/locales/fr/tasks.json b/apps/frontend/src/shared/i18n/locales/fr/tasks.json
index 3ff3c5e8e7..b778896114 100644
--- a/apps/frontend/src/shared/i18n/locales/fr/tasks.json
+++ b/apps/frontend/src/shared/i18n/locales/fr/tasks.json
@@ -173,7 +173,9 @@
},
"metadata": {
"severity": "sévérité",
- "pullRequest": "Pull Request"
+ "pullRequest": "Pull Request",
+ "showMore": "Afficher plus",
+ "showLess": "Afficher moins"
},
"images": {
"removeImageAriaLabel": "Supprimer l'image {{filename}}",
diff --git a/tests/test_auth.py b/tests/test_auth.py
index d022202ad2..33faf03d05 100644
--- a/tests/test_auth.py
+++ b/tests/test_auth.py
@@ -465,7 +465,7 @@ def test_does_nothing_when_no_token_available(self, monkeypatch):
"""Doesn't set env var when no auth token is available."""
monkeypatch.setattr(platform, "system", lambda: "Linux")
# Ensure keychain returns None
- monkeypatch.setattr("core.auth.get_token_from_keychain", lambda config_dir=None: None)
+ monkeypatch.setattr("core.auth.get_token_from_keychain", lambda _config_dir=None: None)
ensure_claude_code_oauth_token()
@@ -649,7 +649,7 @@ def test_get_auth_token_decrypts_encrypted_env_token(self, monkeypatch):
from unittest.mock import patch
monkeypatch.setenv("CLAUDE_CODE_OAUTH_TOKEN", "enc:testtoken123456789")
- monkeypatch.setattr("core.auth.get_token_from_keychain", lambda config_dir=None: None)
+ monkeypatch.setattr("core.auth.get_token_from_keychain", lambda _config_dir=None: None)
with patch("core.auth.decrypt_token") as mock_decrypt:
# Simulate decryption failure
@@ -672,7 +672,7 @@ def test_get_auth_token_returns_decrypted_token_on_success(self, monkeypatch):
decrypted_token = "sk-ant-oat01-decrypted-token"
monkeypatch.setenv("CLAUDE_CODE_OAUTH_TOKEN", encrypted_token)
- monkeypatch.setattr("core.auth.get_token_from_keychain", lambda config_dir=None: None)
+ monkeypatch.setattr("core.auth.get_token_from_keychain", lambda _config_dir=None: None)
with patch("core.auth.decrypt_token") as mock_decrypt:
mock_decrypt.return_value = decrypted_token
@@ -690,7 +690,7 @@ def test_backward_compatibility_plaintext_tokens(self, monkeypatch):
"""Verify plaintext tokens continue to work unchanged."""
token = "sk-ant-oat01-test"
monkeypatch.setenv("CLAUDE_CODE_OAUTH_TOKEN", token)
- monkeypatch.setattr("core.auth.get_token_from_keychain", lambda config_dir=None: None)
+ monkeypatch.setattr("core.auth.get_token_from_keychain", lambda _config_dir=None: None)
from core.auth import get_auth_token
@@ -993,7 +993,7 @@ def test_keychain_encrypted_token_decryption_attempted(self, monkeypatch):
encrypted_token = "enc:keychaintoken1234"
monkeypatch.setattr(
- "core.auth.get_token_from_keychain", lambda config_dir=None: encrypted_token
+ "core.auth.get_token_from_keychain", lambda _config_dir=None: encrypted_token
)
with patch("core.auth.decrypt_token") as mock_decrypt:
@@ -1015,7 +1015,7 @@ def test_keychain_encrypted_token_decryption_success(self, monkeypatch):
decrypted_token = "sk-ant-oat01-from-keychain"
monkeypatch.setattr(
- "core.auth.get_token_from_keychain", lambda config_dir=None: encrypted_token
+ "core.auth.get_token_from_keychain", lambda _config_dir=None: encrypted_token
)
with patch("core.auth.decrypt_token") as mock_decrypt:
@@ -1034,7 +1034,7 @@ def test_plaintext_keychain_token_not_decrypted(self, monkeypatch):
plaintext_token = "sk-ant-oat01-keychain-plaintext"
monkeypatch.setattr(
- "core.auth.get_token_from_keychain", lambda config_dir=None: plaintext_token
+ "core.auth.get_token_from_keychain", lambda _config_dir=None: plaintext_token
)
with patch("core.auth.decrypt_token") as mock_decrypt:
@@ -1052,7 +1052,7 @@ def test_env_var_takes_precedence_over_keychain(self, monkeypatch):
monkeypatch.setenv("CLAUDE_CODE_OAUTH_TOKEN", env_token)
monkeypatch.setattr(
- "core.auth.get_token_from_keychain", lambda config_dir=None: keychain_token
+ "core.auth.get_token_from_keychain", lambda _config_dir=None: keychain_token
)
from core.auth import get_auth_token
@@ -1070,7 +1070,7 @@ def test_encrypted_env_var_precedence_over_plaintext_keychain(self, monkeypatch)
monkeypatch.setenv("CLAUDE_CODE_OAUTH_TOKEN", encrypted_env)
monkeypatch.setattr(
- "core.auth.get_token_from_keychain", lambda config_dir=None: keychain_token
+ "core.auth.get_token_from_keychain", lambda _config_dir=None: keychain_token
)
with patch("core.auth.decrypt_token") as mock_decrypt:
diff --git a/tests/test_integration_phase4.py b/tests/test_integration_phase4.py
index 060e9ee9ba..ba6d15dd4c 100644
--- a/tests/test_integration_phase4.py
+++ b/tests/test_integration_phase4.py
@@ -106,6 +106,9 @@
_original_modules = {name: sys.modules.get(name) for name in _modules_to_mock}
for name in _modules_to_mock:
sys.modules[name] = MagicMock()
+# IMPORTANT: Register the module in sys.modules BEFORE exec_module
+# This is required for dataclass decorators to find the module by name
+sys.modules["parallel_orchestrator_reviewer"] = orchestrator_module
orchestrator_spec.loader.exec_module(orchestrator_module)
# Restore all mocked modules to avoid polluting other tests
for name in _modules_to_mock: