From f1eb891391a52c2d8165832d88422ace7972da5b Mon Sep 17 00:00:00 2001 From: POM Date: Sat, 14 Mar 2026 12:35:27 +0100 Subject: [PATCH 01/43] fix: strip image blocks from release notes on website The release notes contain a mascot image that renders as a broken or unwanted image on the website. Strip HTML

blocks from release body before rendering. Co-Authored-By: Claude Opus 4.6 (1M context) --- website/main.js | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/website/main.js b/website/main.js index daba58885..6a9ecafa8 100644 --- a/website/main.js +++ b/website/main.js @@ -158,8 +158,10 @@ async function loadReleases() { } container.innerHTML = releases.map(r => { - const fullHtml = renderMarkdownLight(r.body || 'No release notes.'); - const preview = getFirstParagraph(r.body || ''); + // Strip HTML image blocks (mascot etc.) from release notes + const body = (r.body || 'No release notes.').replace(/]*>[\s\S]*?]*>[\s\S]*?<\/p>/gi, '').trim(); + const fullHtml = renderMarkdownLight(body); + const preview = getFirstParagraph(body); const previewHtml = renderMarkdownLight(preview); const hasMore = (r.body || '').trim().length > preview.length + 10; From 45da83da7433679e0d88355fb98c1bf03c1cf6d8 Mon Sep 17 00:00:00 2001 From: POM Date: Sun, 15 Mar 2026 22:42:37 +0100 Subject: [PATCH 02/43] fix: derive project root from state_file path in do_import_run follow-up scan When running `desloppify review --import-run --scan-after-import`, the follow-up scan was using _runtime_project_root() which could return a contaminated path (pointing to the results directory instead of the actual project root). This caused state to be written to the wrong location. Instead, derive the project root from the state_file parameter which is known to be correct: state_file.parent.parent gives us the project root from `/.desloppify/state-.json`. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/commands/review/batch/orchestrator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desloppify/app/commands/review/batch/orchestrator.py b/desloppify/app/commands/review/batch/orchestrator.py index 0d6ea09ca..1902a7005 100644 --- a/desloppify/app/commands/review/batch/orchestrator.py +++ b/desloppify/app/commands/review/batch/orchestrator.py @@ -582,7 +582,7 @@ def do_import_run( lang_name=lang_name, scan_path=scan_path, deps=FollowupScanDeps( - project_root=_runtime_project_root(), + project_root=Path(state_file).parent.parent, timeout_seconds=FOLLOWUP_SCAN_TIMEOUT_SECONDS, python_executable=sys.executable, subprocess_run=subprocess.run, From 82cf0382afa29be2f4e617e1a56969ae22e35f8c Mon Sep 17 00:00:00 2001 From: POM Date: Sun, 15 Mar 2026 22:50:52 +0100 Subject: [PATCH 03/43] fix: preserve plan_start_scores during force-rescan to protect manual clusters _reset_cycle_for_force_rescan() was clearing plan_start_scores, which made is_mid_cycle() return False. This caused auto_cluster_issues() to run full cluster regeneration instead of early-returning, wiping manual cluster items via issue ID reconciliation in scan_issue_reconcile.py. The fix stops clearing plan_start_scores so is_mid_cycle() remains True during force-rescan, preserving manual cluster data. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- .../app/commands/scan/plan_reconcile.py | 23 +++++++++++-------- ...plan_reconcile_postflight_and_reconcile.py | 16 +++++++++++++ 2 files changed, 30 insertions(+), 9 deletions(-) diff --git a/desloppify/app/commands/scan/plan_reconcile.py b/desloppify/app/commands/scan/plan_reconcile.py index 95fbd1257..4955ce629 100644 --- a/desloppify/app/commands/scan/plan_reconcile.py +++ b/desloppify/app/commands/scan/plan_reconcile.py @@ -9,6 +9,7 @@ from desloppify.base.exception_sets import PLAN_LOAD_EXCEPTIONS from desloppify.base.output.fallbacks import log_best_effort_failure from desloppify.base.output.terminal import colorize +from desloppify.app.commands.helpers.transition_messages import emit_transition_message from desloppify.base.config import target_strict_score_from_config from desloppify.engine._plan.constants import ( WORKFLOW_COMMUNICATE_SCORE_ID, @@ -34,16 +35,19 @@ def _reset_cycle_for_force_rescan(plan: dict[str, object]) -> bool: - """Clear all cycle state when --force-rescan is used.""" + """Clear synthetic queue items when --force-rescan is used. + + Preserves ``plan_start_scores`` so that ``is_mid_cycle()`` still + returns True — this prevents ``auto_cluster_issues()`` from running + full cluster regeneration, which would wipe manual cluster items. 
+ """ order: list[str] = plan.get("queue_order", []) synthetic = [item for item in order if is_synthetic_id(item)] - if not synthetic and not plan.get("plan_start_scores"): + if not synthetic: return False for item in synthetic: order.remove(item) - plan["plan_start_scores"] = {} clear_score_communicated_sentinel(plan) - plan.pop("scan_count_at_plan_start", None) meta = plan.get("epic_triage_meta", {}) if isinstance(meta, dict): meta.pop("triage_recommended", None) @@ -295,13 +299,14 @@ def reconcile_plan_post_scan(runtime: Any) -> None: plan, mid_cycle=_is_mid_cycle_scan(plan, runtime.state) or force_rescan, ) + if result.lifecycle_phase_changed: + emit_transition_message(result.lifecycle_phase) dirty = result.dirty or dirty - if not force_rescan: - if _sync_plan_start_scores_and_log(plan, runtime.state): - dirty = True - if _sync_postflight_scan_completion_and_log(plan, runtime.state): - dirty = True + if not force_rescan and _sync_plan_start_scores_and_log(plan, runtime.state): + dirty = True + if _sync_postflight_scan_completion_and_log(plan, runtime.state): + dirty = True if dirty: try: diff --git a/desloppify/tests/commands/scan/test_plan_reconcile_postflight_and_reconcile.py b/desloppify/tests/commands/scan/test_plan_reconcile_postflight_and_reconcile.py index e5dd2e256..9960a3813 100644 --- a/desloppify/tests/commands/scan/test_plan_reconcile_postflight_and_reconcile.py +++ b/desloppify/tests/commands/scan/test_plan_reconcile_postflight_and_reconcile.py @@ -215,6 +215,22 @@ def test_marks_postflight_scan_on_empty_plan(self, monkeypatch): assert isinstance(saved[0]["plan_start_scores"].get("strict"), float) assert saved[0]["refresh_state"]["postflight_scan_completed_at_scan_count"] == 1 + def test_force_rescan_marks_postflight_scan_complete(self, monkeypatch): + plan = empty_plan() + plan["queue_order"] = ["workflow::run-scan"] + plan["plan_start_scores"] = {"strict": 86.4} + state = _make_state(scan_count=5) + + saved: list[dict] = [] + 
monkeypatch.setattr(reconcile_mod, "load_plan", lambda _path=None: plan) + monkeypatch.setattr(reconcile_mod, "save_plan", lambda p, _path=None: saved.append(p)) + + reconcile_mod.reconcile_plan_post_scan(_runtime(state=state, force_rescan=True)) + + assert len(saved) == 1 + assert saved[0]["plan_start_scores"] == {"strict": 86.4} + assert saved[0]["refresh_state"]["postflight_scan_completed_at_scan_count"] == 5 + def test_superseded_issue_removed_from_clusters(self, monkeypatch): plan = empty_plan() plan["queue_order"] = ["issue-1", "issue-2"] From 10890e65c44d5a4d8c77e8ca5c6dafa7aff727f5 Mon Sep 17 00:00:00 2001 From: POM Date: Sun, 15 Mar 2026 23:05:23 +0100 Subject: [PATCH 04/43] feat: require explicit triage decisions for auto-clusters Auto-clusters (auto/unused, auto/security, etc.) were silently left in backlog because the triage prompt said "silence means leave in backlog" and the output schema had no field for auto-cluster decisions. Now the triager must make an explicit promote/skip/break_up decision for each auto-cluster, and apply_triage_to_plan() processes those decisions. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/engine/_plan/triage/apply.py | 100 +++++++++++++++++- desloppify/engine/_plan/triage/prompt.py | 34 +++++- .../plan/test_epic_triage_prompt_direct.py | 2 +- 3 files changed, 129 insertions(+), 7 deletions(-) diff --git a/desloppify/engine/_plan/triage/apply.py b/desloppify/engine/_plan/triage/apply.py index 0f8177878..2b2004dbc 100644 --- a/desloppify/engine/_plan/triage/apply.py +++ b/desloppify/engine/_plan/triage/apply.py @@ -17,7 +17,7 @@ from desloppify.engine._state.schema import StateModel, ensure_state_defaults, utc_now from .dismiss import dismiss_triage_issues -from .prompt import TriageResult +from .prompt import AutoClusterDecision, TriageResult @dataclass @@ -32,6 +32,9 @@ class TriageMutationResult: strategy_summary: str = "" triage_version: int = 0 dry_run: bool = False + auto_clusters_promoted: int = 0 + auto_clusters_skipped: int = 0 + auto_clusters_broken_up: int = 0 @property def clusters_created(self) -> int: @@ -189,6 +192,90 @@ def _set_triage_meta( } +def _apply_auto_cluster_decisions( + *, + plan: PlanModel, + decisions: list[AutoClusterDecision], + order: list[str], + now: str, + version: int, + result: TriageMutationResult, +) -> None: + """Process auto_cluster_decisions from the triage result. 
+ + - promote: add cluster issue IDs to queue_order + - skip: mark the cluster as skipped in the plan + - break_up: record the decision for downstream processing + """ + clusters = plan["clusters"] + + for decision in decisions: + cluster_name = decision.cluster + cluster = clusters.get(cluster_name) + if cluster is None: + continue + + action = decision.action + + if action == "promote": + issue_ids = cluster.get("issue_ids", []) + existing_in_order = set(order) + new_ids = [ + fid for fid in issue_ids + if isinstance(fid, str) and fid not in existing_in_order + ] + # Determine insertion position based on priority hint + priority = (decision.priority or "").lower().strip() + if priority == "first": + for i, fid in enumerate(new_ids): + order.insert(i, fid) + elif priority.startswith("after "): + target = priority[len("after "):] + insert_idx = len(order) + for idx, item in enumerate(order): + if target in item: + insert_idx = idx + 1 + break + for i, fid in enumerate(new_ids): + order.insert(insert_idx + i, fid) + elif priority.startswith("before "): + target = priority[len("before "):] + insert_idx = len(order) + for idx, item in enumerate(order): + if target in item: + insert_idx = idx + break + for i, fid in enumerate(new_ids): + order.insert(insert_idx + i, fid) + else: + # "last" or unrecognized: append to end + order.extend(new_ids) + + cluster["execution_status"] = EXECUTION_STATUS_ACTIVE + cluster["updated_at"] = now + cluster["triage_version"] = version + result.auto_clusters_promoted += 1 + + elif action == "skip": + cluster["triage_skip"] = { + "reason": decision.reason, + "skipped_at": now, + "triage_version": version, + } + cluster["updated_at"] = now + result.auto_clusters_skipped += 1 + + elif action == "break_up": + cluster["triage_break_up"] = { + "reason": decision.reason, + "sub_clusters": decision.sub_clusters, + "decided_at": now, + "triage_version": version, + } + cluster["updated_at"] = now + result.auto_clusters_broken_up += 1 + + def 
apply_triage_to_plan( plan: PlanModel, state: StateModel, @@ -250,6 +337,17 @@ def apply_triage_to_plan( dismissed_ids=dismissed_ids, ) + # Process auto-cluster decisions (backward-compatible: no-op if empty) + if triage.auto_cluster_decisions: + _apply_auto_cluster_decisions( + plan=plan, + decisions=triage.auto_cluster_decisions, + order=order, + now=now, + version=version, + result=result, + ) + _set_triage_meta( plan=plan, state=state, diff --git a/desloppify/engine/_plan/triage/prompt.py b/desloppify/engine/_plan/triage/prompt.py index 0284df187..8a41cc06b 100644 --- a/desloppify/engine/_plan/triage/prompt.py +++ b/desloppify/engine/_plan/triage/prompt.py @@ -106,6 +106,16 @@ class ContradictionNote: dismissed: str reason: str +@dataclass +class AutoClusterDecision: + """A triage decision for an auto-cluster.""" + + cluster: str + action: str # "promote", "skip", or "break_up" + reason: str = "" + priority: str = "" # e.g. "after dead-code-fixes", "last", "first" + sub_clusters: list[str] = field(default_factory=list) # for break_up action + @dataclass class TriageResult: """Parsed and validated LLM triage output.""" @@ -115,6 +125,7 @@ class TriageResult: dismissed_issues: list[DismissedIssue] = field(default_factory=list) contradiction_notes: list[ContradictionNote] = field(default_factory=list) priority_rationale: str = "" + auto_cluster_decisions: list[AutoClusterDecision] = field(default_factory=list) @property def clusters(self) -> list[dict]: @@ -285,7 +296,12 @@ def collect_triage_input(plan: PlanModel, state: StateModel) -> TriageInput: "contradiction_notes": [ {"kept": "issue_id", "dismissed": "issue_id", "reason": "why"} ], - "priority_rationale": "why the dependency_order is what it is" + "priority_rationale": "why the dependency_order is what it is", + "auto_cluster_decisions": [ + {"cluster": "auto/security", "action": "promote", "priority": "first", "reason": "high-value security fixes"}, + {"cluster": "auto/unused", "action": "skip", "reason": 
"mostly test assert noise"}, + {"cluster": "auto/test_coverage", "action": "break_up", "reason": "split by module", "sub_clusters": ["auto/test_coverage_api", "auto/test_coverage_core"]} + ] } """ @@ -463,7 +479,13 @@ def _append_mechanical_backlog_section( parts.append( "These detector-created items stay in backlog unless you explicitly promote them into the active queue." ) - parts.append("Silence means leave the item or cluster in backlog.") + parts.append( + "You MUST make an explicit decision for each auto-cluster listed below. " + "Include every auto-cluster in your `auto_cluster_decisions` output with one of: " + "promote (add to active queue with a priority position), " + "skip (leave in backlog with a reason), or " + "break_up (split into smaller sub-clusters with a reason)." + ) rendered_clusters: list[tuple[str, dict, int]] = [] for name, cluster in auto_clusters.items(): @@ -479,10 +501,11 @@ def _append_mechanical_backlog_section( rendered_clusters.append((name, cluster, member_count)) if rendered_clusters: - parts.append("### Auto-clusters") + parts.append("### Auto-clusters (decision required for each)") parts.append( - "These are pre-grouped detector findings. Promote whole clusters with " - "`desloppify plan promote auto/`." + "These are pre-grouped detector findings. You must decide for each cluster: " + "promote (into active queue), skip (with reason), or break_up (into sub-clusters). " + "Include your decisions in the `auto_cluster_decisions` array in your response." 
) rendered_clusters.sort(key=lambda item: (-item[2], item[0])) visible_clusters = rendered_clusters[:15] @@ -589,6 +612,7 @@ def build_triage_prompt(si: TriageInput) -> str: __all__ = [ "_TRIAGE_SYSTEM_PROMPT", + "AutoClusterDecision", "ContradictionNote", "DismissedIssue", "TriageInput", diff --git a/desloppify/tests/plan/test_epic_triage_prompt_direct.py b/desloppify/tests/plan/test_epic_triage_prompt_direct.py index c8e8647d1..6fd9968ec 100644 --- a/desloppify/tests/plan/test_epic_triage_prompt_direct.py +++ b/desloppify/tests/plan/test_epic_triage_prompt_direct.py @@ -135,7 +135,7 @@ def test_build_triage_prompt_includes_mechanical_backlog_context() -> None: prompt = build_triage_prompt(triage_input) assert "## Mechanical backlog (2 items: 1 in 1 auto-clusters, 1 unclustered)" in prompt - assert "### Auto-clusters" in prompt + assert "### Auto-clusters (decision required for each)" in prompt assert "- auto/unused-imports (1 items) [autofix: desloppify autofix import-cleanup --dry-run]" in prompt assert "Remove 1 unused import issue" in prompt assert "### Unclustered items (1 items — needs human judgment or isolated findings)" in prompt From 86e1c052dc3affbca89a3842bee168b136193ea0 Mon Sep 17 00:00:00 2001 From: POM Date: Sun, 15 Mar 2026 23:19:08 +0100 Subject: [PATCH 05/43] feat: require explicit backlog decisions for auto-clusters in staged triage The staged triage pipeline previously treated auto-clusters as optional in the reflect stage ("silence means it stays in backlog"). This change makes auto-cluster decisions mandatory, matching the treatment review issues get via the Coverage Ledger. 
Changes: - Reflect instructions: require a ## Backlog Decisions section listing every auto-cluster with promote/skip/supersede (replaces "silence means leave") - Organize instructions: clarify that ALL backlog decisions from reflect must be executed, not just promotions - Reflect validation: parse and persist BacklogDecision entries; warn (but don't block) when auto-clusters exist without a Backlog Decisions section - Organize validation: warn when reflect requested promotions that weren't executed during organize Co-Authored-By: Claude Opus 4.6 (1M context) --- .../stage_prompts_instruction_blocks.py | 26 ++-- .../commands/plan/triage/stages/organize.py | 7 ++ .../commands/plan/triage/stages/reflect.py | 24 +++- .../plan/triage/validation/organize_policy.py | 40 ++++++- .../triage/validation/reflect_accounting.py | 112 ++++++++++++++++++ 5 files changed, 197 insertions(+), 12 deletions(-) diff --git a/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py b/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py index 0abc9479a..abfc4f124 100644 --- a/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py +++ b/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py @@ -136,11 +136,13 @@ def _reflect_instructions(mode: PromptMode = "self_record") -> str: another round of fixes. 7. **Consider mechanical backlog** — the backlog section shows auto-clusters (pre-grouped detector findings) and unclustered items. For each auto-cluster: - - **promote**: name it in a `## Backlog Promotions` section. Prefer clusters with + - **promote**: name it in a `## Backlog Decisions` section. Prefer clusters with `[autofix: ...]` hints because they are lower-risk. - - **leave**: say nothing. Silence means it stays in backlog. + - **skip**: explicitly skip with a reason (e.g., "mostly test noise", "low value"). 
- **supersede**: absorb the underlying work into a review cluster when the same files or root cause already belong together. + You MUST make an explicit decision for every auto-cluster. Include a `## Backlog Decisions` + section listing each auto-cluster with: promote, skip (with reason), or supersede. For unclustered items: promote individually or group related ones into a manual cluster. Mechanical items are NOT part of the Coverage Ledger — that ledger remains review-issues only. 8. **Account for every issue exactly once** — every open issue hash must appear in exactly one @@ -159,8 +161,10 @@ def _reflect_instructions(mode: PromptMode = "self_record") -> str: Cluster "media-lightbox-hooks" (all in src/domains/media-lightbox/) Cluster "task-typing" (both touch src/types/database.ts) -## Backlog Promotions -- Promote auto/unused-imports (overlaps with the files in cluster "task-typing") +## Backlog Decisions +- auto/unused-imports -> promote (overlaps with the files in cluster "task-typing") +- auto/dead-code -> skip "mostly test noise, low value" +- auto/type-assertions -> supersede "absorbed into cluster task-typing" ## Skip Decisions Skip "false-positive-current-code" (false positive per observe) @@ -215,9 +219,10 @@ def _organize_instructions(mode: PromptMode = "self_record") -> str: 3. Create clusters as specified in the blueprint: `desloppify plan cluster create --description "..."` 4. Add issues: `desloppify plan cluster add ` -5. Promote any mechanical backlog items that reflect explicitly selected: - - Auto-clusters: `desloppify plan promote auto/` - - Individual items: `desloppify plan promote ` +5. 
Execute ALL backlog decisions from the reflect stage's `## Backlog Decisions` section: + - **promote**: `desloppify plan promote auto/` + - **skip**: no CLI action needed — the cluster stays in backlog, skip is documented + - **supersede**: absorb into the named review cluster (already handled by clustering above) - With placement: `desloppify plan promote before -t ` 6. Add steps that consolidate: one step per file or logical change, NOT one step per issue 7. Set `--effort` on each step individually (trivial/small/medium/large) @@ -243,9 +248,10 @@ def _organize_instructions(mode: PromptMode = "self_record") -> str: If reflect skipped additional issues (over-engineering/not-worth-it), include those skip decisions. 3. Define the clusters exactly as they should be created. 4. Assign every kept issue to a cluster. -5. Consolidate steps: one step per file or logical change, NOT one step per issue. -6. Assign an effort level to each planned step (trivial/small/medium/large). -7. Call out cross-cluster dependencies when clusters touch overlapping files. +5. Execute ALL backlog decisions from reflect's `## Backlog Decisions` section (promote/skip/supersede). +6. Consolidate steps: one step per file or logical change, NOT one step per issue. +7. Assign an effort level to each planned step (trivial/small/medium/large). +8. Call out cross-cluster dependencies when clusters touch overlapping files. 
""" tail = """\ When done, write a plain-text organize report that names the clusters, their issue membership, diff --git a/desloppify/app/commands/plan/triage/stages/organize.py b/desloppify/app/commands/plan/triage/stages/organize.py index 5c007af8e..6f6b137e8 100644 --- a/desloppify/app/commands/plan/triage/stages/organize.py +++ b/desloppify/app/commands/plan/triage/stages/organize.py @@ -21,6 +21,7 @@ _organize_report_or_error, _unclustered_review_issues_or_error, _validate_organize_against_ledger_or_error, + validate_backlog_promotions_executed, ) from ..validation.stage_policy import require_prerequisite from .records import record_organize_stage @@ -135,6 +136,12 @@ def _validate_organize_submission( plan=plan, stages=stages, ): return None + + # Warn (non-blocking) when reflect requested backlog promotions that weren't executed + backlog_warnings = validate_backlog_promotions_executed(plan=plan, stages=stages) + for warning in backlog_warnings: + print(colorize(f" Warning: {warning}", "yellow")) + if not _enforce_cluster_activity_for_organize( plan=plan, stages=stages, diff --git a/desloppify/app/commands/plan/triage/stages/reflect.py b/desloppify/app/commands/plan/triage/stages/reflect.py index 52f512bac..99db03752 100644 --- a/desloppify/app/commands/plan/triage/stages/reflect.py +++ b/desloppify/app/commands/plan/triage/stages/reflect.py @@ -11,8 +11,11 @@ from ..stage_queue import cascade_clear_dispositions, cascade_clear_later_confirmations, has_triage_in_queue from ..services import TriageServices, default_triage_services from ..validation.reflect_accounting import ( + BacklogDecision, ReflectDisposition, + parse_backlog_decisions, parse_reflect_dispositions, + validate_backlog_decisions, validate_reflect_accounting, ) from ..validation.stage_policy import auto_confirm_observe_if_attested @@ -55,7 +58,7 @@ def _validate_reflect_submission( stages: dict, attestation: str | None, services: TriageServices, -) -> tuple[object, int, dict, list[str], 
set[str], list[str], list[str], list[ReflectDisposition]] | None: +) -> tuple[object, int, dict, list[str], set[str], list[str], list[str], list[ReflectDisposition], list[BacklogDecision]] | None: if "observe" not in stages: print(colorize(" Cannot reflect: observe stage not complete.", "red")) print(colorize(' Run: desloppify plan triage --stage observe --report "..."', "dim")) @@ -127,6 +130,18 @@ def _validate_reflect_submission( # Parse structured disposition ledger from Coverage Ledger section disposition_ledger = parse_reflect_dispositions(report, valid_ids) + # Validate backlog decisions for auto-clusters (warn, don't block) + auto_clusters = getattr(triage_input, "auto_clusters", None) or {} + auto_cluster_names = sorted(auto_clusters.keys()) + _, backlog_warnings = validate_backlog_decisions( + report=report, + auto_cluster_names=auto_cluster_names, + ) + for warning in backlog_warnings: + print(colorize(f" Warning: {warning}", "yellow")) + + backlog_decisions = parse_backlog_decisions(report) + return ( triage_input, issue_count, @@ -136,6 +151,7 @@ def _validate_reflect_submission( missing_ids, duplicate_ids, disposition_ledger, + backlog_decisions, ) @@ -151,6 +167,7 @@ def _persist_reflect_stage( duplicate_ids: list[str], recurring_dims: list[str], disposition_ledger: list[ReflectDisposition], + backlog_decisions: list[BacklogDecision], existing_stage: dict | None, is_reuse: bool, services: TriageServices, @@ -182,6 +199,9 @@ def _persist_reflect_stage( entry["target"] = d.target entry["decision_source"] = "reflect" + if backlog_decisions: + reflect_stage["backlog_decisions"] = [d.to_dict() for d in backlog_decisions] + stages["reflect"] = reflect_stage if is_reuse and existing_stage and existing_stage.get("confirmed_at"): reflect_stage["confirmed_at"] = existing_stage["confirmed_at"] @@ -239,6 +259,7 @@ def _cmd_stage_reflect( ( triage_input, issue_count, recurring, recurring_dims, cited_ids, missing_ids, duplicate_ids, disposition_ledger, + 
backlog_decisions, ) = submission reflect_stage, cleared = _persist_reflect_stage( plan=plan, @@ -251,6 +272,7 @@ def _cmd_stage_reflect( duplicate_ids=duplicate_ids, recurring_dims=recurring_dims, disposition_ledger=disposition_ledger, + backlog_decisions=backlog_decisions, existing_stage=existing_stage, is_reuse=is_reuse, services=resolved_services, diff --git a/desloppify/app/commands/plan/triage/validation/organize_policy.py b/desloppify/app/commands/plan/triage/validation/organize_policy.py index cf5b4f89b..d46f33d73 100644 --- a/desloppify/app/commands/plan/triage/validation/organize_policy.py +++ b/desloppify/app/commands/plan/triage/validation/organize_policy.py @@ -9,7 +9,7 @@ from ..review_coverage import cluster_issue_ids, manual_clusters_with_issues from ..stages.helpers import unclustered_review_issues, unenriched_clusters -from .reflect_accounting import ReflectDisposition +from .reflect_accounting import BacklogDecision, ReflectDisposition @dataclass(frozen=True) @@ -275,6 +275,43 @@ def _validate_organize_against_ledger_or_error( return False +def validate_backlog_promotions_executed( + *, + plan: dict, + stages: dict, +) -> list[str]: + """Warn when reflect requested backlog promotions that organize didn't execute. + + Returns a list of warning strings (non-blocking). Empty means all good. 
+ """ + reflect_data = stages.get("reflect", {}) + raw_decisions = reflect_data.get("backlog_decisions", []) + if not raw_decisions: + return [] + + decisions = [BacklogDecision.from_dict(d) for d in raw_decisions] + promote_decisions = [d for d in decisions if d.decision == "promote"] + if not promote_decisions: + return [] + + # Check which promoted clusters actually got promoted (are in queue_order + # or have execution_status set to active) + clusters = plan.get("clusters", {}) + warnings: list[str] = [] + for decision in promote_decisions: + cluster = clusters.get(decision.cluster_name) + if cluster is None: + continue + # A promoted cluster should have been activated + execution_status = cluster.get("execution_status", "") + if execution_status not in ("active", "in_progress"): + warnings.append( + f"Reflect requested promoting {decision.cluster_name} " + f"but it was not promoted during organize." + ) + return warnings + + __all__ = [ "ActualDisposition", "LedgerMismatch", @@ -283,6 +320,7 @@ def _validate_organize_against_ledger_or_error( "_organize_report_or_error", "_unclustered_review_issues_or_error", "_validate_organize_against_ledger_or_error", + "validate_backlog_promotions_executed", "validate_organize_against_dispositions", "validate_organize_against_reflect_ledger", ] diff --git a/desloppify/app/commands/plan/triage/validation/reflect_accounting.py b/desloppify/app/commands/plan/triage/validation/reflect_accounting.py index 6aa031509..4f44374f3 100644 --- a/desloppify/app/commands/plan/triage/validation/reflect_accounting.py +++ b/desloppify/app/commands/plan/triage/validation/reflect_accounting.py @@ -366,9 +366,121 @@ def validate_reflect_accounting( return False, cited, missing, duplicates +BacklogDecisionKind = Literal["promote", "skip", "supersede"] + + +@dataclass(frozen=True) +class BacklogDecision: + """One auto-cluster's intended disposition as declared by the reflect stage.""" + + cluster_name: str + decision: BacklogDecisionKind + 
reason: str = "" + + def to_dict(self) -> dict: + """Serialize for JSON persistence.""" + d: dict = {"cluster_name": self.cluster_name, "decision": self.decision} + if self.reason: + d["reason"] = self.reason + return d + + @classmethod + def from_dict(cls, data: dict | BacklogDecision) -> BacklogDecision: + """Deserialize from persisted plan data, or pass through unchanged.""" + if isinstance(data, cls): + return data + return cls( + cluster_name=data.get("cluster_name", ""), + decision=data.get("decision", "skip"), # type: ignore[arg-type] + reason=data.get("reason", ""), + ) + + +_BACKLOG_DECISION_RE = re.compile( + r"-\s*(\S+)\s*->\s*(promote|skip|supersede)\b\s*(.*)", + re.IGNORECASE, +) + + +def _iter_backlog_decisions_lines(report: str) -> tuple[bool, list[str]]: + """Extract lines from the ## Backlog Decisions section of a reflect report.""" + found_section = False + in_section = False + lines: list[str] = [] + for raw_line in report.splitlines(): + line = raw_line.strip() + if re.fullmatch(r"##\s+Backlog Decisions", line, re.IGNORECASE): + found_section = True + in_section = True + continue + if in_section and re.match(r"##\s+", line): + break + if in_section: + lines.append(line) + return found_section, lines + + +def parse_backlog_decisions(report: str) -> list[BacklogDecision]: + """Parse structured backlog decisions from the ## Backlog Decisions section.""" + _, lines = _iter_backlog_decisions_lines(report) + decisions: list[BacklogDecision] = [] + for line in lines: + match = _BACKLOG_DECISION_RE.match(line) + if not match: + continue + cluster_name = match.group(1).strip().strip("`") + decision_raw = match.group(2).strip().lower() + reason = match.group(3).strip().strip('"\'') + if decision_raw in ("promote", "skip", "supersede"): + decisions.append(BacklogDecision( + cluster_name=cluster_name, + decision=decision_raw, # type: ignore[arg-type] + reason=reason, + )) + return decisions + + +def validate_backlog_decisions( + *, + report: str, + 
auto_cluster_names: list[str], +) -> tuple[bool, list[str]]: + """Warn when auto-clusters exist but no backlog decisions section is found. + + Returns ``(ok, warnings)`` — always ``ok=True`` (non-blocking), but + populates warnings when backlog decisions are missing or incomplete. + """ + if not auto_cluster_names: + return True, [] + + found_section, _ = _iter_backlog_decisions_lines(report) + if not found_section: + return True, [ + f"Reflect report has {len(auto_cluster_names)} auto-cluster(s) in the backlog " + "but no `## Backlog Decisions` section. Each auto-cluster should have an " + "explicit decision: promote, skip (with reason), or supersede." + ] + + decisions = parse_backlog_decisions(report) + decided_names = {d.cluster_name for d in decisions} + missing = [name for name in auto_cluster_names if name not in decided_names] + if missing: + missing_str = ", ".join(missing[:10]) + suffix = f" (and {len(missing) - 10} more)" if len(missing) > 10 else "" + return True, [ + f"Backlog Decisions section is missing decisions for {len(missing)} " + f"auto-cluster(s): {missing_str}{suffix}" + ] + + return True, [] + + __all__ = [ + "BacklogDecision", "ReflectDisposition", "analyze_reflect_issue_accounting", + "parse_backlog_decisions", "parse_reflect_dispositions", + "validate_backlog_decisions", "validate_reflect_accounting", ] From 5ffc9a9a8c676f7a15c4e0c5b358fe9e548dd06c Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 01:50:01 +0100 Subject: [PATCH 06/43] feat: unified triage pipeline + step detail display improvements Unified triage pipeline: - Widen is_triage_finding to all defects (mechanical + review + concern) - Sub-group auto-clusters by rule kind (auto/security-B602 instead of auto/security) - Add MEDIUM+LOW bandit filter and skip_tests config option - Auto-cluster statistical summaries in triage prompt (severity, confidence, samples) - Cluster-level observe sampling (ClusterVerdict parsing) - Blocking backlog decisions validation (every 
auto-cluster must have a decision) - Threshold-based staleness (10% mechanical growth, any new review issue) - Two-tier accounting: review issues get per-item ledger, mechanical via cluster decisions - Auto-add manual cluster members to queue_order on add_to_cluster Display improvements: - cluster show: steps now show effort tag, wrapped detail (4 lines), short refs - cluster show: members compact when steps exist (ID list, not full issue detail) - cluster list --verbose: effort summary column (3T 1S), hide empty auto-clusters, drop noise columns - next: cluster drill header shows step done markers and effort tags - next: individual task shows full untruncated step detail matched via issue_refs - next: focus mode shows cluster context + relevant step detail Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/commands/next/render.py | 40 ++++++- .../app/commands/next/render_workflow.py | 24 +++- .../app/commands/plan/cluster/ops_display.py | 102 ++++++++++++----- desloppify/app/commands/plan/cluster/steps.py | 34 ++++-- .../commands/plan/triage/observe_batches.py | 54 +++++++++ .../stage_prompts_instruction_blocks.py | 42 +++++-- .../plan/triage/stages/evidence_parsing.py | 103 ++++++++++++++++++ .../commands/plan/triage/stages/helpers.py | 7 +- .../commands/plan/triage/stages/observe.py | 15 +++ .../triage/validation/reflect_accounting.py | 14 +-- desloppify/engine/_plan/cluster_strategy.py | 8 ++ desloppify/engine/_plan/operations/cluster.py | 11 +- desloppify/engine/_plan/policy/stale.py | 53 ++++++++- desloppify/engine/_plan/triage/prompt.py | 95 +++++++++++++--- desloppify/engine/_scoring/subjective/core.py | 4 +- desloppify/engine/_state/issue_semantics.py | 2 +- desloppify/engine/_work_queue/synthetic.py | 6 +- desloppify/languages/python/_security.py | 17 ++- .../python/detectors/bandit_adapter.py | 12 +- .../commands/plan/test_cluster_ops_direct.py | 7 +- ...curity_dictkeys_and_smells_split_direct.py | 2 +- 
.../tests/plan/test_epic_triage_apply.py | 10 +- .../plan/test_epic_triage_prompt_direct.py | 2 +- desloppify/tests/plan/test_stale_policy.py | 17 ++- 24 files changed, 582 insertions(+), 99 deletions(-) diff --git a/desloppify/app/commands/next/render.py b/desloppify/app/commands/next/render.py index 1124c521b..4765bf6d1 100644 --- a/desloppify/app/commands/next/render.py +++ b/desloppify/app/commands/next/render.py @@ -27,6 +27,7 @@ from .render_scoring import render_score_impact as _render_score_impact_impl from .render_workflow import render_workflow_action as _render_workflow_action_impl from .render_workflow import render_workflow_stage as _render_workflow_stage_impl +from .render_workflow import step_full as _step_full_impl from .render_workflow import step_text as _step_text_impl @@ -34,6 +35,10 @@ def _step_text(step: str | dict) -> str: return _step_text_impl(step) +def _step_full(step: str | dict, *, indent: str = " ") -> list[str]: + return _step_full_impl(step, indent=indent) + + def _render_workflow_stage(item: dict) -> None: _render_workflow_stage_impl( item, @@ -108,11 +113,31 @@ def _render_plan_cluster_detail( desc_str = f' — "{cluster_desc}"' if cluster_desc else "" print(colorize(f" Cluster: {cluster_name}{desc_str} ({total} items)", "dim")) steps = plan_cluster.get("action_steps") or [] - if not (steps and single_item and not header_showed_plan): + if not steps: return - print(colorize("\n Steps:", "dim")) - for idx, step in enumerate(steps, 1): - print(colorize(f" {idx}. 
{_step_text(step)}", "dim")) + + # Find steps relevant to this item (via issue_refs) + item_id = item.get("id", "") + item_hash = item_id.rsplit("::", 1)[-1] if item_id else "" + relevant = [ + (idx, step) for idx, step in enumerate(steps, 1) + if isinstance(step, dict) and ( + item_id in step.get("issue_refs", []) + or any(item_hash and ref.endswith(item_hash) for ref in step.get("issue_refs", [])) + ) + ] + + if relevant: + # Show full detail for relevant steps — this is the execution view + print(colorize("\n Your step(s):", "bold")) + for idx, step in relevant: + for line in _step_full(step, indent=" "): + print(colorize(line, "dim")) + elif single_item and not header_showed_plan: + # No matching steps — show the full plan as context + print(colorize("\n Steps:", "dim")) + for idx, step in enumerate(steps, 1): + print(colorize(f" {idx}. {_step_text(step)}", "dim")) def _render_issue_metadata(item: dict, detail: dict) -> None: @@ -297,12 +322,15 @@ def _render_cluster_drill_header( steps = cluster_data.get("action_steps") or [] if steps: print(colorize(" │", "cyan")) - print(colorize(" │ Action plan:", "cyan")) + print(colorize(" │ Steps:", "cyan")) for idx, step in enumerate(steps, 1): - print(colorize(f" │ {idx}. {_step_text(step)}", "cyan")) + done = isinstance(step, dict) and step.get("done", False) + marker = "[x]" if done else "[ ]" + print(colorize(f" │ {idx}. 
{marker} {_step_text(step)}", "cyan")) print(colorize(" └" + "─" * 60 + "┘", "cyan")) print(colorize(" Back to full queue: desloppify next", "dim")) if steps: + print(colorize(f" Step detail: desloppify plan cluster show {cluster_name}", "dim")) print(colorize(f" Mark step done: desloppify plan cluster update {cluster_name} --done-step N", "dim")) return bool(steps) diff --git a/desloppify/app/commands/next/render_workflow.py b/desloppify/app/commands/next/render_workflow.py index 37b5aef9a..9fd146fc3 100644 --- a/desloppify/app/commands/next/render_workflow.py +++ b/desloppify/app/commands/next/render_workflow.py @@ -10,10 +10,30 @@ def step_text(step: str | dict) -> str: if isinstance(step, dict): - return step.get("title", str(step)) + title = step.get("title", str(step)) + effort = step.get("effort", "") + return f"{title} [{effort}]" if effort else title return str(step) +def step_full(step: str | dict, *, indent: str = " ") -> list[str]: + """Return the full step rendering: title + effort + detail + refs.""" + import textwrap + + if isinstance(step, str): + return [f"{indent}{step}"] + lines: list[str] = [f"{indent}{step_text(step)}"] + detail = step.get("detail", "") + if detail: + for line in textwrap.wrap(detail, width=90): + lines.append(f"{indent} {line}") + refs = step.get("issue_refs", []) + if refs: + short_refs = [r.rsplit("::", 1)[-1] for r in refs] + lines.append(f"{indent} Refs: {', '.join(short_refs)}") + return lines + + def _detail_mapping(item: dict) -> dict: detail = item.get("detail", {}) return detail if isinstance(detail, dict) else {} @@ -120,4 +140,4 @@ def render_workflow_action(item: dict, *, colorize_fn) -> None: print(colorize_fn(f"\n Action: {item.get('primary_command', '')}", "cyan")) -__all__ = ["render_workflow_action", "render_workflow_stage", "step_text"] +__all__ = ["render_workflow_action", "render_workflow_stage", "step_full", "step_text"] diff --git a/desloppify/app/commands/plan/cluster/ops_display.py 
b/desloppify/app/commands/plan/cluster/ops_display.py index 7e08573aa..1a3f932e3 100644 --- a/desloppify/app/commands/plan/cluster/ops_display.py +++ b/desloppify/app/commands/plan/cluster/ops_display.py @@ -76,12 +76,38 @@ def _print_cluster_steps(steps: list[dict] | list[str]) -> None: print_step(i, step, colorize_fn=colorize) -def _print_cluster_members(args: argparse.Namespace, issue_ids: list[str]) -> None: +def _short_member_id(fid: str) -> str: + """Shorten an issue ID to its last segment for compact display.""" + return fid.rsplit("::", 1)[-1] + + +def _print_cluster_members(args: argparse.Namespace, issue_ids: list[str], *, has_steps: bool) -> None: print() if not issue_ids: print(colorize(" Members: (none)", "dim")) return + # When steps exist, members are audit trail — show compact list only. + # When no steps, members ARE the work — show full detail. + if has_steps: + short_ids = [_short_member_id(fid) for fid in issue_ids] + label = f" Members ({len(issue_ids)}): " + # Wrap IDs to fit ~100 char lines + lines: list[str] = [] + current = label + for i, sid in enumerate(short_ids): + sep = ", " if i > 0 else "" + if len(current) + len(sep) + len(sid) > 100 and current != label: + lines.append(current) + current = " " + sid + else: + current += sep + sid + lines.append(current) + for line in lines: + print(colorize(line, "dim")) + print(colorize(f" Full detail: desloppify show --no-budget", "dim")) + return + issues = _load_issues_best_effort(args) print(colorize(f" Members ({len(issue_ids)}):", "dim")) for idx, fid in enumerate(issue_ids, 1): @@ -106,7 +132,7 @@ def _cmd_cluster_show(args: argparse.Namespace) -> None: steps = cluster.get("action_steps") or [] issue_ids = cluster_issue_ids(cluster) _print_cluster_steps(steps) - _print_cluster_members(args, issue_ids) + _print_cluster_members(args, issue_ids, has_steps=bool(steps)) _print_cluster_commands(cluster_name) @@ -135,22 +161,37 @@ def _print_cluster_list_verbose( active: str | None, ) -> None: 
"""Print the verbose table view of the cluster list.""" - name_width = _cluster_list_name_width(sorted_clusters) - total = len(sorted_clusters) - has_dep = any(c.get("dependency_order") is not None for _, c in sorted_clusters) - print(colorize(f" Clusters ({total} total, sorted by priority/queue position):", "bold")) + # Filter out empty auto-clusters — they're noise + visible = [ + (name, cluster) for name, cluster in sorted_clusters + if len(cluster_issue_ids(cluster)) > 0 or not cluster.get("auto") + ] + if not visible: + print(" No clusters with items.") + return + + empty_auto = len(sorted_clusters) - len(visible) + name_width = max(20, min(35, max(len(name) for name, _ in visible))) + total_items = sum(len(cluster_issue_ids(c)) for _, c in visible) + total_steps = sum(len(c.get("action_steps") or []) for _, c in visible) + print(colorize( + f" {len(visible)} clusters ({total_items} issues, {total_steps} steps):", + "bold", + )) + if empty_auto: + print(colorize(f" ({empty_auto} empty auto-clusters hidden)", "dim")) print() - header, sep = _cluster_list_verbose_header(name_width, has_dep) + header = f" {'Name':<{name_width}} {'Issues':>6} {'Steps':>5} {'Effort':<10}" + sep = f" {'─'*name_width} {'─'*6} {'─'*5} {'─'*10}" print(colorize(header, "dim")) print(colorize(sep, "dim")) - for name, cluster in sorted_clusters: + for name, cluster in visible: print( _cluster_list_verbose_row( name, cluster, min_pos_cache[name], name_width=name_width, - has_dep=has_dep, active=active, ) ) @@ -165,12 +206,12 @@ def _cluster_list_verbose_header(name_width: int, has_dep: bool) -> tuple[str, s dep_header = f" {'Dep':>3}" if has_dep else "" header = ( f" {'#pos':<5} {'Pri':>3}{dep_header} {'Name':<{name_width}}" - f" {'Items':>5} {'Steps':>5} {'Type':<6} Description" + f" {'Items':>5} {'Steps':>5} {'Effort':<14} {'Type':<6} Description" ) dep_sep = f" {'─'*3}" if has_dep else "" sep = ( f" {'─'*4} {'─'*3}{dep_sep} {'─'*name_width}" - f" {'─'*5} {'─'*5} {'─'*6} {'─'*40}" + f" 
{'─'*5} {'─'*5} {'─'*14} {'─'*6} {'─'*40}" ) return header, sep @@ -189,34 +230,45 @@ def _cluster_dependency_token(cluster: dict, *, has_dep: bool) -> str: return f" {dep_token:>3}" +def _effort_summary(steps: list[dict]) -> str: + """Summarize step effort tags into a compact string like '3T 1S'.""" + if not steps: + return "—" + from collections import Counter + counts: Counter[str] = Counter() + for s in steps: + if isinstance(s, dict): + effort = s.get("effort", "") + if effort: + counts[effort] += 1 + if not counts: + return "—" + # Order: trivial < small < medium < large + order = {"trivial": 0, "small": 1, "medium": 2, "large": 3} + abbrev = {"trivial": "T", "small": "S", "medium": "M", "large": "L"} + parts = [] + for effort, _ in sorted(counts.items(), key=lambda kv: order.get(kv[0], 9)): + parts.append(f"{counts[effort]}{abbrev.get(effort, effort[0].upper())}") + return " ".join(parts) + + def _cluster_list_verbose_row( name: str, cluster: dict, min_pos: int, *, name_width: int, - has_dep: bool, active: str | None, ) -> str: member_count = len(cluster_issue_ids(cluster)) - desc = _cluster_list_description( - cluster.get("description") or "", - min_pos=min_pos, - member_count=member_count, - ) - pos_str = f"#{min_pos}" if min_pos < 999_999 else "—" - priority = cluster.get("priority") - pri_str = str(priority) if priority is not None else "—" - dep_str = _cluster_dependency_token(cluster, has_dep=has_dep) steps = cluster.get("action_steps") or [] steps_str = str(len(steps)) if steps else "—" - type_str = "auto" if cluster.get("auto") else "manual" - desc_truncated = (desc[:39] + "…") if len(desc) > 40 else desc + effort_str = _effort_summary(steps) name_display = (name[: name_width - 1] + "…") if len(name) > name_width else name focused = " *" if name == active else "" return ( - f" {pos_str:>5} {pri_str:>3}{dep_str} {name_display:<{name_width}}" - f" {member_count:>5} {steps_str:>5} {type_str:<6} {desc_truncated}{focused}" + f" 
{name_display:<{name_width}}" + f" {member_count:>6} {steps_str:>5} {effort_str:<10}{focused}" ) diff --git a/desloppify/app/commands/plan/cluster/steps.py b/desloppify/app/commands/plan/cluster/steps.py index 7152658aa..50c3cce93 100644 --- a/desloppify/app/commands/plan/cluster/steps.py +++ b/desloppify/app/commands/plan/cluster/steps.py @@ -2,23 +2,43 @@ from __future__ import annotations +import textwrap + +_DETAIL_WIDTH = 90 +_DETAIL_MAX_LINES = 4 + + +def _truncate_detail(detail: str) -> list[str]: + """Wrap and truncate detail to a readable block.""" + # Wrap long single-line details, then cap total lines + wrapped = textwrap.wrap(detail, width=_DETAIL_WIDTH) + if not wrapped: + return [] + if len(wrapped) <= _DETAIL_MAX_LINES: + return wrapped + return wrapped[:_DETAIL_MAX_LINES] + ["..."] + + +def _short_refs(refs: list[str]) -> list[str]: + """Shorten issue refs to their last segment for display.""" + return [r.rsplit("::", 1)[-1] for r in refs] + def print_step(i: int, step: dict, *, colorize_fn) -> None: - """Print a single step with title, detail, refs, and done status.""" + """Print a single step with title, effort, detail, and refs.""" done = step.get("done", False) marker = "[x]" if done else "[ ]" title = step.get("title", "") - print(f" {i}. {marker} {title}") - if done: - print(colorize_fn(" (completed)", "dim")) - return + effort = step.get("effort", "") + effort_tag = f" [{effort}]" if effort else "" + print(f" {i}. 
{marker} {title}{effort_tag}") detail = step.get("detail", "") if detail: - for line in detail.splitlines(): + for line in _truncate_detail(detail): print(colorize_fn(f" {line}", "dim")) refs = step.get("issue_refs", []) if refs: - print(colorize_fn(f" Refs: {', '.join(refs)}", "dim")) + print(colorize_fn(f" Refs: {', '.join(_short_refs(refs))}", "dim")) __all__ = ["print_step"] diff --git a/desloppify/app/commands/plan/triage/observe_batches.py b/desloppify/app/commands/plan/triage/observe_batches.py index 8bec4e5b0..edb2e9bc6 100644 --- a/desloppify/app/commands/plan/triage/observe_batches.py +++ b/desloppify/app/commands/plan/triage/observe_batches.py @@ -3,6 +3,7 @@ from __future__ import annotations from collections import defaultdict +from dataclasses import dataclass from desloppify.engine._state.schema import Issue from desloppify.engine.plan_triage import TriageInput @@ -56,7 +57,60 @@ def group_issues_into_observe_batches( return result +@dataclass +class AutoClusterSample: + """A sampled auto-cluster for observe-stage verification.""" + + cluster_name: str + total_count: int + sample_ids: list[str] + sample_issues: dict[str, Issue] + + +def sample_auto_clusters( + si: TriageInput, + sample_size: int = 5, +) -> list[AutoClusterSample]: + """Sample representative issues from each auto-cluster for verification. + + For each auto-cluster, pick up to *sample_size* issues (biased toward + higher severity) so the observe stage can spot-check false-positive rates. 
+ """ + auto_clusters = getattr(si, "auto_clusters", {}) + backlog = getattr( + si, "objective_backlog_issues", + getattr(si, "mechanical_issues", {}), + ) + samples: list[AutoClusterSample] = [] + for name, cluster in sorted(auto_clusters.items()): + issue_ids = cluster.get("issue_ids", []) + if not isinstance(issue_ids, list): + continue + member_ids = [iid for iid in issue_ids if isinstance(iid, str) and iid in backlog] + if not member_ids: + continue + + # Sort by severity (high first) for representative sampling + def _severity_key(iid: str) -> int: + issue = backlog.get(iid, {}) + detail = issue.get("detail") or {} + sev = str(detail.get("severity", "medium")).lower() if isinstance(detail, dict) else "medium" + return {"high": 0, "medium": 1, "low": 2}.get(sev, 1) + + member_ids.sort(key=_severity_key) + selected = member_ids[:sample_size] + samples.append(AutoClusterSample( + cluster_name=name, + total_count=len(member_ids), + sample_ids=selected, + sample_issues={iid: backlog[iid] for iid in selected}, + )) + return samples + + __all__ = [ + "AutoClusterSample", "group_issues_into_observe_batches", "observe_dimension_breakdown", + "sample_auto_clusters", ] diff --git a/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py b/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py index abfc4f124..2548589ea 100644 --- a/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py +++ b/desloppify/app/commands/plan/triage/runner/stage_prompts_instruction_blocks.py @@ -75,11 +75,28 @@ def _observe_instructions(mode: PromptMode = "self_record") -> str: {observe_example_report_quality()} +### Auto-Cluster Sampling + +For each auto-cluster provided, sample-check 3-5 items and render a **cluster-level verdict**. +Auto-cluster members do NOT need individual per-issue assessments — the cluster verdict covers them. 
+ +Use this template for each auto-cluster: +``` +- cluster: auto/security-B602 + verdict: mostly-false-positives + sample_count: 5 + false_positive_rate: 0.8 + recommendation: skip +``` + +Verdict options: `actionable`, `mostly-false-positives`, `mixed`, `low-value` +Recommendation options: `promote`, `skip`, `break_up` + **Validation checks (all blocking):** -- Every entry must have a recognized `verdict` keyword -- Every entry must have non-empty `verdict_reasoning` -- Every entry must have non-empty `files_read` list -- Every entry must have non-empty `recommendation` +- Every per-issue entry must have a recognized `verdict` keyword +- Every per-issue entry must have non-empty `verdict_reasoning` +- Every per-issue entry must have non-empty `files_read` list +- Every per-issue entry must have non-empty `recommendation` - Template fields left empty or with placeholder text @@ -134,17 +151,18 @@ def _reflect_instructions(mode: PromptMode = "self_record") -> str: 6. **Check recurring patterns** — compare current issues against resolved history. If the same dimension keeps producing issues, that's a root cause that needs addressing, not just another round of fixes. -7. **Consider mechanical backlog** — the backlog section shows auto-clusters - (pre-grouped detector findings) and unclustered items. For each auto-cluster: - - **promote**: name it in a `## Backlog Decisions` section. Prefer clusters with - `[autofix: ...]` hints because they are lower-risk. - - **skip**: explicitly skip with a reason (e.g., "mostly test noise", "low value"). - - **supersede**: absorb the underlying work into a review cluster when the same files - or root cause already belong together. +7. **Decide on auto-clusters** — auto-clusters are first-class triage candidates, not + an afterthought. The observe stage includes cluster-level verdicts with false-positive + rates from sampling. Use these verdicts to make informed decisions: + - **promote**: add to the active queue. 
Prefer clusters with `[autofix: ...]` hints + (lower risk) and low false-positive rates from observe sampling. + - **skip**: explicitly skip with a reason citing the observe sampling results + (e.g., "80% false positive rate per observe sampling", "low value"). + - **supersede**: absorb into a review cluster when the same files or root cause overlap. You MUST make an explicit decision for every auto-cluster. Include a `## Backlog Decisions` section listing each auto-cluster with: promote, skip (with reason), or supersede. For unclustered items: promote individually or group related ones into a manual cluster. - Mechanical items are NOT part of the Coverage Ledger — that ledger remains review-issues only. + The Coverage Ledger remains review-issues only — auto-clusters are covered by Backlog Decisions. 8. **Account for every issue exactly once** — every open issue hash must appear in exactly one cluster line or one skip line. Do not drop hashes, and do not repeat a hash in multiple clusters or in both a cluster and a skip. 
diff --git a/desloppify/app/commands/plan/triage/stages/evidence_parsing.py b/desloppify/app/commands/plan/triage/stages/evidence_parsing.py index 2ed5c2335..1197da778 100644 --- a/desloppify/app/commands/plan/triage/stages/evidence_parsing.py +++ b/desloppify/app/commands/plan/triage/stages/evidence_parsing.py @@ -58,6 +58,17 @@ class ObserveEvidence: has_parseable_ids: bool = True # False if valid_ids had no hex-hash IDs +@dataclass +class ClusterVerdict: + """A cluster-level verdict from observe-stage sampling.""" + + cluster_name: str + verdict: str # "actionable", "mostly-false-positives", "mixed", "low-value" + sample_count: int = 0 + false_positive_rate: float = 0.0 + recommendation: str = "" # "promote", "skip", "break_up" + + @dataclass class DecisionLedger: """Parsed keep/tighten/skip coverage from a value-check report.""" @@ -430,6 +441,96 @@ def validate_report_has_file_paths(report: str) -> list[EvidenceFailure]: )] +# --------------------------------------------------------------------------- +# Cluster-level verdict parsing (observe stage) +# --------------------------------------------------------------------------- + +_CLUSTER_VERDICT_KEYWORDS = frozenset({ + "actionable", "mostly-false-positives", "mostly false positives", + "mixed", "low-value", "low value", +}) + +# Matches: - cluster: auto/security-B602 +_YAML_CLUSTER_RE = re.compile(r"^\s*-?\s*cluster\s*:\s*(\S+)", re.IGNORECASE) +_YAML_SAMPLE_COUNT_RE = re.compile(r"^\s*sample_count\s*:\s*(\d+)", re.IGNORECASE) +_YAML_FP_RATE_RE = re.compile(r"^\s*false_positive_rate\s*:\s*([\d.]+)", re.IGNORECASE) + + +def parse_cluster_verdicts(report: str) -> list[ClusterVerdict]: + """Parse cluster-level verdicts from an observe-stage report. 
+ + Supports YAML-like format: + - cluster: auto/security-B602 + verdict: mostly-false-positives + sample_count: 5 + false_positive_rate: 0.8 + recommendation: skip + """ + verdicts: list[ClusterVerdict] = [] + current: dict | None = None + + for line in report.splitlines(): + m_cluster = _YAML_CLUSTER_RE.match(line) + if m_cluster: + if current is not None: + v = _flush_cluster_verdict(current) + if v: + verdicts.append(v) + current = {"cluster": m_cluster.group(1).strip()} + continue + + if current is None: + continue + + m_verdict = _YAML_VERDICT_RE.match(line) + if m_verdict: + current["verdict"] = m_verdict.group(1).strip() + continue + + m_sample = _YAML_SAMPLE_COUNT_RE.match(line) + if m_sample: + current["sample_count"] = int(m_sample.group(1)) + continue + + m_fp = _YAML_FP_RATE_RE.match(line) + if m_fp: + try: + current["false_positive_rate"] = float(m_fp.group(1)) + except ValueError: + pass + continue + + m_rec = _YAML_RECOMMENDATION_RE.match(line) + if m_rec: + current["recommendation"] = m_rec.group(1).strip() + continue + + if current is not None: + v = _flush_cluster_verdict(current) + if v: + verdicts.append(v) + + return verdicts + + +def _flush_cluster_verdict(current: dict) -> ClusterVerdict | None: + """Convert a collected cluster verdict dict into a ClusterVerdict.""" + cluster_name = current.get("cluster", "") + if not cluster_name: + return None + raw_verdict = current.get("verdict", "") + # Accept any verdict text (don't enforce keywords — let the LLM express itself) + if not raw_verdict: + return None + return ClusterVerdict( + cluster_name=cluster_name, + verdict=raw_verdict.lower().strip(), + sample_count=current.get("sample_count", 0), + false_positive_rate=current.get("false_positive_rate", 0.0), + recommendation=current.get("recommendation", ""), + ) + + _VALUE_LEDGER_RE = re.compile( r"^\s*-\s*(?P.+?)\s*->\s*(?Pkeep|tighten|skip)\s*$", re.IGNORECASE, @@ -497,12 +598,14 @@ def resolve_short_hash_to_full_id(short_hash: str, valid_ids: 
set[str]) -> str | __all__ = [ + "ClusterVerdict", "DecisionLedger", "EvidenceFailure", "ObserveAssessment", "ObserveEvidence", "VERDICT_KEYWORDS", "format_evidence_failures", + "parse_cluster_verdicts", "parse_value_check_decision_ledger", "parse_observe_evidence", "resolve_short_hash_to_full_id", diff --git a/desloppify/app/commands/plan/triage/stages/helpers.py b/desloppify/app/commands/plan/triage/stages/helpers.py index 880292aa7..bf1860d8f 100644 --- a/desloppify/app/commands/plan/triage/stages/helpers.py +++ b/desloppify/app/commands/plan/triage/stages/helpers.py @@ -4,7 +4,7 @@ from desloppify.base.output.terminal import colorize from desloppify.engine._plan.constants import is_synthetic_id -from desloppify.engine._state.issue_semantics import is_triage_finding +from desloppify.engine._state.issue_semantics import is_review_work_item, is_triage_finding from desloppify.engine.plan_triage import TRIAGE_IDS from ..review_coverage import ( @@ -155,10 +155,13 @@ def unclustered_review_issues(plan: dict, state: dict | None = None) -> list[str } if state is not None: + # Only count review-type issues for ledger purposes — mechanical + # defects are covered by cluster-level backlog decisions, not + # per-item ledger entries. 
review_ids = [ fid for fid, finding in (state.get("work_items") or state.get("issues", {})).items() if finding.get("status") == "open" - and is_triage_finding(finding) + and is_review_work_item(finding) ] frozen_ids = (plan.get("epic_triage_meta", {}) or {}).get("active_triage_issue_ids") if isinstance(frozen_ids, list) and frozen_ids: diff --git a/desloppify/app/commands/plan/triage/stages/observe.py b/desloppify/app/commands/plan/triage/stages/observe.py index 081089c2a..0ee922e94 100644 --- a/desloppify/app/commands/plan/triage/stages/observe.py +++ b/desloppify/app/commands/plan/triage/stages/observe.py @@ -96,6 +96,7 @@ def cmd_stage_observe( from .evidence_parsing import ( format_evidence_failures, + parse_cluster_verdicts, parse_observe_evidence, resolve_short_hash_to_full_id, validate_observe_evidence, @@ -104,6 +105,7 @@ def cmd_stage_observe( valid_ids = set(review_issues.keys()) cited = resolved_services.extract_issue_citations(report, valid_ids) evidence = parse_observe_evidence(report, valid_ids) + cluster_verdicts = parse_cluster_verdicts(report) evidence_failures = validate_observe_evidence(evidence, issue_count) blocking = [failure for failure in evidence_failures if failure.blocking] advisory = [failure for failure in evidence_failures if not failure.blocking] @@ -144,6 +146,19 @@ def cmd_stage_observe( } meta["issue_dispositions"] = dispositions + # Store cluster-level verdicts from auto-cluster sampling + if cluster_verdicts: + meta["cluster_verdicts"] = [ + { + "cluster": v.cluster_name, + "verdict": v.verdict, + "sample_count": v.sample_count, + "false_positive_rate": v.false_positive_rate, + "recommendation": v.recommendation, + } + for v in cluster_verdicts + ] + cleared = record_observe_stage( stages, report=report, diff --git a/desloppify/app/commands/plan/triage/validation/reflect_accounting.py b/desloppify/app/commands/plan/triage/validation/reflect_accounting.py index 4f44374f3..b9b562047 100644 --- 
a/desloppify/app/commands/plan/triage/validation/reflect_accounting.py +++ b/desloppify/app/commands/plan/triage/validation/reflect_accounting.py @@ -445,19 +445,19 @@ def validate_backlog_decisions( report: str, auto_cluster_names: list[str], ) -> tuple[bool, list[str]]: - """Warn when auto-clusters exist but no backlog decisions section is found. + """Require every auto-cluster to have an explicit backlog decision. - Returns ``(ok, warnings)`` — always ``ok=True`` (non-blocking), but - populates warnings when backlog decisions are missing or incomplete. + Returns ``(ok, messages)`` — ``ok=False`` (blocking) when auto-clusters + are missing decisions. Every auto-cluster must be accounted for. """ if not auto_cluster_names: return True, [] found_section, _ = _iter_backlog_decisions_lines(report) if not found_section: - return True, [ - f"Reflect report has {len(auto_cluster_names)} auto-cluster(s) in the backlog " - "but no `## Backlog Decisions` section. Each auto-cluster should have an " + return False, [ + f"Reflect report has {len(auto_cluster_names)} auto-cluster(s) " + "but no `## Backlog Decisions` section. Every auto-cluster must have an " "explicit decision: promote, skip (with reason), or supersede." 
] @@ -467,7 +467,7 @@ def validate_backlog_decisions( if missing: missing_str = ", ".join(missing[:10]) suffix = f" (and {len(missing) - 10} more)" if len(missing) > 10 else "" - return True, [ + return False, [ f"Backlog Decisions section is missing decisions for {len(missing)} " f"auto-cluster(s): {missing_str}{suffix}" ] diff --git a/desloppify/engine/_plan/cluster_strategy.py b/desloppify/engine/_plan/cluster_strategy.py index baae1ac3a..0c6c25051 100644 --- a/desloppify/engine/_plan/cluster_strategy.py +++ b/desloppify/engine/_plan/cluster_strategy.py @@ -32,8 +32,16 @@ def grouping_key(issue: dict, meta: DetectorMeta | None) -> str | None: return None if meta.action_type == "auto_fix": + detail = issue.get("detail") or {} + kind = detail.get("kind", "") + if kind: + return f"auto::{detector}::{kind}" return f"auto::{detector}" + detail = issue.get("detail") or {} + kind = detail.get("kind", "") + if kind: + return f"detector::{detector}::{kind}" return f"detector::{detector}" diff --git a/desloppify/engine/_plan/operations/cluster.py b/desloppify/engine/_plan/operations/cluster.py index 1c9fd6632..2dd22e3e0 100644 --- a/desloppify/engine/_plan/operations/cluster.py +++ b/desloppify/engine/_plan/operations/cluster.py @@ -98,16 +98,25 @@ def add_to_cluster( cluster_name: str, issue_ids: list[str], ) -> int: - """Add issue IDs to a cluster. Returns count added.""" + """Add issue IDs to a cluster. Returns count added. + + For non-auto clusters, also ensures the issue IDs appear in + ``queue_order`` so they're visible in ``desloppify next``. 
+ """ ensure_plan_defaults(plan) cluster = _cluster_or_raise(plan, cluster_name) member_ids: list[str] = cluster["issue_ids"] + queue_order: list[str] = plan.get("queue_order", []) + is_manual = not cluster.get("auto") count = 0 now = utc_now() for fid in issue_ids: if fid not in member_ids: member_ids.append(fid) count += 1 + # Ensure manual cluster members are in the queue + if is_manual and fid not in queue_order: + queue_order.append(fid) _upsert_cluster_override( plan, fid, diff --git a/desloppify/engine/_plan/policy/stale.py b/desloppify/engine/_plan/policy/stale.py index 17f12f8d0..a6dd32b74 100644 --- a/desloppify/engine/_plan/policy/stale.py +++ b/desloppify/engine/_plan/policy/stale.py @@ -5,20 +5,36 @@ import hashlib from desloppify.base.config import DEFAULT_TARGET_STRICT_SCORE -from desloppify.engine._state.issue_semantics import is_triage_finding from desloppify.engine._state.schema import StateModel from desloppify.engine._work_queue.helpers import slugify from desloppify.engine.planning.scorecard_projection import all_subjective_entries def open_review_ids(state: StateModel) -> set[str]: - """Return IDs of open review/concerns issues from state.""" + """Return IDs of open review/concerns issues from state. + + With the unified pipeline, ``is_triage_finding`` now includes mechanical + defects. For staleness/snapshot purposes we still track only review-type + issues — mechanical changes use threshold-based staleness via + ``is_triage_stale``. 
+ """ + from desloppify.engine._state.issue_semantics import is_review_work_item return { fid for fid, f in (state.get("work_items") or state.get("issues", {})).items() - if f.get("status") == "open" and is_triage_finding(f) + if f.get("status") == "open" and is_review_work_item(f) } +def open_mechanical_count(state: StateModel) -> int: + """Return the count of open mechanical defects from state.""" + from desloppify.engine._state.issue_semantics import is_objective_finding + return sum( + 1 + for f in (state.get("work_items") or state.get("issues", {})).values() + if f.get("status") == "open" and is_objective_finding(f) + ) + + def _subjective_entry_id( dimension_key: object, *, @@ -154,8 +170,18 @@ def compute_new_issue_ids(plan: dict, state: StateModel) -> set[str]: def is_triage_stale( plan: dict, state: StateModel, + *, + mechanical_growth_threshold: float = 0.10, ) -> bool: - """Return True when genuinely new review issues appeared since last triage. + """Return True when triage should be re-run. + + Stale when: + - ANY new review issues appeared since last triage, OR + - Mechanical defect count grew by more than *mechanical_growth_threshold* + (default 10%) since last triage. + + The threshold prevents trivial scan changes from forcing full re-triage + while still catching significant new mechanical findings. 
In-progress triage (confirmed stages + stage IDs in queue) is NOT considered stale — the lifecycle filter in the work queue already @@ -165,7 +191,23 @@ def is_triage_stale( triaged_ids = set(meta.get("triaged_ids", [])) active_ids = set(meta.get("active_triage_issue_ids", [])) known = triaged_ids | active_ids - return bool(open_review_ids(state) - known) + + # Any new review issue → stale + if open_review_ids(state) - known: + return True + + # Check mechanical growth threshold + last_mechanical = meta.get("last_mechanical_count", 0) + current_mechanical = open_mechanical_count(state) + if last_mechanical > 0: + growth = (current_mechanical - last_mechanical) / last_mechanical + if growth > mechanical_growth_threshold: + return True + elif current_mechanical > 0 and not known: + # First triage with mechanical issues + return True + + return False __all__ = [ @@ -174,6 +216,7 @@ def is_triage_stale( "current_under_target_ids", "current_unscored_ids", "is_triage_stale", + "open_mechanical_count", "open_review_ids", "review_issue_snapshot_hash", ] diff --git a/desloppify/engine/_plan/triage/prompt.py b/desloppify/engine/_plan/triage/prompt.py index 8a41cc06b..df186a4b0 100644 --- a/desloppify/engine/_plan/triage/prompt.py +++ b/desloppify/engine/_plan/triage/prompt.py @@ -170,15 +170,29 @@ def _recurring_dimensions( def _split_open_issue_buckets( issues: dict[str, dict], ) -> tuple[dict[str, dict], dict[str, dict]]: + """Split open issues into review (individual triage) and mechanical (cluster triage). + + With the unified pipeline, all defects are triage findings. Review issues + get individual treatment; mechanical defects flow through auto-cluster + summaries. 
+ """ open_review: dict[str, dict] = {} open_mechanical: dict[str, dict] = {} for issue_id, issue in issues.items(): if issue.get("status") != "open": continue - if is_triage_finding(issue): + kind = issue.get("work_item_kind", issue.get("issue_kind", "")) + if kind in ("review_defect", "review_concern"): open_review[issue_id] = issue - continue - open_mechanical[issue_id] = issue + elif kind == "mechanical_defect": + open_mechanical[issue_id] = issue + elif is_triage_finding(issue): + # Fallback for items without explicit kind — infer from semantics + from desloppify.engine._state.issue_semantics import is_review_work_item + if is_review_work_item(issue): + open_review[issue_id] = issue + else: + open_mechanical[issue_id] = issue return open_review, open_mechanical @@ -262,10 +276,10 @@ def collect_triage_input(plan: PlanModel, state: StateModel) -> TriageInput: - `desloppify scan` — re-scan after making changes to verify progress - `desloppify show review --status open` — see all open review issues -Your output defines the active work plan for review findings and any explicitly -promoted backlog work. Mechanical backlog items you do not mention remain in -backlog by default. Dismissed issues will be removed from the queue with your -stated reason. +Your output defines the active work plan for all open defects. Review issues are +triaged individually; auto-clusters are triaged at the cluster level (promote/skip/break_up). +Every auto-cluster must have an explicit decision. Dismissed issues will be removed from +the queue with your stated reason. 
Respond with a single JSON object matching this schema: { @@ -472,18 +486,19 @@ def _append_mechanical_backlog_section( ) parts.append( - "## Mechanical backlog " + "## Auto-cluster candidates " f"({len(objective_backlog_issues)} items: {clustered_issue_count} in " f"{auto_cluster_count} auto-clusters, {len(unclustered)} unclustered)" ) parts.append( - "These detector-created items stay in backlog unless you explicitly promote them into the active queue." + "These are detector-created findings grouped by rule type. Each auto-cluster " + "is a first-class triage candidate — decide its fate just like review issues." ) parts.append( "You MUST make an explicit decision for each auto-cluster listed below. " "Include every auto-cluster in your `auto_cluster_decisions` output with one of: " "promote (add to active queue with a priority position), " - "skip (leave in backlog with a reason), or " + "skip (with a specific reason — e.g. 'mostly false positives per sampling'), or " "break_up (split into smaller sub-clusters with a reason)." ) @@ -503,9 +518,8 @@ def _append_mechanical_backlog_section( if rendered_clusters: parts.append("### Auto-clusters (decision required for each)") parts.append( - "These are pre-grouped detector findings. You must decide for each cluster: " - "promote (into active queue), skip (with reason), or break_up (into sub-clusters). " - "Include your decisions in the `auto_cluster_decisions` array in your response." + "Each cluster below includes a statistical summary with severity breakdown " + "and sample issues. Decide for each: promote, skip (with reason), or break_up." 
) rendered_clusters.sort(key=lambda item: (-item[2], item[0])) visible_clusters = rendered_clusters[:15] @@ -515,6 +529,10 @@ def _append_mechanical_backlog_section( summary = _cluster_backlog_summary(name, cluster, member_count) parts.append(f"- {name} ({member_count} items){hint_suffix}") parts.append(f" {summary}") + # Statistical summary: severity/confidence breakdown + samples + stats = _cluster_stats(cluster, objective_backlog_issues) + if stats: + parts.append(f" {stats}") if len(rendered_clusters) > len(visible_clusters): remaining = rendered_clusters[len(visible_clusters):] remaining_issues = sum(item[2] for item in remaining) @@ -552,6 +570,57 @@ def _append_mechanical_backlog_section( parts.append("") +def _cluster_stats(cluster: dict[str, Any], all_issues: dict[str, dict]) -> str: + """Build a compact statistical summary for an auto-cluster.""" + issue_ids = cluster.get("issue_ids", []) + if not isinstance(issue_ids, list): + return "" + members = [ + all_issues[iid] for iid in issue_ids + if isinstance(iid, str) and iid in all_issues + ] + if not members: + return "" + + # Severity/confidence breakdown + from collections import Counter + severities: Counter[str] = Counter() + confidences: Counter[str] = Counter() + rules: Counter[str] = Counter() + for m in members: + detail = m.get("detail") or {} + if isinstance(detail, dict): + severities[detail.get("severity", "unknown")] += 1 + rules[detail.get("kind", detail.get("test_name", "unknown"))] += 1 + confidences[str(m.get("confidence", "medium"))] += 1 + + parts: list[str] = [] + if severities: + sev_str = ", ".join(f"{k}: {v}" for k, v in severities.most_common(3)) + parts.append(f"severity=[{sev_str}]") + if confidences: + conf_str = ", ".join(f"{k}: {v}" for k, v in confidences.most_common(3)) + parts.append(f"confidence=[{conf_str}]") + if rules: + top_rules = rules.most_common(5) + rule_str = ", ".join(f"{k}({v})" for k, v in top_rules) + if len(rules) > 5: + rule_str += f", +{len(rules) - 5} 
more" + parts.append(f"top_rules=[{rule_str}]") + + # Sample issues (3-5) + samples = members[:5] + sample_strs = [] + for s in samples: + f = s.get("file", "") + summary = str(s.get("summary", ""))[:80] + sample_strs.append(f"{f}: {summary}") + if sample_strs: + parts.append("samples: " + " | ".join(sample_strs)) + + return " ".join(parts) + + def _cluster_autofix_hint(cluster: dict[str, Any]) -> str: return cluster_autofix_hint(cluster) or "" diff --git a/desloppify/engine/_scoring/subjective/core.py b/desloppify/engine/_scoring/subjective/core.py index ca7ed9c62..9df373d97 100644 --- a/desloppify/engine/_scoring/subjective/core.py +++ b/desloppify/engine/_scoring/subjective/core.py @@ -10,7 +10,7 @@ ) from desloppify.base.text_utils import is_numeric from desloppify.engine._scoring.policy.core import SUBJECTIVE_CHECKS -from desloppify.engine._state.issue_semantics import is_triage_finding +from desloppify.engine._state.issue_semantics import is_review_work_item def _display_fallback(dim_name: str) -> str: @@ -173,7 +173,7 @@ def _subjective_issue_count( return sum( 1 for issue in issues.values() - if is_triage_finding(issue) + if is_review_work_item(issue) and issue.get("status") in failure_set and _normalize_dimension_key(issue.get("detail", {}).get("dimension")) == dim_name ) diff --git a/desloppify/engine/_state/issue_semantics.py b/desloppify/engine/_state/issue_semantics.py index 1da50dba0..de832f280 100644 --- a/desloppify/engine/_state/issue_semantics.py +++ b/desloppify/engine/_state/issue_semantics.py @@ -166,7 +166,7 @@ def is_review_work_item(issue: Mapping[str, Any]) -> bool: def is_triage_finding(issue: Mapping[str, Any]) -> bool: - return is_review_work_item(issue) + return is_defect_work_item(issue) def is_assessment_request(issue: Mapping[str, Any]) -> bool: diff --git a/desloppify/engine/_work_queue/synthetic.py b/desloppify/engine/_work_queue/synthetic.py index e7997d7b8..7bfdd6d63 100644 --- a/desloppify/engine/_work_queue/synthetic.py +++ 
b/desloppify/engine/_work_queue/synthetic.py @@ -10,7 +10,7 @@ from desloppify.engine.plan_triage import TRIAGE_STAGE_SPECS from desloppify.engine._scoring.subjective.core import DISPLAY_NAMES -from desloppify.engine._state.issue_semantics import is_triage_finding +from desloppify.engine._state.issue_semantics import is_review_work_item, is_triage_finding from desloppify.engine._state.schema import StateModel from desloppify.engine._work_queue.helpers import ( detail_dict, @@ -221,11 +221,13 @@ def build_subjective_items( } # Review issues are keyed by raw dimension name (snake_case). + # Only review-type issues contribute to subjective dimension counts, + # not mechanical defects (even those with a dimension field). review_open_by_dim: dict[str, int] = {} for issue in issues.values(): if issue.get("status") != "open": continue - if is_triage_finding(issue): + if is_review_work_item(issue): dim_key = str(detail_dict(issue).get("dimension", "")).strip().lower() if dim_key: review_open_by_dim[dim_key] = review_open_by_dim.get(dim_key, 0) + 1 diff --git a/desloppify/languages/python/_security.py b/desloppify/languages/python/_security.py index 9930b89f0..9b623657d 100644 --- a/desloppify/languages/python/_security.py +++ b/desloppify/languages/python/_security.py @@ -4,6 +4,7 @@ import shutil +from desloppify.base.config import load_config from desloppify.base.discovery.source import collect_exclude_dirs from desloppify.languages._framework.base.types import DetectorCoverageStatus, LangSecurityResult from desloppify.languages.python.detectors.bandit_adapter import detect_with_bandit @@ -32,13 +33,27 @@ def python_scan_coverage_prerequisites() -> list[DetectorCoverageStatus]: return [missing_bandit_coverage()] +def _load_bandit_skip_tests() -> list[str] | None: + """Read ``languages.python.bandit_skip_tests`` from project config.""" + cfg = load_config() + lang_cfg = cfg.get("languages", {}) + py_cfg = lang_cfg.get("python", {}) if isinstance(lang_cfg, dict) else {} + 
raw = py_cfg.get("bandit_skip_tests") if isinstance(py_cfg, dict) else None + if isinstance(raw, list) and all(isinstance(t, str) for t in raw): + return raw + return None + + def detect_python_security(files, zone_map) -> LangSecurityResult: scan_root = scan_root_from_files(files) if scan_root is None: return LangSecurityResult(entries=[], files_scanned=0) exclude_dirs = collect_exclude_dirs(scan_root) - result = detect_with_bandit(scan_root, zone_map, exclude_dirs=exclude_dirs) + skip_tests = _load_bandit_skip_tests() + result = detect_with_bandit( + scan_root, zone_map, exclude_dirs=exclude_dirs, skip_tests=skip_tests, + ) coverage = result.status.coverage() return LangSecurityResult( entries=result.entries, diff --git a/desloppify/languages/python/detectors/bandit_adapter.py b/desloppify/languages/python/detectors/bandit_adapter.py index 3c66f2c12..361daed3f 100644 --- a/desloppify/languages/python/detectors/bandit_adapter.py +++ b/desloppify/languages/python/detectors/bandit_adapter.py @@ -155,8 +155,10 @@ def _to_security_entry( raw_severity = result.get("issue_severity", "MEDIUM").upper() raw_confidence = result.get("issue_confidence", "MEDIUM").upper() - # Suppress LOW-severity + LOW-confidence (very noisy, low signal). - if raw_severity == "LOW" and raw_confidence == "LOW": + # Suppress noisy low-signal combinations: + # - LOW severity + LOW confidence (very noisy, low signal) + # - MEDIUM severity + LOW confidence (e.g. "tokenizer_name" flagged as hardcoded secret) + if raw_confidence == "LOW" and raw_severity in ("LOW", "MEDIUM"): return None tier = _SEVERITY_TO_TIER.get(raw_severity, 3) @@ -188,6 +190,7 @@ def detect_with_bandit( zone_map: FileZoneMap | None, timeout: int = 120, exclude_dirs: list[str] | None = None, + skip_tests: list[str] | None = None, ) -> BanditScanResult: """Run bandit on *path* and return issues + typed execution status. @@ -197,6 +200,9 @@ def detect_with_bandit( Absolute directory paths to pass to bandit's ``--exclude`` flag. 
When non-empty, bandit will skip these directories during its recursive scan. + skip_tests: + Bandit test IDs to suppress via ``--skip`` (e.g. ``["B101", "B601"]``). + Allows users to disable entire rule families from ``config.json``. """ cmd = [ sys.executable, @@ -209,6 +215,8 @@ def detect_with_bandit( ] if exclude_dirs: cmd.extend(["--exclude", ",".join(exclude_dirs)]) + if skip_tests: + cmd.extend(["--skip", ",".join(skip_tests)]) cmd.append(str(path.resolve())) try: diff --git a/desloppify/tests/commands/plan/test_cluster_ops_direct.py b/desloppify/tests/commands/plan/test_cluster_ops_direct.py index 9a7e1e8e6..42ccfc29c 100644 --- a/desloppify/tests/commands/plan/test_cluster_ops_direct.py +++ b/desloppify/tests/commands/plan/test_cluster_ops_direct.py @@ -33,7 +33,7 @@ def test_cluster_steps_print_step_variants(capsys) -> None: assert "1. [ ] Structured" in out assert "line one" in out assert "Refs: x, y" in out - assert "(completed)" in out + assert "2. [x] Done step" in out def test_cluster_display_helpers_and_renderers(monkeypatch, capsys) -> None: @@ -71,14 +71,13 @@ def test_cluster_display_helpers_and_renderers(monkeypatch, capsys) -> None: cluster_display_mod._cmd_cluster_show(argparse.Namespace(cluster_name="alpha")) out_show = capsys.readouterr().out assert "Cluster: alpha" in out_show - assert "Members (1)" in out_show - assert "File: src/a.py at lines: 3, 7" in out_show + assert "Members (1): i1" in out_show cluster_display_mod._cmd_cluster_list( argparse.Namespace(verbose=True, missing_steps=False) ) out_list = capsys.readouterr().out - assert "Clusters (2 total" in out_list + assert "2 clusters" in out_list assert "alpha" in out_list assert "beta" in out_list diff --git a/desloppify/tests/lang/python/test_python_security_dictkeys_and_smells_split_direct.py b/desloppify/tests/lang/python/test_python_security_dictkeys_and_smells_split_direct.py index c667ae242..8870e9dda 100644 --- 
a/desloppify/tests/lang/python/test_python_security_dictkeys_and_smells_split_direct.py +++ b/desloppify/tests/lang/python/test_python_security_dictkeys_and_smells_split_direct.py @@ -41,7 +41,7 @@ def coverage(self): monkeypatch.setattr( py_security_mod, "detect_with_bandit", - lambda _root, _zone_map, *, exclude_dirs: SimpleNamespace( + lambda _root, _zone_map, *, exclude_dirs, skip_tests=None: SimpleNamespace( entries=[{"file": "a.py", "line": 1}], files_scanned=3, status=_Status(), diff --git a/desloppify/tests/plan/test_epic_triage_apply.py b/desloppify/tests/plan/test_epic_triage_apply.py index 1258b06a8..94a3b4af1 100644 --- a/desloppify/tests/plan/test_epic_triage_apply.py +++ b/desloppify/tests/plan/test_epic_triage_apply.py @@ -575,14 +575,15 @@ def test_strategy_summary_in_result(self): assert result.strategy_summary == "My strategy" - def test_only_open_review_issues_in_triaged_ids(self): - """triaged_ids should only contain IDs of open review/concerns issues.""" + def test_only_open_defect_issues_in_triaged_ids(self): + """triaged_ids should contain IDs of all open defect issues (unified pipeline).""" state: dict = { "issues": { "r1": {"status": "open", "detector": "review"}, "r2": {"status": "fixed", "detector": "review"}, "u1": {"status": "open", "detector": "unused"}, "c1": {"status": "open", "detector": "concerns"}, + "a1": {"status": "open", "detector": "subjective_review"}, }, "scan_count": 1, "dimension_scores": {}, @@ -595,9 +596,10 @@ def test_only_open_review_issues_in_triaged_ids(self): triaged = plan["epic_triage_meta"]["triaged_ids"] assert "r1" in triaged assert "c1" in triaged - # Fixed review and non-review issues should not appear + assert "u1" in triaged # mechanical defects are now triage findings + # Fixed issues and assessment requests should not appear assert "r2" not in triaged - assert "u1" not in triaged + assert "a1" not in triaged # --------------------------------------------------------------------------- diff --git 
a/desloppify/tests/plan/test_epic_triage_prompt_direct.py b/desloppify/tests/plan/test_epic_triage_prompt_direct.py index 6fd9968ec..072da5be7 100644 --- a/desloppify/tests/plan/test_epic_triage_prompt_direct.py +++ b/desloppify/tests/plan/test_epic_triage_prompt_direct.py @@ -134,7 +134,7 @@ def test_build_triage_prompt_includes_mechanical_backlog_context() -> None: prompt = build_triage_prompt(triage_input) - assert "## Mechanical backlog (2 items: 1 in 1 auto-clusters, 1 unclustered)" in prompt + assert "## Auto-cluster candidates (2 items: 1 in 1 auto-clusters, 1 unclustered)" in prompt assert "### Auto-clusters (decision required for each)" in prompt assert "- auto/unused-imports (1 items) [autofix: desloppify autofix import-cleanup --dry-run]" in prompt assert "Remove 1 unused import issue" in prompt diff --git a/desloppify/tests/plan/test_stale_policy.py b/desloppify/tests/plan/test_stale_policy.py index 9112197fb..701534713 100644 --- a/desloppify/tests/plan/test_stale_policy.py +++ b/desloppify/tests/plan/test_stale_policy.py @@ -512,13 +512,28 @@ def test_closed_review_issues_ignored(self): } assert is_triage_stale(plan, state) is False - def test_non_review_issues_ignored(self): + def test_mechanical_issues_trigger_staleness_on_first_triage(self): + """Mechanical issues trigger staleness when no prior triage exists.""" plan = {"epic_triage_meta": {"triaged_ids": []}} state = { "issues": { "u1": {"status": "open", "detector": "unused"}, } } + assert is_triage_stale(plan, state) is True + + def test_mechanical_within_threshold_not_stale(self): + """Mechanical count growth within threshold does not trigger staleness.""" + plan = {"epic_triage_meta": { + "triaged_ids": ["r1"], + "last_mechanical_count": 100, + }} + state = { + "issues": { + "r1": {"status": "open", "detector": "review"}, + **{f"u{i}": {"status": "open", "detector": "unused"} for i in range(105)}, + } + } assert is_triage_stale(plan, state) is False def 
test_not_stale_when_stages_in_queue_but_all_triaged(self): From ad784a851cdd66dc8b473b6b15933d1085968b95 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:00:43 +0100 Subject: [PATCH 07/43] feat: lifecycle transition messages and agent directives Add transition_messages config and directives CLI for phase-specific agent instructions (model switching, constraints). Emit transition messages at lifecycle phase changes across resolve, skip, reopen, review import, and reconcile flows. Auto-focus cluster during mid-cluster execution so desloppify next stays in context. Hermes reset includes cluster-aware next-task instructions. Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/cli_support/parser.py | 2 + desloppify/app/cli_support/parser_groups.py | 2 + .../app/cli_support/parser_groups_admin.py | 13 ++ desloppify/app/commands/directives.py | 166 ++++++++++++++ .../commands/helpers/transition_messages.py | 202 ++++++++++++++++++ desloppify/app/commands/plan/override/misc.py | 12 +- .../plan/override/resolve_workflow.py | 9 +- desloppify/app/commands/plan/override/skip.py | 24 ++- desloppify/app/commands/registry.py | 2 + .../app/commands/resolve/living_plan.py | 37 +++- desloppify/app/commands/resolve/messages.py | 62 ++++++ .../commands/review/importing/plan_sync.py | 10 + desloppify/app/commands/status/flow.py | 6 + desloppify/base/config/schema.py | 18 ++ .../commands/test_transition_messages.py | 100 +++++++++ 15 files changed, 650 insertions(+), 15 deletions(-) create mode 100644 desloppify/app/commands/directives.py create mode 100644 desloppify/app/commands/helpers/transition_messages.py create mode 100644 desloppify/tests/commands/test_transition_messages.py diff --git a/desloppify/app/cli_support/parser.py b/desloppify/app/cli_support/parser.py index d558f586c..840bf56b3 100644 --- a/desloppify/app/cli_support/parser.py +++ b/desloppify/app/cli_support/parser.py @@ -10,6 +10,7 @@ _add_backlog_parser, _add_config_parser, 
_add_detect_parser, + _add_directives_parser, _add_dev_parser, _add_exclude_parser, _add_autofix_parser, @@ -139,6 +140,7 @@ def create_parser(*, langs: list[str], detector_names: list[str]) -> argparse.Ar # configure _add_zone_parser(sub) _add_config_parser(sub) + _add_directives_parser(sub) _add_langs_parser(sub) _add_dev_parser(sub) _add_update_skill_parser(sub) diff --git a/desloppify/app/cli_support/parser_groups.py b/desloppify/app/cli_support/parser_groups.py index 90aae0ce2..3101ac4d6 100644 --- a/desloppify/app/cli_support/parser_groups.py +++ b/desloppify/app/cli_support/parser_groups.py @@ -7,6 +7,7 @@ from desloppify.app.cli_support.parser_groups_admin import ( # noqa: F401 (re-exports) _add_config_parser, _add_detect_parser, + _add_directives_parser, _add_dev_parser, _add_autofix_parser, _add_langs_parser, @@ -25,6 +26,7 @@ "_add_backlog_parser", "_add_config_parser", "_add_detect_parser", + "_add_directives_parser", "_add_dev_parser", "_add_exclude_parser", "_add_autofix_parser", diff --git a/desloppify/app/cli_support/parser_groups_admin.py b/desloppify/app/cli_support/parser_groups_admin.py index 4cf629ce8..3b797d537 100644 --- a/desloppify/app/cli_support/parser_groups_admin.py +++ b/desloppify/app/cli_support/parser_groups_admin.py @@ -90,6 +90,17 @@ def _add_config_parser(sub) -> None: c_unset.add_argument("config_key", type=str, help="Config key name") +def _add_directives_parser(sub) -> None: + p = sub.add_parser("directives", help="View/set agent directives for phase transitions") + d_sub = p.add_subparsers(dest="directives_action") + d_sub.add_parser("show", help="Show all configured directives") + d_set = d_sub.add_parser("set", help="Set a directive for a lifecycle phase") + d_set.add_argument("phase", type=str, help="Lifecycle phase name") + d_set.add_argument("message", type=str, help="Message to show at this transition") + d_unset = d_sub.add_parser("unset", help="Remove a directive for a lifecycle phase") + d_unset.add_argument("phase", 
type=str, help="Lifecycle phase name") + + def _fixer_help_lines(langs: list[str]) -> list[str]: fixer_help_lines: list[str] = [] for lang_name in langs: @@ -168,6 +179,8 @@ def _add_dev_parser(sub) -> None: ) d_scaffold.set_defaults(wire_pyproject=True) + dev_sub.add_parser("test-hermes", help="Test Hermes model switching (switch and switch back)") + def _add_langs_parser(sub) -> None: sub.add_parser("langs", help="List all available language plugins with depth and tools") diff --git a/desloppify/app/commands/directives.py b/desloppify/app/commands/directives.py new file mode 100644 index 000000000..36d651ba0 --- /dev/null +++ b/desloppify/app/commands/directives.py @@ -0,0 +1,166 @@ +"""directives command: view and manage agent directives.""" + +from __future__ import annotations + +import argparse + +from desloppify.app.commands.helpers.command_runtime import command_runtime +from desloppify.base.config import save_config +from desloppify.base.exception_sets import CommandError +from desloppify.base.output.terminal import colorize +from desloppify.engine._plan.refresh_lifecycle import VALID_LIFECYCLE_PHASES + +# The four phases that actually matter as agent directive hooks. +# Each has: short description, when it fires, example use case. +_PHASES: list[tuple[str, str, str, str]] = [ + ( + "execute", + "Working the fix queue — code changes, refactors, and fixes.", + "Fires when the queue fills with work items after triage,\n" + " or when you unskip/reopen issues that push back into work mode.", + "Commit after every 3 fixes. Don't refactor beyond what the issue asks.", + ), + ( + "postflight", + "Execution done — transitioning into planning/review phases.", + "Fires when the execution queue drains and the system moves into\n" + " review, workflow, or triage. 
Catch-all for leaving work mode.", + "Stop and summarise what you fixed before continuing.", + ), + ( + "triage", + "Reading issues, deciding what's real, clustering into a plan.", + "Fires when workflow items complete and triage stages are injected,\n" + " or when review surfaces issues that need strategic decisions.", + "Open every flagged file before deciding. Skip nothing without reading the code.", + ), + ( + "review", + "Scoring subjective dimensions — reading code and assessing quality.", + "Fires when the system needs subjective scores (first review, stale\n" + " dimensions, or post-triage re-review). Covers all review sub-phases.", + "Use review_packet_blind.json only. Do not read previous scores or targets.", + ), + ( + "scan", + "Running detectors and analyzing the codebase.", + "Fires when the lifecycle resets to the scan phase after a cycle\n" + " completes or when no other phase applies.", + "Include --skip-slow if this is a mid-cycle rescan.", + ), +] + +_PHASE_NAMES = {name for name, _, _, _ in _PHASES} + +_EXAMPLE_DIRECTIVES = { + "execute": "Commit after every 3 fixes. Don't refactor beyond what the issue asks.", + "triage": "Open every flagged file before deciding. Skip nothing without reading the code.", + "review": "Use review_packet_blind.json only. 
Do not read previous scores or targets.", +} + + +def cmd_directives(args: argparse.Namespace) -> None: + """Handle directives subcommands: show, set, unset.""" + action = getattr(args, "directives_action", None) + if action == "set": + _directives_set(args) + elif action == "unset": + _directives_unset(args) + else: + _directives_show(args) + + +def _directives_show(args: argparse.Namespace) -> None: + """Show all phases with their directives (if configured).""" + config = command_runtime(args).config + messages = config.get("transition_messages", {}) + if not isinstance(messages, dict): + messages = {} + + active = { + phase: text + for phase, text in messages.items() + if isinstance(text, str) and text.strip() + } + + print(colorize("\n Agent Directives\n", "bold")) + print( + " Messages shown to AI agents at lifecycle phase transitions.\n" + " Use them to switch models, set constraints, or give context-\n" + " specific instructions at key moments in the workflow.\n" + ) + + for name, description, when, example_use in _PHASES: + directive = active.get(name) + if directive: + marker = colorize("*", "green") + print(f" {marker} {colorize(name, 'cyan')} {description}") + print(colorize(f" When: {when}", "dim")) + print(f" Directive: {directive}") + else: + print(f" {colorize(name, 'cyan')} {description}") + print(colorize(f" When: {when}", "dim")) + print(colorize(f" e.g.: {example_use}", "dim")) + print() + + count = len(active) + if count: + print(colorize(f" {count} directive{'s' if count != 1 else ''} configured.\n", "green")) + else: + print(colorize(" No directives configured.\n", "dim")) + + print(colorize(" Examples:", "dim")) + for phase, text in _EXAMPLE_DIRECTIVES.items(): + print(colorize(f' desloppify directives set {phase} "{text}"', "dim")) + print() + print(colorize(" Commands:", "dim")) + print(colorize(' desloppify directives set ""', "dim")) + print(colorize(" desloppify directives unset ", "dim")) + print() + + +def _directives_set(args: 
argparse.Namespace) -> None: + """Set a directive for a lifecycle phase.""" + phase = args.phase + text = args.message + + # Accept the main phases (including postflight) plus any valid lifecycle phase. + if phase != "postflight" and phase not in VALID_LIFECYCLE_PHASES: + valid = ", ".join(sorted(_PHASE_NAMES)) + raise CommandError(f"unknown phase {phase!r}; valid phases: {valid}") + + config = command_runtime(args).config + messages = config.get("transition_messages", {}) + if not isinstance(messages, dict): + messages = {} + messages[phase] = text + config["transition_messages"] = messages + + try: + save_config(config) + except OSError as e: + raise CommandError(f"could not save config: {e}") from e + print(colorize(f" Set directive for {phase}:", "green")) + print(f" {text}") + + +def _directives_unset(args: argparse.Namespace) -> None: + """Remove a directive for a lifecycle phase.""" + phase = args.phase + + config = command_runtime(args).config + messages = config.get("transition_messages", {}) + if not isinstance(messages, dict): + messages = {} + + if phase not in messages: + raise CommandError(f"no directive set for phase {phase!r}") + + del messages[phase] + config["transition_messages"] = messages + + try: + save_config(config) + except OSError as e: + raise CommandError(f"could not save config: {e}") from e + print(colorize(f" Removed directive for {phase}", "green")) diff --git a/desloppify/app/commands/helpers/transition_messages.py b/desloppify/app/commands/helpers/transition_messages.py new file mode 100644 index 000000000..24c840422 --- /dev/null +++ b/desloppify/app/commands/helpers/transition_messages.py @@ -0,0 +1,202 @@ +"""Emit user-configured messages at lifecycle phase transitions.""" + +from __future__ import annotations + +import json as _json +import logging +import os as _os +import urllib.error as _urlerr +import urllib.request as _urlreq + +from desloppify.base.config import load_config +from desloppify.base.output.user_message import 
print_user_message +from desloppify.engine._plan.refresh_lifecycle import ( + COARSE_PHASE_MAP, + LIFECYCLE_PHASE_EXECUTE, + LIFECYCLE_PHASE_SCAN, +) + +logger = logging.getLogger(__name__) + +# Phases that are NOT postflight — everything else counts as postflight. +_NON_POSTFLIGHT = frozenset({LIFECYCLE_PHASE_EXECUTE, LIFECYCLE_PHASE_SCAN}) + +_HERMES_PORT_FILE = _os.path.expanduser("~/.hermes/control_api.port") + + +def _hermes_available() -> bool: + """Check if Hermes integration is enabled in config.""" + try: + config = load_config() + except (OSError, ValueError): + return False + return bool(config.get("hermes_enabled", False)) + + +def _hermes_port() -> int: + try: + with open(_HERMES_PORT_FILE) as f: + return int(f.read().strip()) + except (OSError, ValueError): + return 47823 + + +def _hermes_get(path: str) -> dict: + """GET a Hermes control API endpoint. Stdlib-only, no deps.""" + url = f"http://127.0.0.1:{_hermes_port()}{path}" + req = _urlreq.Request(url, method="GET", + headers={"X-Hermes-Control": "1"}) + try: + with _urlreq.urlopen(req, timeout=5) as resp: + return _json.loads(resp.read()) + except _urlerr.HTTPError as e: + return _json.loads(e.read()) + except (_urlerr.URLError, OSError) as e: + return {"error": str(e)} + + +def _hermes_send_message(text: str, mode: str = "queue") -> dict: + """Send a message/command to the running Hermes agent. 
Stdlib-only, no deps.""" + url = f"http://127.0.0.1:{_hermes_port()}/sessions/_any/message" + data = _json.dumps({"text": text, "mode": mode}).encode() + req = _urlreq.Request(url, data=data, method="POST", + headers={"Content-Type": "application/json", + "X-Hermes-Control": "1"}) + try: + with _urlreq.urlopen(req, timeout=5) as resp: + return _json.loads(resp.read()) + except _urlerr.HTTPError as e: + return _json.loads(e.read()) + except (_urlerr.URLError, OSError) as e: + return {"error": str(e)} + + +def _resolve_hermes_model(phase: str, hermes_models: dict) -> str | None: + """Resolve a phase to a 'provider:model' string from hermes_models config. + + Lookup: exact phase → coarse phase → 'review' (fallback for non-execute). + Returns None if no model is configured for this phase. + """ + spec = hermes_models.get(phase) + if not spec: + coarse = COARSE_PHASE_MAP.get(phase) + if coarse: + spec = hermes_models.get(coarse) + if not spec and phase not in _NON_POSTFLIGHT: + spec = hermes_models.get("review") + return spec or None + + +def _switch_hermes_model(phase: str) -> bool: + """Switch the running Hermes agent's model based on the phase. + + Reads model mapping from hermes_models in config.json. + Returns True if switch was triggered. 
+ """ + if not _hermes_available(): + return False + + try: + config = load_config() + except (OSError, ValueError): + return False + + hermes_models = config.get("hermes_models", {}) + if not hermes_models: + return False + + spec = _resolve_hermes_model(phase, hermes_models) + if not spec: + return False + + try: + result = _hermes_send_message(f"/model {spec}", mode="interrupt") + if result.get("success"): + _hermes_send_message("continue", mode="queue") + print(f"🔄 Hermes model → {spec} (phase: {phase})") + return True + else: + logger.debug("Hermes model switch failed: %s", result.get("error", "")) + return False + except Exception as exc: + logger.debug("Hermes model switch skipped: %s", exc) + return False + + +_AUTOREPLY_PROMPT = ( + "You are an autonomous code repair agent working through a desloppify queue. " + "After each task, run the next desloppify command as instructed. " + "Do not stop or ask for confirmation — keep going until the queue is empty." +) + + +def _ensure_hermes_autoreply() -> None: + """Enable autoreply on the Hermes session if not already active. + + Checks the session state via GET /sessions/_any. If autoreply is + already enabled, does nothing — so it's safe to call on every + phase transition without clobbering an existing config. + """ + if not _hermes_available(): + return + try: + info = _hermes_get("/sessions/_any") + if info.get("autoreply", {}).get("enabled"): + return + _hermes_send_message( + f"/autoreply {_AUTOREPLY_PROMPT}", + mode="queue", + ) + logger.debug("Hermes autoreply enabled for desloppify session") + except Exception as exc: + logger.debug("Hermes autoreply check skipped: %s", exc) + + +def emit_transition_message(new_phase: str) -> bool: + """Print a transition message if one is configured for *new_phase*. + + Lookup order: exact phase → coarse phase → ``postflight`` (if the + phase is not execute/scan). + + Also triggers a Hermes model switch if the control API is available. 
+ + Returns True if a message was emitted. + """ + # Ensure autoreply is enabled so the agent keeps working autonomously + _ensure_hermes_autoreply() + + # Switch Hermes model for this phase (best-effort, non-blocking) + _switch_hermes_model(new_phase) + + try: + config = load_config() + except (OSError, ValueError) as exc: + logger.debug("transition message skipped (config load): %s", exc) + return False + + messages = config.get("transition_messages") + if not isinstance(messages, dict) or not messages: + return False + + # Try exact phase first, then coarse fallback, then postflight. + text = messages.get(new_phase) + if text is None: + coarse = COARSE_PHASE_MAP.get(new_phase) + if coarse and coarse != new_phase: + text = messages.get(coarse) + if text is None and new_phase not in _NON_POSTFLIGHT: + text = messages.get("postflight") + + if not isinstance(text, str) or not text.strip(): + return False + + clean = text.strip() + print(f"\n{'─' * 60}") + print(f"TRANSITION INSTRUCTION — entering {new_phase} phase") + print(clean) + print(f"{'─' * 60}") + print_user_message(f"Hey, did you see the above? 
Please act on this: {clean}") + return True + + +__all__ = ["emit_transition_message"] diff --git a/desloppify/app/commands/plan/override/misc.py b/desloppify/app/commands/plan/override/misc.py index b5025566b..ce274d01d 100644 --- a/desloppify/app/commands/plan/override/misc.py +++ b/desloppify/app/commands/plan/override/misc.py @@ -25,7 +25,11 @@ describe_issue, set_focus, ) -from desloppify.engine._plan.refresh_lifecycle import clear_postflight_scan_completion +from desloppify.app.commands.helpers.transition_messages import emit_transition_message +from desloppify.engine._plan.refresh_lifecycle import ( + LIFECYCLE_PHASE_EXECUTE, + clear_postflight_scan_completion, +) from desloppify.engine._state.resolution import resolve_issues from desloppify.state_io import load_state @@ -113,7 +117,9 @@ def cmd_plan_reopen(args: argparse.Namespace) -> None: count += 1 append_log_entry(plan, "reopen", issue_ids=reopened, actor="user") - clear_postflight_scan_completion(plan, issue_ids=reopened, state=state_data) + transition_phase: str | None = None + if clear_postflight_scan_completion(plan, issue_ids=reopened, state=state_data): + transition_phase = LIFECYCLE_PHASE_EXECUTE save_plan_state_transactional( plan=plan, plan_path=plan_file, @@ -124,6 +130,8 @@ def cmd_plan_reopen(args: argparse.Namespace) -> None: print(colorize(f" Reopened {len(reopened)} issue(s).", "green")) if count: print(colorize(" Plan updated: items moved back to queue.", "dim")) + if transition_phase: + emit_transition_message(transition_phase) def cmd_plan_focus(args: argparse.Namespace) -> None: diff --git a/desloppify/app/commands/plan/override/resolve_workflow.py b/desloppify/app/commands/plan/override/resolve_workflow.py index 5363ee598..8854f2fc1 100644 --- a/desloppify/app/commands/plan/override/resolve_workflow.py +++ b/desloppify/app/commands/plan/override/resolve_workflow.py @@ -12,6 +12,7 @@ ensure_active_triage_issue_ids, has_open_review_issues, ) +from 
desloppify.app.commands.helpers.transition_messages import emit_transition_message from desloppify.base.config import target_strict_score_from_config from .resolve_helpers import blocked_triage_stages from desloppify.app.commands.plan.triage.stage_queue import ( @@ -350,15 +351,19 @@ def _reconcile_if_queue_drained( if WORKFLOW_CREATE_PLAN_ID in synthetic_ids and has_open_review_issues(state_data): ensure_active_triage_issue_ids(plan, state_data) inject_triage_stages(plan) - set_lifecycle_phase(plan, LIFECYCLE_PHASE_TRIAGE_POSTFLIGHT) + changed = set_lifecycle_phase(plan, LIFECYCLE_PHASE_TRIAGE_POSTFLIGHT) save_plan(plan) + if changed: + emit_transition_message(LIFECYCLE_PHASE_TRIAGE_POSTFLIGHT) return - reconcile_plan( + result = reconcile_plan( plan, state_data, target_strict=target_strict_score_from_config(state_data.get("config")), ) save_plan(plan) + if result.lifecycle_phase_changed: + emit_transition_message(result.lifecycle_phase) def resolve_workflow_patterns( diff --git a/desloppify/app/commands/plan/override/skip.py b/desloppify/app/commands/plan/override/skip.py index 51f294f83..64a40c8d7 100644 --- a/desloppify/app/commands/plan/override/skip.py +++ b/desloppify/app/commands/plan/override/skip.py @@ -22,7 +22,11 @@ from desloppify.base.exception_sets import CommandError from desloppify.base.output.terminal import colorize from desloppify.base.output.user_message import print_user_message -from desloppify.engine._plan.refresh_lifecycle import clear_postflight_scan_completion +from desloppify.app.commands.helpers.transition_messages import emit_transition_message +from desloppify.engine._plan.refresh_lifecycle import ( + LIFECYCLE_PHASE_EXECUTE, + clear_postflight_scan_completion, +) from desloppify.engine.plan_ops import ( SKIP_KIND_LABELS, append_log_entry, @@ -245,7 +249,9 @@ def cmd_plan_skip(args: argparse.Namespace) -> None: note=note, detail={"kind": kind, "reason": reason}, ) - clear_postflight_scan_completion(plan, issue_ids=issue_ids, 
state=state) + transition_phase: str | None = None + if clear_postflight_scan_completion(plan, issue_ids=issue_ids, state=state): + transition_phase = LIFECYCLE_PHASE_EXECUTE _save_skip_plan_state( plan=plan, plan_file=plan_file, @@ -271,6 +277,8 @@ def cmd_plan_skip(args: argparse.Namespace) -> None: " plan --help` to see all available plan tools. Otherwise" " no need to reply, just keep going." ) + if transition_phase: + emit_transition_message(transition_phase) def cmd_plan_unskip(args: argparse.Namespace) -> None: @@ -304,7 +312,9 @@ def cmd_plan_unskip(args: argparse.Namespace) -> None: actor="user", detail={"need_reopen": need_reopen}, ) - clear_postflight_scan_completion(plan, issue_ids=unskipped_ids, state=state) + transition_phase: str | None = None + if clear_postflight_scan_completion(plan, issue_ids=unskipped_ids, state=state): + transition_phase = LIFECYCLE_PHASE_EXECUTE reopened: list[str] = [] if need_reopen: @@ -330,6 +340,8 @@ def cmd_plan_unskip(args: argparse.Namespace) -> None: "yellow", ) ) + if transition_phase: + emit_transition_message(transition_phase) def cmd_plan_backlog(args: argparse.Namespace) -> None: @@ -374,7 +386,9 @@ def cmd_plan_backlog(args: argparse.Namespace) -> None: issue_ids=removed, actor="user", ) - clear_postflight_scan_completion(plan, issue_ids=removed, state=state_data) + transition_phase: str | None = None + if clear_postflight_scan_completion(plan, issue_ids=removed, state=state_data): + transition_phase = LIFECYCLE_PHASE_EXECUTE if reopen_ids: save_plan_state_transactional( @@ -389,6 +403,8 @@ def cmd_plan_backlog(args: argparse.Namespace) -> None: print(colorize(f" Moved {len(removed)} item(s) to backlog.", "green")) if reopen_ids: print(colorize(f" Reopened {len(reopen_ids)} deferred/triaged-out issue(s) in state.", "dim")) + if transition_phase: + emit_transition_message(transition_phase) __all__ = [ diff --git a/desloppify/app/commands/registry.py b/desloppify/app/commands/registry.py index 861d7133d..e8fb46413 
100644 --- a/desloppify/app/commands/registry.py +++ b/desloppify/app/commands/registry.py @@ -15,6 +15,7 @@ def _build_handlers() -> dict[str, CommandHandler]: from desloppify.app.commands.backlog import cmd_backlog from desloppify.app.commands.config import cmd_config from desloppify.app.commands.detect import cmd_detect + from desloppify.app.commands.directives import cmd_directives from desloppify.app.commands.dev import cmd_dev from desloppify.app.commands.exclude import cmd_exclude from desloppify.app.commands.langs import cmd_langs @@ -47,6 +48,7 @@ def _build_handlers() -> dict[str, CommandHandler]: "zone": cmd_zone, "review": cmd_review, "config": cmd_config, + "directives": cmd_directives, "dev": cmd_dev, "langs": cmd_langs, "update-skill": cmd_update_skill, diff --git a/desloppify/app/commands/resolve/living_plan.py b/desloppify/app/commands/resolve/living_plan.py index 17044eee9..fcf301e27 100644 --- a/desloppify/app/commands/resolve/living_plan.py +++ b/desloppify/app/commands/resolve/living_plan.py @@ -7,6 +7,7 @@ from pathlib import Path from typing import NamedTuple +from desloppify.app.commands.helpers.transition_messages import emit_transition_message from desloppify.base.config import target_strict_score_from_config from desloppify.base.exception_sets import PLAN_LOAD_EXCEPTIONS from desloppify.base.output.terminal import colorize @@ -17,7 +18,10 @@ auto_complete_steps, purge_ids, ) -from desloppify.engine._plan.refresh_lifecycle import clear_postflight_scan_completion +from desloppify.engine._plan.refresh_lifecycle import ( + LIFECYCLE_PHASE_EXECUTE, + clear_postflight_scan_completion, +) from desloppify.engine.plan_state import ( add_uncommitted_issues, has_living_plan, @@ -36,12 +40,19 @@ class ClusterContext(NamedTuple): cluster_remaining: int -def _reconcile_if_queue_drained(plan: dict, state: dict | None) -> None: - """Advance the living plan when a resolve drains the explicit live queue.""" +def _reconcile_if_queue_drained(plan: dict, 
state: dict | None) -> str | None: + """Advance the living plan when a resolve drains the explicit live queue. + + Returns the new lifecycle phase if a transition occurred, so the caller + can emit the directive after all other output. + """ if state is None or not live_planned_queue_empty(plan): - return + return None target_strict = target_strict_score_from_config(state.get("config")) - reconcile_plan(plan, state, target_strict=target_strict) + result = reconcile_plan(plan, state, target_strict=target_strict) + if result is not None and result.lifecycle_phase_changed: + return result.lifecycle_phase + return None def capture_cluster_context(plan: dict, resolved_ids: list[str]) -> ClusterContext: @@ -102,15 +113,27 @@ def update_living_plan_after_resolve( cluster_name=ctx.cluster_name, actor="user", ) + # Clear focus when cluster is done + if plan.get("active_cluster") == ctx.cluster_name: + plan["active_cluster"] = None + elif ctx.cluster_name and ctx.cluster_remaining > 0: + # Auto-focus on the cluster while there's still work in it + plan["active_cluster"] = ctx.cluster_name if args.status == "fixed": add_uncommitted_issues(plan, all_resolved) elif args.status == "open": purge_uncommitted_ids(plan, all_resolved) - clear_postflight_scan_completion(plan, issue_ids=all_resolved, state=state) - _reconcile_if_queue_drained(plan, state) + transition_phase: str | None = None + if clear_postflight_scan_completion(plan, issue_ids=all_resolved, state=state): + transition_phase = LIFECYCLE_PHASE_EXECUTE + reconcile_phase = _reconcile_if_queue_drained(plan, state) + if reconcile_phase: + transition_phase = reconcile_phase save_plan(plan, plan_path) if purged: print(colorize(f" Plan updated: {purged} item(s) removed from queue.", "dim")) + if transition_phase: + emit_transition_message(transition_phase) except PLAN_LOAD_EXCEPTIONS as exc: _logger.debug("plan update failed after resolve", exc_info=True) warn_plan_load_degraded_once( diff --git 
a/desloppify/app/commands/resolve/messages.py b/desloppify/app/commands/resolve/messages.py index f9326bc24..a984dcddc 100644 --- a/desloppify/app/commands/resolve/messages.py +++ b/desloppify/app/commands/resolve/messages.py @@ -3,12 +3,68 @@ from __future__ import annotations import argparse +import logging from desloppify.base.output.terminal import colorize from desloppify.base.output.user_message import print_user_message from .living_plan import ClusterContext +logger = logging.getLogger(__name__) + +_NEXT_TASK_INSTRUCTIONS = ( + "A desloppify task was just marked complete. Here's what to do next:\n" + "\n" + "1. Run `desloppify next` to see the next task in the queue\n" + "2. Read and understand the issue — explore the relevant files and scope\n" + "3. Execute the fix thoroughly and verify it works\n" + "4. Once you're happy with it, commit and push:\n" + " `git add -A && git commit -m '' && git push`\n" + "5. Record the commit: `desloppify plan commit-log record`\n" + "6. Mark it resolved: `desloppify resolve --fixed --attest ''`" +) + + +def _hermes_reset_and_instruct( + *, + cluster_name: str | None = None, + cluster_remaining: int = 0, +) -> None: + """Reset Hermes context and inject next-task instructions via control API.""" + from desloppify.app.commands.helpers.transition_messages import ( + _hermes_available, + _hermes_send_message, + ) + + if not _hermes_available(): + return + try: + # Reset conversation to clear stale context from the previous task + result = _hermes_send_message("/reset", mode="interrupt") + if not result.get("success"): + return + + # Build context-aware instructions + if cluster_name and cluster_remaining > 0: + instructions = ( + f"A desloppify task was just marked complete. You're working through " + f"cluster '{cluster_name}' — {cluster_remaining} item(s) remaining.\n" + f"\n" + f"1. Run `desloppify next` to see the next task (focus is on '{cluster_name}')\n" + f"2. 
Read the step detail shown under 'Your step(s):' — it has exact file paths and line numbers\n" + f"3. Execute the fix and verify it works\n" + f"4. Commit: `git add -A && git commit -m '' && git push`\n" + f"5. Record: `desloppify plan commit-log record`\n" + f"6. Resolve: `desloppify resolve --fixed --attest ''`\n" + f"\nKeep going until the cluster is finished." + ) + else: + instructions = _NEXT_TASK_INSTRUCTIONS + + _hermes_send_message(instructions, mode="queue") + except Exception as exc: + logger.debug("Hermes next-task injection skipped: %s", exc) + def print_no_match_warning(args: argparse.Namespace) -> None: status_label = "resolved" if args.status == "open" else "open" @@ -49,5 +105,11 @@ def print_fixed_next_user_message( " to commit and push. Otherwise just keep going." ) + # Also inject via Hermes control API for a clean context switch + _hermes_reset_and_instruct( + cluster_name=cluster_ctx.cluster_name if mid_cluster else None, + cluster_remaining=cluster_ctx.cluster_remaining if mid_cluster else 0, + ) + __all__ = ["print_fixed_next_user_message", "print_no_match_warning"] diff --git a/desloppify/app/commands/review/importing/plan_sync.py b/desloppify/app/commands/review/importing/plan_sync.py index d54ef012b..8b8e68dd7 100644 --- a/desloppify/app/commands/review/importing/plan_sync.py +++ b/desloppify/app/commands/review/importing/plan_sync.py @@ -6,6 +6,7 @@ from pathlib import Path from desloppify.app.commands.helpers.issue_id_display import short_issue_id +from desloppify.app.commands.helpers.transition_messages import emit_transition_message from desloppify.app.commands.review.importing.flags import imported_assessment_keys from desloppify.base.config import target_strict_score_from_config from desloppify.base.exception_sets import PLAN_LOAD_EXCEPTIONS @@ -69,6 +70,7 @@ class _ImportPlanTransition: covered_pruned: list[str] import_scores_result: object reconcile_result: ReconcileResult + transition_phase: str | None = None def 
_print_review_import_sync( @@ -270,11 +272,17 @@ def _apply_import_plan_transitions( triage_deferred=import_result.triage_deferred, ) + transition_phase = ( + reconcile_result.lifecycle_phase + if reconcile_result.lifecycle_phase_changed + else None + ) return _ImportPlanTransition( import_result=import_result, covered_pruned=covered_pruned, import_scores_result=import_scores_result, reconcile_result=reconcile_result, + transition_phase=transition_phase, ) @@ -445,6 +453,8 @@ def sync_plan_after_import( outcome=outcome, ) _print_workflow_injected_message(result.workflow_injected_ids) + if transition.transition_phase: + emit_transition_message(transition.transition_phase) return outcome except PLAN_LOAD_EXCEPTIONS as exc: message = f"skipped plan sync after review import ({exc})" diff --git a/desloppify/app/commands/status/flow.py b/desloppify/app/commands/status/flow.py index 431c03837..e61683c93 100644 --- a/desloppify/app/commands/status/flow.py +++ b/desloppify/app/commands/status/flow.py @@ -52,6 +52,12 @@ def _print_status_warnings(config: dict) -> None: + if config.get("hermes_enabled"): + print(colorize( + ' ⚕ Hermes agent mode — model switching, autoreply, task handoff active' + '\n To disable: set "hermes_enabled": false in config.json', + "cyan", + )) skill_warning = check_skill_version() if skill_warning: print(colorize(f" {skill_warning}", "yellow")) diff --git a/desloppify/base/config/schema.py b/desloppify/base/config/schema.py index 1497c412c..03e8fa004 100644 --- a/desloppify/base/config/schema.py +++ b/desloppify/base/config/schema.py @@ -100,6 +100,24 @@ class ConfigKey: False, "Allow loading user plugins from .desloppify/plugins/ (security opt-in)", ), + "transition_messages": ConfigKey( + dict, + {}, + "Messages shown to agents at lifecycle phase transitions {phase: message}", + ), + "hermes_enabled": ConfigKey( + bool, + False, + "Enable Hermes agent integration (model switching, autoreply, task handoff)", + ), + "hermes_models": ConfigKey( + 
dict, + { + "execute": "openrouter:x-ai/grok-4.20-beta", + "review": "openrouter:google/gemini-3.1-pro-preview", + }, + "Phase → provider:model mapping for Hermes model switching", + ), } diff --git a/desloppify/tests/commands/test_transition_messages.py b/desloppify/tests/commands/test_transition_messages.py new file mode 100644 index 000000000..088198b2a --- /dev/null +++ b/desloppify/tests/commands/test_transition_messages.py @@ -0,0 +1,100 @@ +"""Tests for lifecycle transition messages.""" + +from __future__ import annotations + +import pytest + +from desloppify.app.commands.helpers import transition_messages as mod + + +@pytest.fixture() +def _config_with_messages(monkeypatch): + """Patch load_config to return transition messages.""" + def _make(messages: dict): + monkeypatch.setattr(mod, "load_config", lambda: {"transition_messages": messages}) + return _make + + +def test_emit_exact_phase_match(_config_with_messages, capsys): + _config_with_messages({"execute": "Switch to Sonnet for speed."}) + assert mod.emit_transition_message("execute") is True + assert "Switch to Sonnet for speed." in capsys.readouterr().out + + +def test_emit_coarse_fallback(_config_with_messages, capsys): + _config_with_messages({"review": "Use blind packet."}) + assert mod.emit_transition_message("review_initial") is True + assert "Use blind packet." in capsys.readouterr().out + + +def test_exact_phase_takes_priority_over_coarse(_config_with_messages, capsys): + _config_with_messages({ + "review_initial": "Exact message.", + "review": "Coarse message.", + }) + assert mod.emit_transition_message("review_initial") is True + out = capsys.readouterr().out + assert "Exact message." in out + assert "Coarse message." 
not in out + + +def test_no_message_configured(_config_with_messages, capsys): + _config_with_messages({}) + assert mod.emit_transition_message("execute") is False + assert capsys.readouterr().out == "" + + +def test_no_transition_messages_key(monkeypatch, capsys): + monkeypatch.setattr(mod, "load_config", lambda: {}) + assert mod.emit_transition_message("execute") is False + assert capsys.readouterr().out == "" + + +def test_empty_string_message_skipped(_config_with_messages, capsys): + _config_with_messages({"execute": " "}) + assert mod.emit_transition_message("execute") is False + assert capsys.readouterr().out == "" + + +def test_non_string_message_skipped(_config_with_messages, capsys): + _config_with_messages({"execute": 42}) + assert mod.emit_transition_message("execute") is False + assert capsys.readouterr().out == "" + + +def test_postflight_fallback_for_review_phase(_config_with_messages, capsys): + _config_with_messages({"postflight": "Summarise what you fixed."}) + assert mod.emit_transition_message("review_initial") is True + assert "Summarise what you fixed." in capsys.readouterr().out + + +def test_postflight_fallback_for_triage_phase(_config_with_messages, capsys): + _config_with_messages({"postflight": "Stop and review."}) + assert mod.emit_transition_message("triage_postflight") is True + assert "Stop and review." 
in capsys.readouterr().out + + +def test_postflight_does_not_fire_for_execute(_config_with_messages, capsys): + _config_with_messages({"postflight": "Should not appear."}) + assert mod.emit_transition_message("execute") is False + assert capsys.readouterr().out == "" + + +def test_postflight_does_not_fire_for_scan(_config_with_messages, capsys): + _config_with_messages({"postflight": "Should not appear."}) + assert mod.emit_transition_message("scan") is False + assert capsys.readouterr().out == "" + + +def test_coarse_takes_priority_over_postflight(_config_with_messages, capsys): + _config_with_messages({"review": "Specific.", "postflight": "Generic."}) + assert mod.emit_transition_message("review_initial") is True + out = capsys.readouterr().out + assert "Specific." in out + assert "Generic." not in out + + +def test_config_load_failure_returns_false(monkeypatch, capsys): + monkeypatch.setattr(mod, "load_config", lambda: (_ for _ in ()).throw(OSError("nope"))) + assert mod.emit_transition_message("execute") is False + assert capsys.readouterr().out == "" From 58811e9e120766bda8c1ba070ccd7fc699c596ab Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:00:51 +0100 Subject: [PATCH 08/43] feat: add dev test-hermes command and bump skill doc version desloppify dev test-hermes: smoke-test Hermes model switching by switching to a random model and back. Skill doc version bumped to v6. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/commands/dev.py | 54 ++++++++++++++++++++++++++++++++++ desloppify/app/skill_docs.py | 2 +- 2 files changed, 55 insertions(+), 1 deletion(-) diff --git a/desloppify/app/commands/dev.py b/desloppify/app/commands/dev.py index 6396bf45e..96940a860 100644 --- a/desloppify/app/commands/dev.py +++ b/desloppify/app/commands/dev.py @@ -23,6 +23,9 @@ def cmd_dev(args: argparse.Namespace) -> None: except ValueError as ex: raise CommandError(str(ex)) from ex return + if action == "test-hermes": + _cmd_test_hermes() + return raise CommandError("Unknown dev action. Use `desloppify dev scaffold-lang`.") @@ -165,3 +168,54 @@ def _cmd_scaffold_lang(args: object) -> None: " Next: implement real phases/commands/detectors and run pytest.", "dim" ) ) + + +def _cmd_test_hermes() -> None: + """Test Hermes model switching — switch to a random model and back.""" + import random + import time + + from desloppify.app.commands.helpers.transition_messages import ( + _hermes_available, + _hermes_get, + _hermes_send_message, + ) + + if not _hermes_available(): + print(colorize('Hermes not enabled. 
Set "hermes_enabled": true in config.json', "yellow")) + return + + # Get current model + info = _hermes_get("/sessions/_any") + if "error" in info: + print(colorize(f"Cannot reach Hermes: {info['error']}", "red")) + return + + original_model = info.get("model", "unknown") + original_provider = info.get("provider", "unknown") + print(f" Current model: {original_provider}:{original_model}") + + # Pick a random test model + test_models = [ + ("openrouter", "google/gemini-2.5-flash"), + ("openrouter", "meta-llama/llama-4-scout"), + ("openrouter", "mistralai/mistral-medium-3"), + ] + test_provider, test_model = random.choice(test_models) + + # Switch to test model + print(f" Switching to: {test_provider}:{test_model}") + result = _hermes_send_message(f"/model {test_provider}:{test_model}", mode="queue") + if not result.get("success"): + print(colorize(f" Switch failed: {result.get('error', '?')}", "red")) + return + print(colorize(" ✓ Switch command sent", "green")) + + # Wait a moment, then switch back + time.sleep(2) + print(f" Switching back to: {original_provider}:{original_model}") + result = _hermes_send_message(f"/model {original_provider}:{original_model}", mode="queue") + if not result.get("success"): + print(colorize(f" Switch-back failed: {result.get('error', '?')}", "red")) + return + print(colorize(" ✓ Restored original model", "green")) diff --git a/desloppify/app/skill_docs.py b/desloppify/app/skill_docs.py index 0e9c7585b..ec75862ed 100644 --- a/desloppify/app/skill_docs.py +++ b/desloppify/app/skill_docs.py @@ -9,7 +9,7 @@ # Bump this integer whenever docs/SKILL.md changes in a way that agents # should pick up (new commands, changed workflows, removed sections). 
-SKILL_VERSION = 5 +SKILL_VERSION = 6 SKILL_VERSION_RE = re.compile(r"") SKILL_OVERLAY_RE = re.compile(r"") From 25bca6eec2aa7473fa4a0fbf1d5b2a912f32da60 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:00:58 +0100 Subject: [PATCH 09/43] fix: handle missing git in review coordinator, remove unused import Wrap git status call in try/except OSError so review coordinator doesn't crash when git is unavailable. Remove unused triage_scoped_plan import from stage_validation. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../plan/triage/runner/stage_validation.py | 1 - desloppify/app/commands/review/coordinator.py | 29 ++++++++-------- .../commands/scan/test_plan_reconcile.py | 3 +- .../tests/commands/test_review_coordinator.py | 33 +++++++++++++++++++ 4 files changed, 51 insertions(+), 15 deletions(-) create mode 100644 desloppify/tests/commands/test_review_coordinator.py diff --git a/desloppify/app/commands/plan/triage/runner/stage_validation.py b/desloppify/app/commands/plan/triage/runner/stage_validation.py index d73d9309b..22533c213 100644 --- a/desloppify/app/commands/plan/triage/runner/stage_validation.py +++ b/desloppify/app/commands/plan/triage/runner/stage_validation.py @@ -31,7 +31,6 @@ from ..stages.helpers import ( active_triage_issue_scope, scoped_manual_clusters_with_issues, - triage_scoped_plan, unclustered_review_issues, unenriched_clusters, value_check_targets, diff --git a/desloppify/app/commands/review/coordinator.py b/desloppify/app/commands/review/coordinator.py index 5d75580f0..f7c4bed28 100644 --- a/desloppify/app/commands/review/coordinator.py +++ b/desloppify/app/commands/review/coordinator.py @@ -66,19 +66,22 @@ def git_baseline( if head_proc.returncode != 0: return None, None head = head_proc.stdout.strip() or None - status_proc = subprocess_run( - [ - "git", - "-C", - str(project_root), - "status", - "--porcelain", - "--untracked-files=normal", - ], - capture_output=True, - text=True, - check=False, - ) + try: + status_proc = 
subprocess_run( + [ + "git", + "-C", + str(project_root), + "status", + "--porcelain", + "--untracked-files=normal", + ], + capture_output=True, + text=True, + check=False, + ) + except OSError: + return None, None status_raw = status_proc.stdout if status_proc.returncode == 0 else "" status_hash = _stable_json_sha256(status_raw) return head, status_hash diff --git a/desloppify/tests/commands/scan/test_plan_reconcile.py b/desloppify/tests/commands/scan/test_plan_reconcile.py index 617cd1f31..a3ab7b5d7 100644 --- a/desloppify/tests/commands/scan/test_plan_reconcile.py +++ b/desloppify/tests/commands/scan/test_plan_reconcile.py @@ -19,11 +19,12 @@ # Helpers # --------------------------------------------------------------------------- -def _runtime(*, state=None, config=None) -> SimpleNamespace: +def _runtime(*, state=None, config=None, force_rescan=False) -> SimpleNamespace: return SimpleNamespace( state=state or {}, state_path=Path("/tmp/fake-state.json"), config=config or {}, + force_rescan=force_rescan, ) diff --git a/desloppify/tests/commands/test_review_coordinator.py b/desloppify/tests/commands/test_review_coordinator.py new file mode 100644 index 000000000..ffeca3841 --- /dev/null +++ b/desloppify/tests/commands/test_review_coordinator.py @@ -0,0 +1,33 @@ +"""Tests for review coordinator baseline helpers.""" + +from __future__ import annotations + +from pathlib import Path +from types import SimpleNamespace + +from desloppify.app.commands.review import coordinator as mod + + +def test_git_baseline_returns_none_tuple_when_status_raises_oserror(): + calls: list[list[str]] = [] + + def _run(command, **_kwargs): + calls.append(command) + if command[-2:] == ["rev-parse", "HEAD"]: + return SimpleNamespace(returncode=0, stdout="abc123\n") + raise OSError("git unavailable") + + assert mod.git_baseline(Path("/tmp/project"), subprocess_run=_run) == (None, None) + assert len(calls) == 2 + + +def test_git_baseline_hashes_status_output_when_both_commands_succeed(): + def 
_run(command, **_kwargs): + if command[-2:] == ["rev-parse", "HEAD"]: + return SimpleNamespace(returncode=0, stdout="abc123\n") + return SimpleNamespace(returncode=0, stdout=" M foo.py\n?? bar.py\n") + + head, status_hash = mod.git_baseline(Path("/tmp/project"), subprocess_run=_run) + + assert head == "abc123" + assert status_hash == mod._stable_json_sha256(" M foo.py\n?? bar.py\n") From dcb9f58b8f9900c8b7002ef31f75764d0e5c4710 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:01:07 +0100 Subject: [PATCH 10/43] docs: update Hermes overlay for delegate_task, add directives docs, update website Rewrite HERMES.md: delegate_task subagent pattern replaces worktree-based parallel review. Add agent directives section to SKILL.md. Website: initiative #2 now active with $1k bounty challenge details. Co-Authored-By: Claude Opus 4.6 (1M context) --- .agents/skills/desloppify/SKILL.md | 289 +++++++++++++++++++++++++++++ assets/scorecard.png | Bin 88958 -> 88229 bytes docs/HERMES.md | 73 +++++--- docs/SKILL.md | 16 +- website/index.html | 2 + website/token.css | 7 +- website/token.html | 33 +++- 7 files changed, 388 insertions(+), 32 deletions(-) create mode 100644 .agents/skills/desloppify/SKILL.md diff --git a/.agents/skills/desloppify/SKILL.md b/.agents/skills/desloppify/SKILL.md new file mode 100644 index 000000000..32f4908c0 --- /dev/null +++ b/.agents/skills/desloppify/SKILL.md @@ -0,0 +1,289 @@ +--- +name: desloppify +description: > + Codebase health scanner and technical debt tracker. Use when the user asks + about code quality, technical debt, dead code, large files, god classes, + duplicate functions, code smells, naming issues, import cycles, or coupling + problems. Also use when asked for a health score, what to fix next, or to + create a cleanup plan. Supports 29 languages. +allowed-tools: Bash(desloppify *) +--- + + + + +# Desloppify + +## 1. Your Job + +Maximise the **strict score** honestly. Your main cycle: **scan → plan → execute → rescan**. 
Follow the scan output's **INSTRUCTIONS FOR AGENTS** — don't substitute your own analysis. + +**Don't be lazy.** Do large refactors and small detailed fixes with equal energy. If it takes touching 20 files, touch 20 files. If it's a one-line change, make it. No task is too big or too small — fix things properly, not minimally. + +## 2. The Workflow + +Three phases, repeated as a cycle. + +### Phase 1: Scan and review — understand the codebase + +```bash +desloppify scan --path . # analyse the codebase +desloppify status # check scores — are we at target? +``` + +The scan will tell you if subjective dimensions need review. Follow its instructions. To trigger a review manually: +```bash +desloppify review --prepare # then follow your runner's review workflow +``` + +### Phase 2: Plan — decide what to work on + +After reviews, triage stages and plan creation appear in the execution queue surfaced by `next`. Complete them in order — `next` tells you what each stage expects in the `--report`: +```bash +desloppify next # shows the next execution workflow step +desloppify plan triage --stage observe --report "themes and root causes..." +desloppify plan triage --stage reflect --report "comparison against completed work..." +desloppify plan triage --stage organize --report "summary of priorities..." +desloppify plan triage --complete --strategy "execution plan..." +``` + +For automated triage: `desloppify plan triage --run-stages --runner codex` (Codex) or `--runner claude` (Claude). Options: `--only-stages`, `--dry-run`, `--stage-timeout-seconds`. + +Then shape the queue. **The plan shapes everything `next` gives you** — `next` is the execution queue, not the full backlog. Don't skip this step. + +```bash +desloppify plan # see the living plan details +desloppify plan queue # compact execution queue view +desloppify plan reorder top # reorder — what unblocks the most? 
+desloppify plan cluster create # group related issues to batch-fix +desloppify plan focus # scope next to one cluster +desloppify plan skip # defer — hide from next +``` + +### Phase 3: Execute — grind the queue to completion + +Trust the plan and execute. Don't rescan mid-queue — finish the queue first. + +**Branch first.** Create a dedicated branch — never commit health work directly to main: +```bash +git checkout -b desloppify/code-health # or desloppify/ +desloppify config set commit_pr 42 # link a PR for auto-updated descriptions +``` + +**The loop:** +```bash +# 1. Get the next item from the execution queue +desloppify next + +# 2. Fix the issue in code + +# 3. Resolve it (next shows the exact command including required attestation) + +# 4. When you have a logical batch, commit and record +git add && git commit -m "desloppify: fix 3 deferred_import findings" +desloppify plan commit-log record # moves findings uncommitted → committed, updates PR + +# 5. Push periodically +git push -u origin desloppify/code-health + +# 6. Repeat until the queue is empty +``` + +Score may temporarily drop after fixes — cascade effects are normal, keep going. +If `next` suggests an auto-fixer, run `desloppify autofix --dry-run` to preview, then apply. + +**When the queue is clear, go back to Phase 1.** New issues will surface, cascades will have resolved, priorities will have shifted. This is the cycle. + +## 3. Reference + +### Key concepts + +- **Tiers**: T1 auto-fix → T2 quick manual → T3 judgment call → T4 major refactor. +- **Auto-clusters**: related findings are auto-grouped in `next`. Drill in with `next --cluster `. +- **Zones**: production/script (scored), test/config/generated/vendor (not scored). Fix with `zone set`. +- **Wontfix cost**: widens the lenient↔strict gap. Challenge past decisions when the gap grows. + +### Scoring + +Overall score = **25% mechanical** + **75% subjective**. 
+ +- **Mechanical (25%)**: auto-detected issues — duplication, dead code, smells, unused imports, security. Fixed by changing code and rescanning. +- **Subjective (75%)**: design quality review — naming, error handling, abstractions, clarity. Starts at **0%** until reviewed. The scan will prompt you when a review is needed. +- **Strict score** is the north star: wontfix items count as open. The gap between overall and strict is your wontfix debt. +- **Score types**: overall (lenient), strict (wontfix counts), objective (mechanical only), verified (confirmed fixes only). + +### Reviews + +Four paths to get subjective scores: + +- **Local runner (Codex)**: `desloppify review --run-batches --runner codex --parallel --scan-after-import` — automated end-to-end. +- **Local runner (Claude)**: `desloppify review --prepare` → launch parallel subagents → `desloppify review --import merged.json` — see skill doc overlay for details. +- **Cloud/external**: `desloppify review --external-start --external-runner claude` → follow session template → `--external-submit`. +- **Manual path**: `desloppify review --prepare` → review per dimension → `desloppify review --import file.json`. + +- Import first, fix after — import creates tracked state entries for correlation. +- Target-matching scores trigger auto-reset to prevent gaming. Use the blind-review workflow described in your agent overlay doc (e.g. `docs/CLAUDE.md`, `docs/HERMES.md`). +- Even moderate scores (60-80) dramatically improve overall health. +- Stale dimensions auto-surface in `next` — just follow the queue. + +**Integrity rules:** Score from evidence only — no prior chat context, score history, or target-threshold anchoring. When evidence is mixed, score lower and explain uncertainty. Assess every requested dimension; never drop one. + +#### Review output format + +Return machine-readable JSON for review imports. 
For `--external-submit`, include `session` from the generated template: + +```json +{ + "session": { + "id": "", + "token": "" + }, + "assessments": { + "": 0 + }, + "findings": [ + { + "dimension": "", + "identifier": "short_id", + "summary": "one-line defect summary", + "related_files": ["relative/path/to/file.py"], + "evidence": ["specific code observation"], + "suggestion": "concrete fix recommendation", + "confidence": "high|medium|low" + } + ] +} +``` + +`findings` MUST match `query.system_prompt` exactly (including `related_files`, `evidence`, and `suggestion`). Use `"findings": []` when no defects found. Import is fail-closed: invalid findings abort unless `--allow-partial` is passed. Assessment scores are auto-applied from trusted internal or cloud session imports. Legacy `--attested-external` remains supported. + +#### Import paths + +- Robust session flow (recommended): `desloppify review --external-start --external-runner claude` → use generated prompt/template → run printed `--external-submit` command. +- Durable scored import (legacy): `desloppify review --import findings.json --attested-external --attest "I validated this review was completed without awareness of overall score and is unbiased."` +- Findings-only fallback: `desloppify review --import findings.json` + +#### Reviewer agent prompt + +Runners that support agent definitions (Cursor, Copilot, Gemini) can create a dedicated reviewer agent. Use this system prompt: + +``` +You are a code quality reviewer. You will be given a codebase path, a set of +dimensions to score, and what each dimension means. Read the code, score each +dimension 0-100 from evidence only, and return JSON in the required format. +Do not anchor to target thresholds. When evidence is mixed, score lower and +explain uncertainty. +``` + +See your editor's overlay section below for the agent config format. 
+ +### Plan commands + +```bash +desloppify plan reorder top # move all cluster members at once +desloppify plan reorder top # mix clusters + findings in one reorder +desloppify plan reorder before -t X # position relative to another item/cluster +desloppify plan cluster reorder a,b top # reorder multiple clusters as one block +desloppify plan resolve # mark complete +desloppify plan reopen # reopen +desloppify backlog # broader non-execution backlog +``` + +### Commit tracking + +```bash +desloppify plan commit-log # see uncommitted + committed status +desloppify plan commit-log record # record HEAD commit, update PR description +desloppify plan commit-log record --note "why" # with rationale +desloppify plan commit-log record --only "smells::*" # record specific findings only +desloppify plan commit-log history # show commit records +desloppify plan commit-log pr # preview PR body markdown +desloppify config set commit_tracking_enabled false # disable guidance +``` + +After resolving findings as `fixed`, the tool shows uncommitted work, committed history, and a suggested commit message. After committing externally, run `record` to move findings from uncommitted to committed and auto-update the linked PR description. + +### Quick reference + +```bash +desloppify next --count 5 # top 5 execution items +desloppify next --cluster # drill into a cluster +desloppify backlog --count 5 # top 5 backlog items outside execution +desloppify show # filter by file/detector/ID +desloppify show --status open # all open findings +desloppify plan skip --permanent "" --note "reason" --attest "..." # accept debt +desloppify exclude # exclude a directory from scanning +desloppify config show # show all config including excludes +desloppify scan --path . --reset-subjective # reset subjective baseline to 0 +``` + +## 4. Fix Tool Issues Upstream + +When desloppify itself appears wrong or inconsistent — a bug, a bad detection, a crash, confusing output — **fix it and open a PR**. 
If you can't confidently fix it, file an issue instead. + +### Fix and PR (preferred) + +Clone the tool repo to a temp directory, make the fix there, and verify it works against the project you're scanning before pushing. + +```bash +git clone https://github.com/peteromallet/desloppify.git /tmp/desloppify-fix +cd /tmp/desloppify-fix +git checkout -b fix/ +``` + +Make your changes, then run the test suite and verify the fix against the original project: + +```bash +python -m pytest desloppify/tests/ -q +python -m desloppify scan --path # the project you were scanning +``` + +Once it looks good, push and open a PR: + +```bash +git add && git commit -m "fix: " +git push -u origin fix/ +gh pr create --title "fix: " --body "$(cat <<'EOF' +## Problem + + +## Fix + +EOF +)" +``` + +Clean up after: `rm -rf /tmp/desloppify-fix` + +### File an issue (fallback) + +If the fix is unclear or the change needs discussion, open an issue at `https://github.com/peteromallet/desloppify/issues` with a minimal repro: command, path, expected output, actual output. + +## Prerequisite + +`command -v desloppify >/dev/null 2>&1 && echo "desloppify: installed" || echo "NOT INSTALLED — run: pip install --upgrade git+https://github.com/peteromallet/desloppify.git"` + + + +## Codex Overlay + +This is the canonical Codex overlay used by the README install command. + +1. Prefer first-class batch runs: `desloppify review --run-batches --runner codex --parallel --scan-after-import`. +2. The command writes immutable packet snapshots under `.desloppify/review_packets/holistic_packet_*.json`; use those for reproducible retries. +3. Keep reviewer input scoped to the immutable packet and the source files named in each batch. +4. If a batch fails, retry only that slice with `desloppify review --run-batches --packet --only-batches `. +5. 
Manual override is safety-scoped: you cannot combine it with `--allow-partial`, and provisional manual scores expire on the next `scan` unless replaced by trusted internal or attested-external imports. + +### Triage workflow + +Prefer automated triage: `desloppify plan triage --run-stages --runner codex` + +Options: `--only-stages observe,reflect` (subset), `--dry-run` (prompts only), `--stage-timeout-seconds N` (per-stage). + +Run artifacts go to `.desloppify/triage_runs//` — each run gets its own directory with `run.log` (live timestamped events), `run_summary.json`, per-stage `prompts/`, `output/`, and `logs/`. Check `run.log` to diagnose stalls or failures. Re-running resumes from the last confirmed stage. + +If automated triage stalls, check `run.log` for the last event, then use `desloppify plan triage --stage-prompt ` to get the full prompt with gate rules. + + + diff --git a/assets/scorecard.png b/assets/scorecard.png index a5eb76c86ff4b28ec12aa63a1d40dad79808e968..9537c8cac0c23625f5d2fadcac081b9f056911d3 100644 GIT binary patch literal 88229 zcmd42bxu(I65jZDgpumx}=1t5&{C^D+Gk6SjbO- zJ9pNnzYv}vAV`XSP;rOvFSzO8UOpln%ezWEMF}k9VJCh0zS17<#b}FV{9Pu#35BHM^p=YI>*rTb->mLkAFl-H2wl%FY_S{-G#xo* zWbHp382#z)uftwgg%jxV?*ofE=AX;YucM#;bB&3kd)XEDLVA?)pF2NX-dR$8h@}4K z_v>0H`ajpmIFkQdzK@dlcSm1DasIg-C3HGbUA=0Eks1EaEpl8CRUS?juH@ytz(4oS z&@T@Z1GVS=`Tf}-?0ED!iX;Tqujwg zT1YP@!U!EE5%LiNLgWh3ll!&|iL2mN^C46t%OJ5m0}7zoER z>DVI_>)$~^&k;rrI5W9eHWgS0X}Y>d+j$WY-m~%71m{9iNB%zn?#Z|8urS4uFLO zJ{HXX_agkC&;P#rf02a$SdIS_xc?X^!hguke}DV+e-#Ua|7iQazy05bjqpE<`2R15 z`XAacLgV~L?FH0U4cDse7KB{yX7a~#t9{fdqqm!l%Qf&&bB1gB*6OAZJ|iO{{5@%( zBW&jCnd$iR%F4=YC?#{0U@2_UZvC6ZsX+trh2vVL7X-V;WqtfIl`yLwe^rizyh!L*y{|zrLaHAp_{t-RE#$NSfpLdm+ttmA3l)7g|EUeIXm|0 z=hC;g!>uK8`*9WUerON3N#f#S?YbXUpKBugAPH_fy5W;aq%E zUX+6?Qe|uLKsOtAKVX9!+nyrQ z5R}+8>ou-Hx0ooq1(8Y?reQ&%mcrc3F|}<0i*4M^+=IZ&y9(>*)`X6a?sR{Nq`>Q= z=1Z20;ptR!oAC$vpF146@)>ORdaCHJ-n~B+H&R|(8~+T^vVzT$m0SydUzh5dzMV2j 
zSS02*pBemtf*Q~MVA_uv>?hyX^82JVFN|@Po=%zd&d0Y#DQ6;;C0e?LzqUAki=w>#Y>P? z31hQwiKGsn`D$qWI7wlsl$}ltbfOZ*MCSdmy!PpWo7H!A7+GU^C^6w8#c(b94?6nW z_TRN+i$ej=n!Yl|olK;vKL}ReggBJWh0ber{Yr30#q^?Dmib(Amhh49 zt|e$KVJCh+L303atZD0^jJt+{&%8Y-P9&J+{&u3%?Mz5d#lh)vc1bVnu5;T(o!(e= zhj0&#BW}zTF_G1t#CnBS^tJ>%rV;O3p1+>}9_w62eO}T`klvOZ%hT%mHZ=F7@%y^+ zNiJVP9n@xoI@h95U);qb-E_cpI%*JgcwAkIQAYYd)|_E$MgH*9Qzsg9)eWF3on?H@HJ5skzV_G-0M5V0;@;} z&#RW)P$CG(VUE&HxKus6>HYD0Q>WQsZ&5=x&hH>8UOXyb_)WZ&A|xZh%OkR0&k21u2$c*|%(!6MoV;3=(`GM-ry8Q>Ni&tvJ^yL1)^!RI&ay!;-3FT$FCm|=vfdyhc36@7@+d7#PU7)rZZBA9! zyy|L(;(7-UqeBdV@h;XHui-N|o|`2dYv}kdd`9MA+&W1s{7Sf;1g7n4s*=`m@8C7~ z(ljh_k6WDJnWJO5vpQhl#GHmCwOja2PCxr(-+g-7Vn*yqt2zEEWwOrd7IrQKoT=b_ z8Q!T|+J z_Ehefk-aTBbd_?0ffrTs;v|^_zK+Psz!siSbD6;9Cl}<3rCOHXc|U0@wgG(U`Y36* z%Wii5f)H_`22%u_0H2UVU0a(G54GcK5VKBI)WC*oIot|m+Vx=G<#bcFPUdiTm(Q-P zG4qp1c&dw<+SEny-1zvoiBfVvfXI4LuftjoQIkO|oy^q3{lrhTOCB;_UqJW#k+KwY z7^0=0>G;5#@z*ymPec6HQF#M~wpAoMUevxMJ|g+#8M!FP#21T287+wnVrCCBAn3DOWg8E&agTHTo(@-VUM;`&*jy5rs)U-Ggav47ALI?!X_87 z(-ITimZCH?73*x~sspbAZ|XgxCGfCb9e4hud%pPv$qC_$`0af96azblxeWMQ$}bnX z=2zGGw!uI6>!?xVu~5UM_AWHXEr4e-AeumP994Lo`eedKh?ek(OKPA|NbB zi)Jb`U+tnD96c~U+U|Hl@bU5OT7GkGI8P&QUp%E?OJZwEvoBE9t8m{d3C)B=0AM=m|i zjE#-8VJVW0S7qitnIo(1{01yst{5#%;9K*9y9=X{oj|O7aIPdKk=~enr3ha=eE&^V z*7$ah@-U2){kQcEJ|hPx5e5p3n~3wP-pT{nHV=oV$k)e7shP{Y&bHSJMC|Th17fiwVz*YI5N3Y%UlqA?skO=~`rU>`X9Bdb{<^zZ%DJ;JE*Si z`X?P>i0IG8#>S(EqlQtVk9(>!v#+cqB$((~JhqD6YIP3N>YR#-i>0NcFhnV89XI;r zt$fF14bDfNAd~B=ZC)@T-l|D{l`zPIxK~myz}x*L&2|zvKS5#K1$OH+@3iW6Xaed@ zrYQZH!=#_LEb>J`!A^BcYM`Y0I&G&=6DUWwIVQA^&T`qa$@4UA@m`q1#B8BBu z)>VAn?Vot%6#1h;_=(F2e@sfn_qcI_#mogRG(`3HIUFRY9I!M*PCS!}D@w40uAY_& zoVN!XE!hj*;nNo(3EW*c2SRZ|T1FSQyUD%@r=b4e6>s#kJY5EoWu+vFN!V z3(e5Bc7_IHkZRqYYN3VlPnXilq#eT^6_?Of6YbbLO1VirTJQfCh&I3TN*6ky2I>+M)FfQ;#?M&HnnHdwc6Ni z#YrZENNDTGi9eFI$6kZhPF@p?od2uv#`;n|fV5GM{1Punp^`Gm`vzrNcxAF%gEB}FR-?>Y>xS`I&7wSWHEo(e;dRzB7RCT{JG%QO@4T(`HX zqH`_hcW|NTl4%Njixf0xPcAN+b=_+;xVpF9=?)uAtaCU+sn%% 
zA|DzW8wL3JZ{yYVM~$O7b#!#QBS^gNW*ipy_(_b8NN}tyEn{M1^gTCd7WgOJ1r}p+ zq+?=Y$XfwbwP?QoT7bgqX|fkm$d3Y&;j=p2P9!UyKZ+N-@x7f(X{n<%<@hRWWnw!m zQi*X!%r;^op&L8XKKT8E*%Z&DwQ${(+BPTG-A+lc8~9seUGr7HPp8Xjb0o0ST4BYG^-%8B8boLZ;dggo@H8InU_dkLDy`vNz!9=}wjP~+7S{hBB@ z!k~{cC+p)Ch~*(8i?jv;;az`)rpwsx#^0``iXDXQ*6|V$5*AKcYV&J1Jx_o@bj8Gi zWN3}ple;J9{oecCpZZxbf+duVVmD{8q$FaRHq5)}EY3krS5>#4=@9Xzo6%DF`;FtXHd< z{>Pt6TQLaO=;~*jrNx)mplHHUg-k*Jy=Vby@t05V8#QO}dh z-NS2#CWh~?1w=zbLjmdO-Be;=pa7>Y0@CZ(k;iLz)JUukx?1tVhR4p)t#bKFacUa} zybWzVg3kLm?Ik+U+L?e2Q4;EqbY`r97kff&p9SHX@gBBDjZ$?@`H4qnGEi zAmn@Y2g~>@zAO{)G3$JrG&Lm^8D!V=pw@82&B=)z^zngwHFal6s>6UiEY(Fsq@%^} z(QD%bzg*)^;cpduE1tRVNYMPaHA^N;{vg6}XZd;WwG&HAi*C!q#XS&U9qeio3(cLz zzK}2ThD!%VQD8jUGM0I=drt{(x!Jsfx=a>LhnLKbbq$Yo$%AY_q7WyGM5y@N@U}x0 z|B^!HKKeR`HkV#bz5!9V%ASxjL3kTZ9Xg8yU&wu>uAZKruHRd59%Y^i;H;Chw=s2c z=8}7BUwJJ>{gQkMg^WJqM`+91>)l`uhq*{o34gjLgf^z#-3xX+3f>4tb^w8sfQ8dQ zOF(?;rk&kZ+mm$cM{->smYwCRWe-7~JyS`ei9VNH(zc>#X=KnXt2H|he)!980+EcD zn*kQsHhXF2Y>RgPZ6t>mT56Gic&Hw37k3diV1kDPZ8=X#B|idZ46V)n<`JDb^6#Mn#b1P8FVho zHQr9huT?kA^Ie{1Uok&!Vkr2MKT_1($B7i!D_X{H9_@w=s2~nB>Khb3<*e5#-Y?O_ z0`H>P#?e8X!;*%%Yoe#ru~CD%jd8)-OeIn*Vt;{`$e_+Z8!&QtS(&+4A(%DU#X{1; zQR$>WUTsH6yS}^}dkzB`luIk1jH~CVc#&Qxu1{LoJ2ug{M@H!}HY&RZ)rJhgAA$RQUMz&J%vX3@ea9hUGJ36_fAl zJQpH7DZ4Fg?Qui9C>a%x1Q0$Psf;T~BD3*WnbolI;6o)+sVhujm18N?=wkRh3;T#~ zw+E|fOi=Sh;e&^RpM?;Iv=|aaAzJd83Y*~`VyAd@xe1=&Z$#5+Scd&v>V>MXv5)DJ zc^&*6opH8?aD})WeqUD+#e|R%y;Gj}wvxV(t%fTF7qz-_>R>9zsltMjn>u)^WWuxd zBcb20GR+A>(>S_x`)^4D)n&jgen;k~TH(49LqJX(RDsCnR85(o*&PU|ARF8363BJF z_w^6S_R`W87Ws=od%^5`1-lG3pMIWpZkMor&K3S96M=yjS&b+;o|2XNRM)5bCeW?P zSnI>)*pfQay^omICjvId&zL)jw^Mz1Cy{1NZJ(gT*&pyA16QA(v7+X9x6MqS^%(5l97<(sT!)uP2r9Mf2^-sl3q9>Zv%e$9Hvg z4UDRq;Iv8J+}sR6!f^F@!kO3~8yizjA~5F5`#Vx9$J69ac|pk$RxZ2R&F5gWP;b;d zKTW0F+cNto?c>xzSGhFdBbSzYs;KJ2198u>l7IT&U}YKuCA{c?;TG_=novM@Zy0CW~z04X7>Ke&&nkP1fIF~%q#Lm*()e~K2 zY3B}@G@^cwyzaj(TvW1IN3P5xSNw8_Y?*O1|;0zE7E~_VpF~0^>?1cCL&%A zC1aZY21t$FQu9vq_~pT@nNAqjyw!l7*X4oRlEy`M)y{ZPQIV9iG$#4GH?jQ*jkVc{ 
zf0QyN5P}z^z!!7w%%z#@)5E zm_L-lFpqRdcuK-I8wO`K0V3~P0Hx$I&KI#*-6)Z*u7Bg-7wb9#Py?*NM|DJcx@dQn((zb5 zVz^#D8eeGJAQ}8rv^bK}y_w5rZh_3NnruC+CW*~>N0KbqXLETC?QbOtEX^XDx{q8O zpK08$?ZO_d7w?Wt>vRq`535)2@!6BDm(qRe7r0(VmVd2uE7NcAJ*Y081F$qzb!|GF zkf%?drn{e@G>O*6q z4Azv{9uqV6>wsTNORG#50inuO^kA^B?|$pZZxjz962zAR4DaSM91b_j@bER)*!E&f zSSplRKhr#rixes&M4u^)i;EMrSz@!D*T4uq9bs%do&CKgBO`P3@pO?%E^?SpTT>gH zHFm(P+kq&Hg|g45UGeK=YBt*k8a~NQ5V7`U6V*MzMp#cF+N!T+rObUMrwtheWxDz9 zuJ9t@dXOkI!v|6J%s!l0u;AA(_xaj#^Y7wpjaHo16OSHOg_0n7cfK17I?Bdpf5qA4 zxaw;q1(Um-R+8+=VSUEc+MBWR!nDBV#msJr;CyP%E-CrT2Sl4lkW6;_1hM(xuhUEt%7JXq~PTSjd zwgG@P(sA*6?2;Xp^=-KPW5=J$U^=c7{Q#qXKI4DT!4OPCe zbf11xR~2e>z_3l24f%2`_lNoY+%p@`SiZQp2skoH$vzOM?I3}ObXgu*u^=um>VuHb z6%;j+qz>!}+9)g4-k$lvn?nCg_p2yG(~sNZ@n^Z+E4_+}>MAif-XX_qce^4(Vw%(p z6~ND&HK5G1JG3>U`CVP;Jvqcxd02LyO0urbu0$JuaifsvG8y{US-OmSq#dtOCHKj{$ zVad{Bga0nX##Y+MiC2xK{R7qR>DZz6`%#_J80l-o(KA z7Q*qbK(}ImT1;}@L`ZzS5aPq$XfO?6(8^KrdF*gIyq|{#IaN5h?$6|C5_)+x01$3C5r4|OS5*F1hL6AV zyP-ILE7`*^Q?mG&H2Bz<;gNx`4IGc7l0|omvkcsk7=`syg#pm}FkY{cf`aeETfrFU znq~Gvkv$ysKIEH`!o(t%G;2fB&=ndkzg|2g@HZ6}P~eD#TZy{*bo_x(P1_v(>vlJ( zBUIeKIF*Lm@kFp07G&@ElI6!#v-BGk8-6al>`)GtxgeP7@gQ~}1@%#o{*665OBZAnnZ_%eDP-MB7hM{;q$Sykv+&)&TF~8d0f;k7qFfg8Jemy-a&)Gm{Zs?QPEJS#<`MJ$R0|UwMIjPHNIT|BO1TbM>=Juva5+$fT*YsZr^~SAOK{7HP+ioe6 zlapckZ-OVw?K=6YqeIB!q_^5`CN0x^7+S8@U$L1H?q(bpS)(m|svtC^B zn{Q!(fl*)ENmj$8t3Oo|<<{=)u5i4aA|l5b==wc2aA%s?^&r(_&${qT%6 zt>ssu1Op4p5U|g-U%3-!Y_yv1ZOB!j_g(;>=U(<5%OU66jFW|6eL0T!lXxc3oULSG zXgC%t&q^6MGds)2$JebF;)4*9joxCrd{SHJccnJYO!LlN!oD}EX31NNX)fEZl8R{mOg z&bsGkl3Lbs`|!wT`>}&aH}b-%c(QqH z^9P6ws^6=$9qPKK%`o-h`7xvfc7cy)N!j;(b%`1;FXA%gvBA43oef65PF1@+Eaohr zIK5Ylq7O!lIFVZvj}UN9zN%E{la+dr5?6)ycyBWq&Sgx-P!QE$qb7|gB!QY8Mgdct zn~#NE+e6hTN*OJ2Mq8o5v@l|9P{biXZF_g%h8R$O6MV#}tNSG&_<8X2mhw>i1FAGJZC_4eyf0cAY{ zh2<>#MHG-Fln=ycS|(mg+v8lwNp;hnA)QyentXg~?4CHKCSf&D^o14|Y>YnH#ip&B z*CdCWsbKMsAz`3r(`1OZM4G`sK^Jm4ISXZF&Qb6@u8|WH6O)rW1ax?NaejS&U%>M0 zmolBaMc%~J6s?}L=PiT>+Tw`qvl54VGma0O8{r) 
z)HV5y@6Qkm0yxs-g>NC*V18{H16~qkZ5*z%=SEKUc{;RvsvE{4w4ir zx2`)=o0WHL#-JduV&SCV%?r*iKXWn#9{wYs4LDnGAAB?o>?2*cyeN8L-%mE$LHncMP zb?xr%0@RPb#!cfPy0V;H1Yldg@sbz;ajt@_tc|7AZcGmA_E1XCn&9R9Y8()yFs=AR zGAV_KDu>t(nQtH+4H#FHP(NuX9~$aDxGkyhFD}L=;C5cKYte6Z7OTlR>pnHVl_VK)e(UDDYzzE_mHB2s_;93MU~x5f>}~ zpLg5bnH{bbjq%4P9{qX=p9&8T_oSjZH)XF|mzb|+clcC0Y%JXzu9U-sOSs>1HT^#E zhhl)4!kY=Xcw-J;YuaF}G3xv!veVR{{7FDt<3rIR^%oS!HB11T}t2BTm!jzEO62@O!5bX>_dp$hY0I(@6`n z;-_>}c$ZO^>Ltr)XGbo899}Zm85HgP(2#Tp(JH<;8^NR^CnwL7hm7We!cy&ezK4EC z29c-VpXDNUG0^&==&VtOG``364A!?8MBA$K@N*6D;;i| z3#|>TtnA;VMU6zEZLwP7N_u!)iv@`dBsAUh2DyPHz*Cy~HJK?0izUr?izer)I1c4+ z48TZSuN$fUq!>pM<|rj&WxTb$(>P$2MiNINk0kV>sTRkTOhGHywD~9uEi>n0F;aHo z02yC5Fv5$o%gfu^!H;f97OCX#!=_s5!hgkLjr~q44Mz{xuC@*h4KK{k-|<{~)f||z zH%YL$X_GSglaa9XNIMhT#=X>rIliA9B7nn7-cnhfb66+Im=wmpl}wHy)O?sZCttjl zd>n8a$^Fwfw93J45Rp7s*k^_#FiJ$7JN1-_>Ml1;TFCtbe&~8s;EwDMrnSgts6w^m zde1bxShxyTe1--PVWI4WB|S4=hqBAxoqR0!==5d?pgDe}6m9=1vv$kf-P3uOMe$ir z&EbzES|Nb?3C4yQnvY|3!)lCh^5d`!ZEZhEgdI%aEME4un^p>6pRGZyrkFh*#CUk7 z{R0p&P{`Tsmk}5yugtvN>>!Zvxl3J>XWH6U6Th(ay&qIfOyVVsC6VjgSAG^37jIs4 z1J039b+PCdeLBI#pwHeu$x0I~f5g$0uN!rs+h5QQTYN&w)Wif{lbzpL^L_nzsZNB? zMO_!BugAo$YkGLa>ix#kQ&;l4(Rj01mufj><#CLJ>p44h!gazy2v5&wU-TcQhn<7{ z;o8edJ>@5()q*X@MfJhh0ov?1af+4GcO}3-6$rvcmYst9^{7@khAoFVU$W9iL-&Ph zqWcWQzY2@zu`yVF2aFh9FjEC#9l#2AQd218$Eh$JWR^B)(bENPFGYOFESO|?hQ>)? 
zrkVqRqC@&{GzN$ZP=dfFWyl~-0&$u|5Z)y@StlRWcb8jlFAh4hyh&kibq$TAl$eC{ z^oL18eeLf|OWNf+my1=wdS>#Z^62ZrGy1QW_3Pc~9XDTsd%MsQ;X-;@mjDqh;k~Je ziKLGR$?ubGHk^*dncK-Bp=8hI8T=;x*ZFI(E|KmQq6&p9p~Ea z{xYh)g@qieVFJZ=S=JDB8B+0oB+~d>ieZLq6!EOmvPSqI(UC=j zpHI_sM5madq4OSmk)LQx9Q)EwN$BzVi&msL^C)Cv0Aht+{XH_<2J(cZYGqZ3nc>CA zG-POVbha^fOh-A--_7l0jI=rk0(wClT3MhpJQMh_V$cQ|q{0{bYmy--OvG}n zS!V6!+p-#Iq^??03{dt*ovbDj>8yG!AQ{e)S2x+NXNCzNpmIMJ?VOG^Nt?eG|8w-v zE7S3CbNim2KNdf@p2N?r>TC5Dl+0=<MW=dI0?_Csb<9i5 zqrj-SoaDf$!7tQJF^{CTa;H6Wr)rlu$A|BBEf!8rx>G8y0x+luu@A(a2D+_%Vg3V< zkShW^0hWv1UPY=K%csLh`nvP%G&QndT`tXmr0r!TV zy=*eX3CS)h8mwhuUeL>{t=G~nRx>eC?V&R`@zT%b@J2bB44a*uU1-ofW8nOWKDRI> zKwf%)AbDw%5ggRRO%tdxtzK1$IgvPS@p8&9u>zf*UcL81?@A;yZkW6|Zt{ls-A<4X z^bKC9KlbGGFp$)z#yLGe(h2OW!-II3Zh{wTKL4z1W;hR%?R{BYcd&3u5TR)!_s4l3 zn#P2TJQ=NJEr(VD*&nmKH~QCUX{4DWKNoJ{^`T7x&wvl2Q@8U3GQu)i>~=?KfPAXr z0!#c04}rzbkTo<^)YO!eFl~t3fT zpzdiqrKYAvPJVx}KRfqp+=&jzv)$b1T`=?(TAH3VJ6iNHEZ5+9gl_ggfw(5zVOVhI zg3rdr1_iV;Jq}e)NlnG15OOR%NeQjVr_a~b2itJ$rmIxhe59(D?)l?d4{Z02zu8m4 zw?37>t*qvDt4;C*l3I81O6r^Mv@Win4Ee+HTkIrrq_BM)ltZ%}hQ1NXYRos#anJ%B zh>sIGD7!iTt5l}rOq0oT`t+tm)sgkwn6&oF)KJ#LC@z(`pkcF=5CD&mo#1X8hZ5Fs zZ81;z$EGsc5~g3P=R~3Se5544qckzfQgdT`xnFNHW2P)tJQ4&`=jTQ)A*N(N4rfJ`lJ#VAbv2SFX?DfcFE3Feu$f(DHz7&(Vt&$nq370)qR? 
zF1-){!Z;BA;Wt+UfDG83_!9xKQbNkON*iL@G zHwcAq3?@(9)MQeHRH-Wlhgsj1JuXjJC|`(EnCn{a+#dNH4`~nN4JTP1Ei0&N6XH6t z(D82pP%XD-7kW|Uud-hi6%0FCI3Q|zHpMqp)PZHQOicv1TBraBe0SZW4?qZqhqm$Y zsz7Xnf`&Gyt08*m5Zm7B|Loz)N7=z4BrgvsxI>iz-*dQ@&AV9N^STRDSOaHNXWbPusR}Q(eBfyht8ibx}n1z?7YsSLzOKLE@yte zho2IjYUfC4YC-(&)zWB|Tx*w9I;06y)~C4ChAHkD5*I9WUS%e8Wmq}TA0+9wly#<| zu~aXrNU_$N_LV!XXw*4ZBLsAq*`H+)ceB?xyedcjaP?#)d{RqoiqgyLcBq^0StvGG z&)cH!`=wg*?U2`at}L_)Z<7#jf61+NboA5WU0t{8Og{3(!!1Q4@LV}phCnl`o#~6Srs+U~ZpTtQHu?(^2Gl5p%L8pZ2ns=PxQhF(V!I}0h zPP(=B%`&vNC!wW}^Jfi^2y?u2nj2aNS~BKij@_Wz1cq{ z)@F_kjd`?3U8jW3j94v5G|uFkKeo0vTog_pYDaUJG5KC312WPfvI@Dvf^yK)=QlUE zfz8>hHD=~LoB-TCtYpLuCz3?Q=C!(&p^JkiPTDoy(-I>A zd0J2HN76qQx);BkOc2 z=NeMh3jzXA$5$)8#rkAHYR6qkuG|^%+wHS* z5kwZ_X)6SGQ8rC$PN0Uf9jNa6>-%WtZ#=#i76LN_RW@eVRQ#YPT>6lbh#K@$jfp^0 z+e#0he5Bd(P(T2sg*XJxSHJ@QQjsb=dKu4<*hj7bBu@Wz09AOOSq#f(Bji|W94n#9 zsJ2U-xX6JCl(2z>Oef93`Hs)oi7y4lrSEc+oAklmU%lb!&oS2SV*UFiF2j$#K4W=BW zPutT0Qe>ofj%=G}2dTW4P_0abY#Lo@X*uVEt?O3He>M906&1>ly|jh-p$s8CeRZ?$ zt$~e`pLdO!*jZIaW80j-&O&$LcfhUAg5a007{Gwzq=<`g8=CCiV+Dch>^IkUvVvd9 z~C{(;-UG;$rrk~7>QUo=fe~o zA0vpi_ezsz=4X0W9Qe5iGur%x0-gat>7;0r+uSBdq+`}P7D?xCRu%!_Phwtnp<4}l zZG5a4a$S7tg8Wu9H;@P?M;BP@Z^m+XR85Q(Zcs8NqtxSB!*@vsb6*ah({Z zD)zTi2dQ;bJ+eO(-qd=ir!NHLtMJH|I^{*o&dgQH%D@+u)pS)|Ho!7QB0k}@ zH)o?FI_dd^4Zf80f;5^8(eo10b<;Y=N;}sabO9ZdgI8Z%(VUxr8pOYjKkA_)EPn)i zN95Ec)S_&bcHDHh5I@w-_WHK9QWU?W0dYGYM%cJ6j~&Z2K1WYJv|VTFu= zhKYf8^rN-c0oSV@7)VDvUSgV{m+RENCN1r&ItqjzPSrLG39%4m4UKm$aFr@VkSI_b zX?btuaXJM9OM_Cd|K@u~f+wdYShQ-*HLl8l%8nlkPt{BH?wu#|;E9~Xu>O9_tKOAU z!a=jj*?Jl}`J{PUbnys^9U4Y$6`d-V)2R?#I?CwhAAA3&0!6+zOEXHBdyA2ILBeYX zq(2}U8tGo_zUs0fImII$V@q1-^TvzUR9hTH5?N(AdPp z=GrZ^THyUsRGiO>`#c7ayy~ylfiLB;3uAmF*S_zpojwq=y}Xg|?$+ce2}&F{IKWE= z*I&+4Ol@9>Aj#H7Ic|^S_+_O0TH>nxwYqs&+0yjtTJlKKEgq}}6b3fG1#*{6?XREK zv=O9>zzG=NHN;!(v7BK{%2_ZB7fJ}x*cBKhYtJFd8m3h5<|`&9_PhP=Dw)Lfd_xS> zWcu8M?OZbF*4-IMETstIzAzUyGS}Wv+w48?n?hEjG0NXamiJ4_*Z#n; zq`|xFJldv?Ev>W3V?7{CE)BQT+}0pPOT#_r7=i?9#Qo!m5m@x=fvob!!LUJqA{)uf 
zykywi+R788c}2|a-1!T8Ku@z8VA<$0e3i6m-@f@n1=64QKNHwBM%|h1yni6?p#91+ zA@kb-bHRH*IQDk~p}cSDLZrk7bKfkXI1+4(>{HqQSm}4xFgpXJW9ypvqLaOgFdGX! zDWfUBjd#qyACUm6DvOKXNDE}fNHZaQ+)l74VTzhk=M^2Clv6B>qbVN+>S4U|{NtyG z6KP3Z$X$4JCKl!Ln;lR9R{8nps~>Y6x0%w~R{|?QdU&p)MG8GIOm4E9&523fO7W%{ zx(+WFHR}VIaq4Fjre+f-RYi|g2Qc!I2Ua_On*mVRfBeqM(ppMVE}7Fq>G8=ut8Sz5 z6%qsjp_56oKk1aL0tsL+bc(Upbq-U>@CSi2uP)G-3t6HVjkE8i=Lw0Nr(V}!Erj4` za)1H{xTykv5faFT~b+dW9ZWhe#6P?SpUY&Sw)=$x1re6OUO>r;5>YN>kjqGtuI%<4%i751VP zF@to0E#_%*HjEM*JR>6)b=f<=>Kv~%dD-5em4jo}4M&t+h1ZS&^?RhZp@`vFW?jJY z1N#CXQ9wsWCrv5`6$7QDxOf2@Dw3%%vh$UE`Nrdn-+Q}Elya1WKA%KYO~rPuni(XN z*<7MWjq~C;qCojJihtb?NKAY@kY*+SP5xJDX-!pC*emQ&S7bWfX`GS+c-`J6EY&f2 zesa=f@r#E@$Jbo~tk5pSxt~CFx22U;5ctzfSy@mhfn%Lcljoww#YZ_;jBC3v)eun} zs2~F~S8+jsRE|=yZcBBBc#E9suO=F5R@Txr8qP+amuXz{k3AIOnYS??jBvmc*^ppP z`uOjF)Gt3Va+c>s200T06@bbTC?KV}K&+oldb+CG#4eWi@25dj&nUenwUmFUfRRkC73cSoCNXU@qgo4cDgKuusA!^WDE{z|~hBdK`DA9GmRnX!vVvb>v?P$^2A zL$4KGdMncu%@mDAGRE&Vwp4ZnB7d_B1wF~GLk^wb zT%sXIuh58R7(hZwSgq&-iF1fRYk9Kg;d_H0K;2|W6AlS4qY}X2Z)_YaG+k=7L%H^* zfrK-c!>}d}nEA5*YdU_jt`mG9AviQ$CtLHMGDD-p!;oX!zM5q2{p4zj(gH@4%6GI( zwo7P7&fM~sU%9~Fit-cM@c&^Txbjs}qN2*KX1fBhfRKZZE>D_}l_*_;f$7b z1ryla*PqMKI>BzZ;LRLsbYXsb;Wga0Gqooj@%1!*nvZA!Z5ow_a(#eZ&-|1L*m43W zv*mB6(77q^?bb}Fa4EE`)~U|Ay%VLh=>$H-tPO1$^3%Jl1VCc>J>M~Epj^kOC)~$k zN1DI-Xr!>r@9|;1#b2V@c9zfTqUaiJs|J$wpNnY7K4bL7G$U-O>j&En@6{XC?{R4Z zvoGjRdzz=J%e0Ckq=m`W1(lp|94oJGN;q1Y_->_@BR>Dv0y^-UCi`=nFSPMGf=OQX zWIgWBXg!ssv}Hf_s?u#{bRd?6kLqHGte(>%RiMHz&W0!plaL;uxQD}Ivl=wM zds3>No$f+h{@KJd1X6Ih{cah6F<{@M;Tq!P%QG^9Zm-{t0MLi;>~ZL(1R)U2DBE`L z8E|2>6VrjDd}ctM5McWkj`P~0fu-_@B6u*z2Cj}x8L>6 zV!2o@l)0|C&htFt|NkAM%lUn9UU$gZ6yoBrSQ6(J^+f` zLQ#SJWsn!2i~_?hKC9Zq)Zf%tUQL`oS?h1YFgqcbyD z*EB4!$37%>YXVCxdSvIe-T5g+A=Bz+LJ4!}9zy{4R&jl{pS`a*6M zI=RJt3PlZ$F*fj@0#Mr7J=u(lg_ynFI5+C6U9oXi#z6U^zE*=6r%rIpc^+D6`%lYQ zIuLSabS}S}^4i>^cu$>3Esjq%D|EE6Wx(a==n5L0yY*k3(-j$9QEn_;TtkC{e|*NX zoUj9vt_n@$8&GCgJ=7piAURzuf91>7v{U%H6|@r&q#=LnD?WE!dfM6V*-uVRrX*!a zOS{`H#yHYTqqt4eSXR!YEqZxRO`AdHSU#|H? 
zq{P6Fr_80?91GlQ%*tuDNSW_9AFjPri;GJ@XEsvNR-m!2POC&?Zg$3cQnPPxu=LwE zDW*7{@kkfpHRCIx?uxf_OD5uN))6(F*mRlP}TO)Tqer#%Lo>da|+x0LpF&T30<~MDOPe^nH z>dPYUCrvvIUz`}AXBXcN94g6n0X@i-pU7eg_S)$YD#Bcg@t^h`=nE__FEc5neOKfW z5*9x2Xi`YxtVxyCIlLrTNb3npK|fhi#Q8k2Kx|i$btQSEb4po`G>Na2yPO z$Nk8QxaH(R7d}{;m9=DGQ$nTLxWKrp7D&aFwFEagZa^Dp-EGRPsJFLTWu7Ix&&3RZ z4DCar{f1WdgK^2rIJDnnsB&_q2`>ktCTG~KB1Gy3REB}}IY`vDB1Dp(uj_BR#d$LI!V!_pORB}ZvkDJA|a#r^5rSJ%)y^?%k zQpwRItx&q#RXRa2FZf%GUEJ$2_viY+dKl4wUJikkGIOj5j8i4K+P}j;0|U`8YTv2< zT3m$V(n?ez3Divyd$z6#W(Ti!g9eAOKL`Ay-PEv_w^lV71LB<1U0Am0)nP}ULL#S? z?saywBmu_VKQ<_)N`jN~%tD&di{bhCrgXzoB(-SmN`+?R_kG&Z2h|r@^J;PGpT!{k zMGZ;mn6$Su%=SNwIBw;;4DlI{DSHXym_OB0@eDT)d(AnfcWmCPi$TjCgh?wyXA>n- z6V<_&%zMEo68V;iv)G|9nWSZUq_?@{tq-daaI@sp`Yq2%XQ^~!TAjSf5XHNAThyK2 zH?AGYIHUe3DfARqxrWsSVJ#IhRqT#QdvZ^(H%>|9Wxqx0oa{~!Mxc~QB_^#4i6&!` zG2+v!D4mkdW$}z9eYP^bBg@^|rE69623#wYeI_PiRjEZz#lXalsgw5pp+zM819)_Ak+dZCVg71K!b<0J&&J6E}-%+9Ng z)7iy#r|>Ip&<4!2wmiqEk4gHfc^G}i(E8frrr<=So?(033oG!cLqA)}t@>1*ZA@Bo zJ!jC+oYE`N;8&pVWWtji>*D6gP5mN3WQM7CRZU0{RtXkoO6vksb+2e2`L7pA$hs)+ zy+Ny&MlU^=F@rco1htUm*F6_yt|4D2FbC#b3AhBbitBCM!>%0 z6llA#Wk@7{4tADJzc)t%yRK}?e~ez73POb%tRA&{*146Ok|ms6im2C?c?B!mb`VhIBWxPED0EZ3IjB#6P-7uds9%dJ-z-0NO2u#ZYG>!m z4(=zb%HR_nay$S0tyAuAPqi=*`p;hw0*?_r>&nZQK$kgOp5-+L8U&wz(~@!-_-raz z+S~7KAfP~4_g8x^5AdS|u4zl{UF+4~agtGTdmslF6crU`L+{ti{W;Gv4~@4Kw~P{;G0tYg2G# zdgOYV_n-Y<-|IAedJ>d9*xMT>Ofk0iba%d@s^VaeR1$P;0wl zzx)wm8JzkJrsn2(Q&e&$mea$|JR9K5=1=-0x(0k+QnL0+oU{y#j4wp*D!+U4_2Gl_ z*~%%HFKR(Bv6;v2f{%@@2qZqhs@I!S$;h>hw{+ZVQ`U5}F6z*7y&lp1h%V-dBo_pc z(Hs>O^?;OvuhiE-|8%?2p&8X_;~+B_;qu<)dr^s~^ZILmtw}QUEzqni|5^@QVLF%K zmjj{KI>{Y@)RL*%AkAqwe2lQl4_gt>unGPv1!p7^?=+(PDO9tNh*8~>ML0=j{WSZw z1-XH;pSFgb^u#wz2S-Jn$nT5<(^wuqk9vY{H_;J1HJRT90;Qw+*%yY!`1O?&n^YHZ4#W-%S7HB)0+buNJz;8!T@*fVEIlLTYVHx7~tpTe*F0T zelxX#nCl^Sg8amcnbyVf1JKQ?Zf>PN&{+56~&RF1zFT?z3dGi%Twm}DX_M-F4c=tR8p$GYGfByZo zwi&PCpy+ONVjn42a6nMCXrA)$=f@)He&@?;AmwteomHnP9ldV8n46ewji!_|{v|al 
zI$fsU>NYoN*XXuaSX4q7Ds$`BEoHHDnK{Q6`0nmxv0;^6UoF*J(e0gWfW^ZkWM=m; z{x}l7<@o-+7V2mgt_wu~SxRkQV%N&~#rbvJ`YTP%s~9FcDlwP+AI}FCZTx_yhRUR! zh9aVwXg0V)P|1umkHIt}SLOS!|du9Q^#*p5eLv2s|4=3@O9w z*55v6auQu6cvIzh)S9~HRG->77lzdAHm&saK7DvB2;bY!&`f#zCL`OkS65W7dB%{g z$^YY%MhHp^W!(EI*UO=+gA(hJa19%8NVxDSjYuIEzcvWWA|oO;eNk})ak?~C8u#ws z!Hu5d2rf}^}H`H+(VQh#)m&2?gr zDGm-!zETFP!K_g#%M!UO$i;;~A^e&^b>5fM?`RSdAoXCNcx-g^8$kGq_|(ywdU{UpZ`xM6lnJDOYH7Vc zo!@MGwbm6RL1yNrdkYl4&91m>MGh_dNF)e~lob?Fz|93%M>P&Fu<;;AC4#SCPwU@! z0AHNB`Q*n>#Kiqaxw>f3C=-1rnuyk6p0GU=bOWC9y~Y#X>wmuDe#iLHC+74 z(PT0u04hoL^S8iKR{`2TYN7E{w%;qf-@i4{(bGH6O@1F8ZA=}Snc2EZKnOPqA%AHW zILJnc&z;mMVr@fMko(dWHEJ*HTJKhGbm`yOt+ zf}d{Hx$c}Ex-U+xKYSTM)?yCF13SfR4fA&DjW55g7MayVwtQB)tBlOqhT-Or*Y#z+ zbh}hZaWQs3U2a}Z%SG=F5V=e+JOt&vtJNODKq=br4ku@4-<6UnL%+2bQnH|6$0d1l z+SVIwZDTzrAhkLCSy@td!Yzsj>&4^WudwlM+)i%l<{R+}>)u~4LvN7!P#?V~M7R_r zr!%CPn?IEDb+bZ<>gYGB`Q@9kxET8{vCbQJs3nITT$-omZ^fq_>MWUxKW4@&uS zGjqVvbg^ML)lgSwnGY7BPq|Zqy;o6Vi~OJd-2Mj+Z`T=z*2_H67o15nqE7u&L!CxKWgrsTzVsQ_;l20DfEh6A%w~`xT9MCfZS_Re9TBR;=ZMl3=1i|o)%E#SCs0W~ ze%PbW4#!{KvT1N4_m8=GjGXKBBq57q-yasI1x6ELYNU4LE$HrKR~G`3(7`J~!<_K|!_ZjzVd;Geq*vPtWr;~iT-ptH zBkzFTwihKulDzME0%?uDzUk8$_Zy_IwpH>7@n8n80xw#jhLe-LfkDRUTK6e+3%BEF z0C>=?uQi^;FVwgJ83s@V*Q}G!N`?6Q`)Z}4Gh6ke9QFb}UtWH4Xt|K2H=lxhfWqP_ zwgjoCyF_mw zN}8oj6B84kbxR$6@8~!JyU9%r^ZG3MOfuTfOBD+XtFiLqtY5;PEB)O@0gj5P8x+7U z*j-y3fu+)Hfusf*k*j2xv$MPO3fkkC<1bDwPS~TKe$N#Y`rH?L1|~l z*w!%wdYLO)*awo(21eK5*pZW8-vU935rCgiIY%G|vAcK$wUP7qRqnl@4hR9co_?(7 zTvv{J^PZ#U%=z1jF|rJJyDRC8+@t})cw^~L-F7Xx9Jm%(yxkCEjgktSNe=U8!Lp|3 z^GD%0$qJga*Xt1I_{7A+bjNJjS5`^MDNSGOS+}3>p+yK#r6Tx0G>;fEf-C#b!Ym|o z)_p+V?DK&EP#%<9&s>gb2hee8rkh-k8WGJlh+`yaweoS!th z?e6VT&M~owxXSD?F?+Y0jt^B9IzUPUYvGVbt~9#C zS=czd*Cvt>C#KbVvlnJ&YoNR~%CWLDH?q#l%R6S8Qtj;XL-d1|UbdkT#DQMm4fQjz@FZfV9Fr%s*QieT_=D)f-rTK%Giy;4Q)`4}7XaL&%T5^UtgbrgBxbnqeo0 zhvO85Tl-n`fFzvK+SkVhtdQga?CcDD(?4b&HJ_B}*;k$KxzAXvb}z|TMC711-CenETF z**W)bL)Jeh?k*%{zlwErs8L 
zZ1#wOms!D56|dKhVPm4KPPDo2B2VER|J%`&qGxa1KJKa>SbRr(RBD4Wx>jov{71R4OxeWOkl8~59?B>hfA};ar z=*6VhOl%w+b2PrcwFbMICAe<8iawhQs)0gp90!o;3-8k} zSpP+pe@<5*_~yy#OR@K1O8KH9B4Ws&RTIV)upgZ{10JvzKZ#f1!}HyKZDcgk90r_z zEch4^-5S*$=Bb+!j;}eB-1Q7N$GF~g3ew+SwdA|1-=*43PouoLy!!bw2pGi~jemS$8mrG(dFbh%bB2>(v5wrPMS>H!ue?G5VG1~fzGE%(wFE^G;F z?UxJc>+7L#G(+E`j)}Uns}XmEB^xK_weL!1S<7jqWAkmC%YUwdKn^bxhSr}**Qey` z18kN^aTE{^r-RU<>D<|2p@GVqkyAi`Q;->ISHY1ARZQc{+PQKJkO~hAYlOa%V<4!} zS!f$j1XVCERT-qsUB##%7mkjjcePxRFRP$Xh}{gEy1KgN$}&RN51iDZpo+2@-3`fp z<$gI|y|-{dLPC;2oX}wPH7}2eGik2K?ZbwlhX5}x8yj!Lo$Yx~e zLZz~>$%YLDV^%jer)NCDG4Ac@xy@vWK?}8CINj0Trt?d}c|NAc(X0hIWP$ohuqrSmqmD=iog521!bgvDsRG5Py5t=&*HCN0Po zt&o!p=;)|i;Nx7_zNyeE2~sI7DB$$_3sJ(VHpx0v&28r8Oo2F$TL4oxq0 zB8$h*PLgiM2BKX1{?napmeH|J+3UN$YQA&(zI*P^_TVTnRR7UxtEmhi%l4>q<*G8^(sxB#ItIUm5^0bge@;XrzwMx1RoR`w$Yc@3_*rsnjz# zcn+S}h05mYe@>ZabOlf$$lmRdN0#U5?u*-J&$p;-tYT~zYyG=c9!i10m+6+{#dtE< zD`7kI>;k$d!KAK}?bS$E9dIJ56iuYvyd3oeF_LTJ#Aaz;#?I83D~$>X8QBL;TK%bn z5>Qb!_+wb~)6&rJI~#uJgm~W4a=xxxuXiy2V91(TqMYj8mV(0ZeKiq}-+wxkjnf|* z{&{oY2o!&3tuBi9HoBQ0Db+jf5?NlGKvP;?1*JzqdgsIu^D~zSk_xSU-nA{*q724N z-BR=w+>8(ie1S968?Y}=`=egXwrE1*LEII`tV}3XM;T=vR#i0yoIaL~qeR5Sh7B&s z^DBu@;EQf5{;cXxMl z(yxEGp>ZamW_QZA74?qB8bIC6{@>dG*vvM%*PNOyDu_u)wiD`oMG;*zla|-D6&2SP zsa*d4b6X*_aXo9{AaqgoMAw&Axu4?=v;lgx5@C=_+He*w5Y@tqo@f>vlHkk(a};+1OjJ#>~d_g z_i1z8aRRX6KOh+lPZO4QxYb~TZaG)3o&W$w!PuCijjzf8ofsUR_SV+hrT;!`#cE2J zLD8bHa2qU*O;@++7Q5h@8rNsP-{q=k>*%;$&j*5f1Sli{3G}#C9KFS$2T7(MDwzXy5pg9c4_5f2+VI?cv>%tInUGixAOV`TwiQI^+qN0MGI0NB% z8SnVzJ?D*qOoi-`>Ww9{T(e+2nu*Ml3`&g)oKFz0gUCZ{h80 zn~8Y1j}DK@0P0|85!7P=D;i)aAcqAno=e{b)$KndWf~E$)%~|pAo=N^^|nn$$+Bz4mu8gs0@{)W(yk)fDaJSMCLV6<9g&q9w}fs{4DE@W%nT1OhGLMn037H+!YiR=fLA`v}9y&Z_mWEJ9sL) zy*tS)QqMq>`5bb6J_Q_BpgmF|;ki8CLzwT<2UX;SwO-u$~D&;~iZ zB*YLvN}V?DLvt}PLn9+2XPGxNb!9=Eg$p-3PoUJ&dm%CEsX^FadwcuP;?&|I!XPvvs%ZO1 z@eHq_)*&VB>K1b*F~%9_@jd1g;N)j4sjI64hirX4I5z@&OjA-0L1c4v=?msmlpx2Q zvn`shlWG9oA}4*LQBxln2l*K}xFlPFV(YIu3OL{Kl%qdA>M|*TqS1WIB?MCDq%}*;)UlPv;Ia 
za74GYBqE}}$nn6+%4*Gw7}T-?@FfR&>y}=!w{jG0`GL`(QxvlA*-vrJoF7I{l^WLtFTs zlXHAof5c0dpZev?SN=V-SL1_#%n=y)B92M4 z2&SN-k`mw3nu$5$t8bOV3SKYzNXW8u^-$$~;!os>DDdudF%JcHvhW5mJ zdRV~nCI}6{2d3F6kQ7O=!BUip+ZBEbGA`#}xPI@OxW}OW|0gf)J<~0)r%SwuC*dN7Cb_9UxXh$93jbva?u$_J{XQTpbcJV|Uh=v+7S1k?ABG|uN22icBuq*2;=g4xT3b0{u* zFZ)9j5K5|Jc`9nys>&s*%t~#1Nft)!FWhl?FutHv+M1AKX=S?KfA8wHC+-#hnC^e0 zDK+P2Jzf~Mdi?2q=D3Zt@y#_XMG_Pey*yXMf{bdDg5C8=$D$_3HQC9IQUt=sHQQ0? z#!uyp`pp$G-|EfzVIcmR%=3Xw)3~|0xvH`f00yL`K8-x~RZ((tv$EQnTJ#Zq_ZzJ` z=;12XR$zR4i+z@f|GH%!w#y>|SM#fx)vNH%mMq=F>4_&2Q^wbunpSEl=3&vgo9l#& znTin^53g;_?5x78S>`P&BLlP5IE53+UzL95*>oqYGh6=TOL|SYEFRV;up?e%=m-yy z=d8g>`N_=_H>s(SpP|l@vu+^GkCBgB0m z7PL>3$lPR~*wZ)OgX$A?*$W^fHp%l@;32S{kLg35bdUE|4fPT6t^ZrUsvI`#kS$YpGTE=54 zwwJN1)E-*pf_X}vE!vC-L^ie(D>x^Nnm<5r!-)WBidqhB!M^WCL*o2e&=UQ_D=8_T zPdX*kPZ_?K*4iUW#pQEjUuM_b*hm*lKdj+h->ok!WR=LI0jQ?7pXXxK%82WN|Cu@D zrT#*x6?L?HZecj=%LeRh@I^#RvzQ=~>D)m~tw5&IW@-Z}t;NuLeaQ2IRF zF|G<2+9D_(Jx%V1!R%*`txP(j?A>3mvK6`QO(%PBQ~+}q2&y3EiYw=K&d`(5P~GdylMv7kVGp9YL*$kAf-s`GaKaObYAbr z!o=Y*CUh_7Y$!50Yap8z@;&-nVC%S%t79>aZ3fwRX7k`xGv1+)PmK;^0>6y}(AV-? 
z&YH8D*e1cXr)qr+V=hs<@2*FUT(g%jSYu6^4!o0PEO7vl7969H7`K} zS5^jcf-updxgEY$0Nu$dRE;!|{Zdl=U9Vv^H_6b`@iO4Riw1HO2?YjT&8Ak$Mt8v= z_mZ)x)9%$+K!JO6{lLG0W=mn|YS2+}QYDqpfl#FC#N{uoQiEuCfF8jQdWQd*-2_j( zJV{yvgBtCoAmw$bCvP~l6msHneuY+RN5|<}R6#=yp}5EP*^i>R&7qkVhtZ za5yf^o`Bd5{+Ni7F&9y$)wA~AVlaiJk{}@=kxInn%zA#pK@(Dh@`wFk%z5)9C3f~F z(t4w^vXZz@)2TiU;9IoA*dH7|_?C@iX3#?|-uNz@21MdJajwq;gDaDAGN0?k_-x9o zYQ7Q#nBtqE#-7vVS?LSE7Fsy(YfN{A{2T8Cfoe{{M_&UhyXZ4uE3q^eXE8OJiw3l> zs$@!mIJVWf;qTS)^L=esuktrKQXM&_UlW3n9MWrh&J+L-6onhIYqz6h;^={URivV% zdlB`mq7E=A&N}b46y2n|&*&|Pl4H;wu8W9(Q9D`AXKEr<1q{I!hzYfnc0NJ^DRL}_ z5MIuipaJtQ#?wi`LqCS&Q@xL}@iZ{+egZ90E8JU>em7&lZ6AS4e?Q20v(_lag*M4C>asmvBSu;UL^=_1{{XKOc=BW*!i_ z2#x*Ya;9u8%`|y`uVVvRkbqS?3^RTvF)mb&+@sUp>oO+%^~>4 zKsvLzN#@OXY7m<9>C;bs8_2lJb1YnH0?MbWaKXieS~FBEDMF9E*>%Sa0JR(Y7BQpA zUS4~14*()t1@K#_(?D``Cw`+rq(W4383M2m59+6seLzvzt^zO{=;wbul^*30v4RVzw?wFJ8`ro7@Mi9KcBFfitsRt=JP0puqHh^X^{k;28guk`_MyjDUJZ>Gb3t(;kX+_+xn*?GQ5+BKZr(Ooc_OXPH?|lG;}W%q&+Wb zqiTxY{UQVstt{?lCf-Sz)nCiHH5}}N4N@VqyR8pq^(fp+BY`6&=Z#h5i$}}HM?as_ z{{_FCDDSre)VHxOOw%>gBzL4$U;M+ksusVSCgRx}a(*iFWG_?%N34>}@B6l!k&(3E z^(ksiLDNN4PQS^6MNBMBsjJ=^8H~)C4+mfoYpRZpS(}UU4%UQ2bGeJ;U(J7LWC4;2 zxo=Pg9%TcOq#i+*7f(hkU9yH`Jr#Tv-HLM&WFP`)tG(q#7e0RAg7&z1#nZ0TzEnfQ z-H$UM;O^Aqq`A3yjqY^S3EV0GUs*Z2GpMt(^W}g7fIMXlFPF7m13cnrqS4c2uazf{ zEWogxAfmXwwZD|c!Ot6$l6XrsQA1 z%3GN2pnVal%lJq&SYp7m+%;%%9qicHB2{uKt)lCK4^)d-q%&DbNw0m6=Q7>OLPV-s zTIA-vC!3#c-5ttmt=D`K?$A+NPJAA@ChRd`TEi;24&AlhUT|EI~)3b&B>!-CrNp$Uf{KrXxgJ-5q4QzzFka zuT|>l>*Hc&C!bg^mEKI;&+y_+Y+oLrInwz$g+cM)6;Q=uEV5JuarJQZg$;?#{jo0< zd{!gg7WWzGw|e932^@X^uK6LOk1orQ;F$yH)_~bJosl#};=1qOyZk=zsL(V;C_$^)L_s*nL085nQ{d%yIjU~EdE5AyIJ1+In_Yv{!E zaGhCSZQXPoh<0(q}fG$iG(%+59|V2p$PpQ+Na!nR$d9P9sG9z85kYnZ}UICB5B7{ z7dQH!IZXDVo3MR@u{SfrdZJA45@DONQ5skG>+a^R8enZn=0yj(2j;vjQ5gY8@@+l= zWtOF7b}q4AM?=(}C1S`WV!S)#KH`_5`X?*+}?Qy5E}9Zq*P6 z)BOeeyc<5k{{?&GC%Ez@CB?0rY|*c=;c@G`y(Zx(r6}zC{`j%UF$Y77sTB!Xx_o?7 z>qNPO)GcaI&4;uBL&W#JB16DqVo%4?Du$BzSqfaB7ozCZ&M-Ec8DD$)xV9*4NR*O^ 
z+BlIQ-;@%rwkelWi9=zioWxfZmIAd|X+!rK6(x+3Rw{p~iN-f&v2wjDxL>@3zJ#Qm z40QtD(CFZ7(4Z-1u^G-pfK=8~t(4~m>Q0YNOJ8ZuNk5~}6{U}2r=l$M+D1;igM9n| z*;>x+PUgh`x=%gw&^=jkLKxsiVbVrvz*n)-i^WS9TUZkmw6NU_u@Gt^{`hnb1yjvp zUoA_sQ}^*_Zqcti@7i!;FzlDeM z_%BkEZnWH6X$+%^WA9#&mV=j4vPgYJ#X)M>jl&7FXsXn3tRolxRaf=j=%#>xAWLR} zEBE$vPJwW6{uVs6DV>+#*24|PSD`PhsJ7)_+_vTcnc3Pqhut-6a?mRLbb243c%PlI z(mb>{aKVc^q<1#fKk53>CIQhX#J160WmmX0Mk=eHyhY|n?JJXE+)pKBTI1#H!^A+UMe(oc>?+%jQio*|>v6z5 zLkrN}$^HCx>vckLpp?ZsgjWbQw&H81Du3w@554G!+`ZF-X#T`;C-|%hBL4-^gdLg# zWV_g4_}@2cgn@a2JQ$4h)(Xf;$F%f-TKEc1wDeJLQP$5cV+It0t9EOJeTlj7hKsX)^W~&c5p4{(pXC>pa)E z!9iIyn|7C%G_xU9j!6n#W+k}$$FQl1iEo+o2gf70?GwKzuH=5Eo|{H)tKAP#*?6GJ ztSPn=meW*5nuby@h~M5pakWl*`VzI%k~>BG5#{xw(HIuCdukR$2_f zUZ#gvoS&cbSPTWe3b4ti6!p67VB{htCEf93rKgV@Fo&kHjpzv~DQ<1AjN1MelGA8y zPJR84sVQbJ2N&m~1==D&pt_ztj;?D~*jcCv_y?n;am2yS4p_(bzq((Yj5@mW57M{& z?4O;TUozfaBC(3wJKnxHw3*pJ7ee2Lkv~p=?l$kE2iu8o?6R=FxY_t7#S-08SMXv~ z$l_NxQ*q;rdIn}DzRIGWG)93#VHS{E3zQ`lOANvKSS_7Y<&>6Y20BRHyU7dTKR0*> zWK!JM6l+|ev8Fi7mn*FsR9EUpTsPDA|4fRs`;-seZj{N=LRYdct<(2dH%yRXI(`&t zd5QmsECncg#KbOYyfL`#Thehl@N$*fquKGq`x_eSMMOn2fx2L{qi-{6+<7Uz=N+CC zPE?~~VP&lh)KufSlbezC2MZZsL=3Nxq2v@4TTwEi|CGzJrc?wt`dy1Y{uXpv2WIP`itRIZE<`mAw!Qr)Wr&!H|=G zO@zgPNoy;h;7|d?tRYJeA=d|*pxOZ>v}R@#pz#VASZ<^wHR)S`<$U)4g+X%A`i|nH z{lksYKI#I}oq*3fR#xWb?m+}(l)uY3PL~xFG@77?3+}K~0lUK7T<+Gd?`~S|L`C5q ztq;M)y#qh)7<;T7Gsa1i!o^TKhOcfmHmJ>6%GC?wb0^NH96G>+lwg$N1c_>4JUZtU zM`;-u4Ii|QRk?Mfc>V#a(++vjMzmi-pFMwK*l>T!P)%LDTYs6Mmhs>{Qe8x?-1p!R zleM1}PF6^c`8#sqbCKP29W-Mk!zyF=C2~DAJzB6i%=h^5<*nk~QBni)Ro=}LK?563|mw*bvIU5h1_A3yb_SLFg!q##T!@&mfi6t1PY(m@~?}Fih!m4m}XZH(k2E5 z<`C;MeXa_hgmHD8ykxq>}Qyv>6jh& zpXMmFCk_9g4_^u{9PHF;Z!gwj_q(291U(@{nchc)IbNXq;o^H)xq4EERsABnVlcTR z(MMDgnPs4K9RS$+v_DZRk6<`*cd4RgLu3e^C1s4<{~)8nB|HZ7W)jJ=Ufj4;VwW2< zscwjc8T(9CkI!LP;RfC(CkZqyGz0_@lW3c3fAO52adZ9=6FplzJxxViwjAIsusGFU z{yHE8J=o7LwsF28;o#;|j=DQ=-v2|q0Y3}0dT)V&qZ)8WfeSn+Fc_$VHR3hFA<)#& za8UwXlv(*4e1SFz#te^+Ii!iWUPKBSPKmAI{4>`A4GnJuAk4s?ac2a*)@}x<{D(GD 
z&(AJ=gdB3yo2ke*XjPsNE&5zRZr{2|?of^<7bn2E{clv$CXSD3+tE;(RbTu4(9KLyPF-JH z+SyK3p;?Xd?UOauny{C-02zs!+GUoC^_nBlTth&LSyaJ-DLv5bUql}8j@wMWRX04Q z=i+I6Qc|0>ZD2+uUoXXy)Sc3CLL5je$WK^6RG6L6m>;+JZ0eanoPQ!Q^SfBHN# zN26%e!BC+)f`yRuktP@z)XO#0!!;zFS_k=T^XALYUEWe*auT%PMnD_czy*i~fl^yv zee`VSPfO;yqG&|_0BxwzK77O%yY=TbL}#UT2o5ss0aN`>;w+K8r_)|SwXUZ0{U|B{ zOR>lxDgrdG?B#FEU#fU`ErwR#U%X?MPmt%g>11DENrl>z=wOMSb%SjIz+r%u$dU6d z+B>1U%75N!n+X842|Kz@MK;+`#Tz}W+4MCX~BMO7+e&i;lENVW-V*_#BwsP!G&;zd}NUoj1z076eFi9UTvV z>z?^_$}lsboEod^e@5LPue!mU8nApo-zC}?5tFS_6qTPQ>Am7)rFbtv-nG`{%cdY* zKku8e=`1}Y_CPpq8n7Bb_dvYyl!1vUtf&Jh9%ec`LEQqN1Q2yore%S@F~v)YYn|W~iZi)T zLC*8cd&=+2j5RCEs9uXD7^+LZpsH#s8vLJT zM70n~uRBkR!HF_L9}WHL?`c@XM?4%Mz2-#hYt$9aRzjluBrV2OUa`6J389hMU(KF$ zMN$8e zCb6p{4=!_hEgwJ4_QS8~bTJC8qIGRAR=L?cMx)r!r>IdoJNFPJ&w z%t@5)ufSMIDO70Kj@R?&^uf`^7p4=y z>X^b})c%t$=vA>+i*aA8E|?j1SMl!G8n}v!n>H-z2<8fPn|vL*kBjSar0>4VrK~%} zqK{Qx%|-C#XEjglZ3yTj}t3;*8)DuZZqRy zovOxZ#&qi6g6LaxcZwHkYdy}^4APtD?GNG?E(3k zm0|XXq_n7%a7QHT6E&w zCdFOyfF1Xxr_yZ#IWHiYMHaQ(&Op?;wN!1R5k=L`)Nv{ zwR`Iqd=Co|)CHk8KbQ`3XKisEu6XJ=GRWKNn)afy%7vOn3~8XvfI~>)K3kr@H;1R4 z6^{@`zA{k|*P%wkuCPF60Hw+-du&h=bm}a01!&u{HHt=L&{hX0$hUgNA5{4>TUW`F z(_aJ!93M%>dj!q5GLJkGiIf;*Zej~}q&sQCjN|_Xu`F8L95t5;`OwZB@WuJu`kQ@h z?Pl63kza)a{O;nCvvfY0_~Ixg`McI4P z!34^4+FHt(WF_fRwDBGE>jLsAQTZ8yP1AQm<`#%yI1x9U1IraSKml?$r`<6o1^LT1 zp3Rhsl!A&3pg8t)QzHXc9#7F)LHCYw?GpK@TN|8DLr90O(S|F46$5D3lB>*Xefzq) zHa3^CL5?E^Tj)~+qs0CT<%kE;<^xQg$QdB)0vc5YW=26MpQw;fe@I%}1{A`h$WHRg zE<6lS-%CnBl>n>~5>6eUc8fH~zP!BTxUrQ}PyoPsz@~VRsnu;>1(K%A#gZE^Wh)l7 za5C%$K+u!ms2dv_GgC?S_m^HR?si=1dT7$MnjvCU_|e2kIfGjpg;WjKS>SbDJ$ndW zBqk;vlDOKnDQgw5I%H@jH}F2^*A<<^)AS9wYHB13Q($zm`cR}i2mLgE2_D2~Jz9(5 zN5~70@Yha4=l#bY92-c}BI}@g??;ojKCWz*(3G`(k`M+sKMl&sGfGz9k@RsVx(dg+>dqaqT1go>~Xr!x?7Q+};pNJwR}^o2v~t3%F?T zijgJ4C))>zq`T|4?;zO6Qm;EJ1;GJbeei}ZM#e5$UO};_uu!Ey&1R|$zA;k9{E3;4 zxtjR*Q_CiYfP!24wxtV7sjGsqqYdaE)HoRf^!l5~AMwF{7>iT=Ycp*0y6buQQG$KO)hK9Y*D#7H7K2te&P-bO2*Jdhuo&jBe`2pn|6}sJ5JKKd|$& 
z20OCIlVX+vmeW0N1dNT)0@y27R-8H4(_mX~-o70H1?pR%k9_4- zynCAQX9O)?#MdgJ(Y)EpVlA*mNMVfqikG(-Pgo$D_5y#JrUJP+e@C*}Ohswin6{Yn zZl)d3z>Qc|UyYy^y*4;-7kdAEwR%EXrc zw`tJG_3grNW*;+teBZocJenZQZUOL`A18iUn$W{1LZa;Uo)?M(g78W| zN6+gCA_Pi0o)aj>_@uwTKnf+ZJH}H~Oi_-<-FwFyK2+H{Jj=ij5@1UUc?a?o6cnta zrXZ(eG4EHUIO_I)1g*?&VEVP}u~r<8$6j)0myIqp(*oQ>f6W8DNYQNNCS%H@-rp(a zt-Ft^t_%Wt;^JcIszdA){vz9wYh7h_#|YI!H1)bPcSqEJrx=V!C`)J$`(I0u%O6p= zLcRCFILsW?yGqr-*$}YK4mIg&ISG>C;J9qYf7G^so#ctpxd<8l|9ZuRs7JU@9Mj%A|23MI^ z;yFu${nFdUTg|Mn&r!*xhX<(cae<~|jPMQDF%d7=8;D~Es)S13ZS4-N?{?4iJ!H;# zyT&~BA{>z@BO7S?VTHC8Sma$hT}B>G4G&QAgS_^Hg|jN*rEL1=;)Q2YHd?>N%yLvp zEaDWje(J;}SRB}2{(zpEWGJ_MOs_-*rg7UY&)PTWk+Ha5!Td$va^loyF$H-)+)fta2(2h-Q# zPDRQKzNRa4OT4|}`V!@5=+n>OA z$8Sjn;)!ep_bneZ!F31sqggh56e>EpeXT*RgPq@CyNQVSdfDq%X1{jvFuOdI|Jx1h z{@_s)^WkxS(OC^(IN*-opvuRfAsin5#-_%kzEH5Ot!Q>#)IMxjX!2}vb}fWLiRrDW zW$gjS=eeLD?=sO&4#~`~K-fK>_f1q^pBm-SV56f^b!k(uXyH%+^^EiNH_1#wnihXF z1v7<3gP`TDTGu?VC|*Y|Dx^L}h$_){5w@?G1WFrmfV8 z$bKlOYqXYMHbj3H{ML=WeZj47T;q5(L`lWTt23rBs}aA#V(D(!#uXHo2{~Grsu-ZB zNJtUHZYAOVozWGFO{WE1n|~xjl9OX^w}^ag?mhRCsx1#a|M2q?M)JJ_;(gM9#-^dY z(UFj!tW*ZW&x^J)M9{d}r2T|Ai)Am*t_c$QOPZ?g%4ZKh<=BQ7%w%;Ryn1z{_BzZL z-Br&mSQz7NN*+`uJ%((*Sd=+4s?5a5*qiUKjv1-rGmd#@{8I#yM6Dn+7VD>at!pP} zf)zT6B+*87sJI0)t+@P;gJ)*nbXs5f7`aJiW2|M2nsJjo0;9)Lt!7f}(A=}jpPT|G zklLjkG?(MzYlQUkDkmNu9-(A(rgD6Dm&e8PIa_LI3+F>2;SRc>e#sMj?)mBAj_D=C zJUIyCa1@OZtx5PEcU@L~Mos3)#=4jo-RwA(x9wwsR?x;kUnEHYzraIiarVU`v*m;2 z^6hy2fo-w7$vy`jLz@ai+dE?-!JGcDCiYZE1oB-0@BIFA4}$W?N@k3}5@lNEVXZ=y zUhZUw5KfY8Rn?i*pnxg22)HK-4x`R4EYSw(KO`11r_jVPn>EdZmee``v@0Eh{2x zoT))JmB<~`dUWjY_MUeu1crVmjH|MUN8oDv@9o?4s8wiAeJJ2U9goh%B?y~GhI@-0 zE-QopY@^tA;@DoIiaq-5E#sq@{x0F8^c+>=zd8{k0u&GHg{sln*+gMdP@McO<@0!O zxdY<^*+R|ghHq$kdSkP?3nkbzTLqpc|QMKUy>pS#$P3HCbRQ0%lfd<0AtYqZ4m>kM?sF7U;pVps`r=`zla$I`twpQM|tyy|a4NX`C zD-Gr__X%Ub3|=Ur)%Ltu5>wV246S?C!9yO?`se0{Ll}jouybZorD$mNi#Or@Ul?2<^(@vm_1xRW5Wvr|xi1 z5g{BrgYp?buGV8##8yHKKuZ|7xJdrl(=vnOea4__?PEShU(YpcVku?mqw}D7)QE*e 
zV4N~DKOdT7?CgKld39cOUcCprW0b%cq#O>OPw(rh(qp23+a-f_TMF@VdcdaLjFQNh zh@oB8=_^oQv#yC%RSiIEP)apszprmuX1yyvx+c0+`hJP(71*(|1mIsJQ1a*t;((%` zS^-g5v6 zHt$=hsj43^YU3GC$uYCZx0*#vbU~gqiW&)?S7W-hb~x&vw?|4IN9fh7L<>vIsSC~M z@+@cwbKo;HG8M?;1St~EVNmqy%^7#@=kV^rWjjm?NIzFNI;3-gbH%#5!ma|`Dr>>M z4}izQh3>#zTC=PBe)_u7Gv>6yAr)&xOMF= z4}6Nu^Ua%mnoZ{6DL1}qlHBgDAFY)5N%%3FZTDcktdt$ONTn#lUs<~K5Sr!4ef)?3 zVvM$rJ)aKmmUSP$&i*)^N;*hDX{x(JuYrF?lBGl|z~VRlyUIdLQH8C5NvtWq^DyvZ8Y4;}jMJcVQap+0OLQX$VgBM*0WNH9XADc^Xqxi~oqBPc5=0h$;Of2py~z`%g#dAXPvo?))g zF$jIS^&&jQoAUbT1|_j7+JnaEnG3~~t-u(#>fq^qDin07zh&1G)$Q~G%K$nX0I zx$I$*egzXD3s_iK1nqR{R0{y$3tGi;2!==lN^o>I{@RLw`47@>+jG1w02pAMF(5y~ zP{IKI6V5o}uv`+VKnL~lePRoUBHth$;pPl>RTQ~|m|diFkqsXdDz=1rLc5w4ms8#~ z9%QBYC(H_Tuv*({atOR+MD^S232r%xmReT7P1*V85EML5j27nP~_u$I^G#;xcxOXBItW4)ahL{L>CD^)?~AL zIIhn~;QlyP9dQ{5QJ>wSbawUN(mc@*>jv>7C%N15lDEV4kR_FR>Hc(@FP7`~9<={I zB!ng$^j)_=XKIc_TFV$L`dgOpquG9!(z}a^&V!R+*D&~?S4SN~E3(K(pUGY!*(epK zeYdEamfnn(Dndt6ZpZQhW^nioqy^k~+~|+kZ$ZMSzK6 zEF<6JSfv~v)Z&8XrC6|y#AP5NPXEAwz}7%`zA)^Npqw!NEBo7(a)RQGi4d5Nb} zlT|tB@v_!L^PtYl$lh)tq!E#X;UVDU1F-{wz>3KG zZgf<14Y&^PX5;91z;;YyRmn9qHEU~X2>Be8vX%J2^)vidj*#9RB%1#DXKb85TL17c zJDYTlLjJ>(2gA^ELxfZiM~4d)HkGK_lSw4$s)nKU7t(Itk6R`7lsVl|#Xye<*hNdP zz$4U@M?LuAbLM1LyjIiFXPCpOA@+K5lUSshRsSk1Ai$LlJ_z+lXlNw@`KZoYpxN51 z+-i-?#MU&25O2$IXK=!hB>L6ye5$)SA_Nw?SZZJuWWpK0DwH60Llv%eSQ?uau7(%{ z4B{JBWuE#W$jtv1aQ=p(d*i5wx{Vnl@@1aR)DLHQEMC?)`Wtz*=wj}V>JU6jP03J; zN@gE_HX1|YLGR(GjGU*o&#qM!ZQQ*x{(hyrgklY6lTMlg$p$EE16mh1WO!+bpP$)h za##78rVv74JHfom`uh60)8%oITX}go9*1pJ855Bt(0H5d>1u|Ce*f?;92$HZ53S8p zDsWx)xjZ`XO?K8J$9(N-vz!Ql(C@@By$$>I=T~?#2g-igD`>-9L(Ah06zndu6+o8E zdagnucO1FFtVzdL@6m6ZO0KrLdUVI>5$=)x(rRaW+wHR0wbo|2zjBo%0D$?HeeWUm z%tB-I_AV|?KrLq0o!31RSI@eCT=6EqM`Z-c5X={^tR`5Nni#-v&5U8TZFVbT-8 znDLJX`_o3(fhPG;dE4fD%DX5k<)*yrY67B>vv4o3w#5!is6zDF?1}CJ^!fuTUIO}- z`e)tKp&bEqK{-Yb;u&1U9(}IrX#xK8#SzrUgn_amo6=lGGk^A9h3M>eZZ?{sb&K=E z_kDqv?>rCT-Z=KxIoK%WhEby&A*9nLl&0{zHvr}`O&lQZfd_fUy?h`xAz8#NY_v{j 
zrrhv00vw)=1V`EkDl+1t*X8D?V22J2Ltg;a^qb@*ARzF)xuB{HiN)b1ad_(Q3iHuf zP-32do;@=deEP8QJ;ktoYgdF`n73_U%2~Tp!ItOJj|e~KZRf!}U85mTLo8pgg21=e z1+YY#iw%^BPV8T+bDG{CLyUZzj%4ZuxxHj9tQUk*Bm6oT_u{Ey%#^xW_xO{(!YhBk zB4|FJ{w4D?k!^r#x{+hzW%lV|tg7o|)emB6ZFUTm&V{`-%%@`f?+qc$`Bg^+ zF+|pLeQY_r-GcM<30!UGgZJ&V*TZMzWl+cexc(fDc-sn3+Oe5I--*T%sp*q$FW33_ z3aSGw1F|&L2iFPYTXQX|x2^eDQ`7Ys@J+2R|E!Hf^tljfs#nVWG)JU4_s z5}l$_FHrRdC$tg2ltsb-l~a*qjoC3b*)};AazGp<|W7-L|?I#>e|Q zb=uGFY77K3ex#Hqy)Q>uADy4}uD-&@zk~ZiojGd8?{?(>^*)g}Ygq2J5Fo^jG?{c}8<# zGxT)artu?oKGJqZbi?^Vb5aH)uIP{8I#1N9?gm*;nEVdW-OEL~`6UWBZQuE=fL=wWt+!%AZcfpc5vik)ZEX8*z9a;xoYHTRs{jF=S+<=KI zKF}n$hg+vYhkb45P~{C~`k6;8l$<;W+7}RPI$79gp510pG?8-dk3t|KHwTNRgdUfc zGhlf9^ZIM#)vbiQ)l9*-FLzs9T&aKOIBxL)+$HeljUTdFuKRc?{xp^8VK&HZSew&y zvD4*HaaPGFNyMS{(iPiHW{BTZt&Az299=>!D6ym2Og#-V!riH7TI8$Ne}z5ZdVT3$ z>K-~>>KYkA)Zr$PAo3qhg()U_`f#87=sV}=loE{;?H8rs$T(q36prikas^0p!^9GGAo@WGKNTIYW%RGaXhQQ6h- zIrN3l@;vZgxCdt$53+`!YWpQ``*l|gC_u9ax)a(C(N*HVQqRw8#aD+GUw-Ryv%=F% z9sky)WI+KF87a~!gi9Lqi6&0?&)xX~@>4j3^CmJ2Ej4srFFfaI=`?uo9(Yg8W!-Py z@>}-(k-2(r=n!~*dcoRj^r!OYpKQg(LY!e8F-rL$>W}16@xoS}W2Ui^#jVX+rhZiT zTL=~62o8S@`2N1O$zFxY)kHhXR|@`S#)}00Y$W8d5DXso!$p)S8-(#*8v-@|r+(|4 zqv~8=Os?Kq8*Vd7WGl5c!^3@eq1z)!YXac``FE#m=0L(`F8=aX7G4OyB8_(W{~RoZ0rJ)^0)h8%~Gz zE2_XJF_?oSXF2is6+Vw#aO=lfu8wI{mPr!xE;W|l-YALUeufXjC%>V(kw2xr>eT*+ z*Yp<}kQoFkIOBRb-zDSU%)_Cl|7Ct3u6CAGVtz8lk;1vY!G2n)K?l-TTLoE5{!;!T z9Jxo|fa6*V_GH{k-Rf8YBigGFCJ-kb`bq=iO_kB0V-$5(%#v(CF<#Lu|B2loSue#W z?0?m9aB$w_exqOu-os!!Zx2gOuMo$^yuoh~?pOwcVq?@*pm`S}{zy)f$UZERyFox; zk#1t;E_)S%Cyqj*q>?uMzKFfK$^h}}BALngrJ71wu>I%8`cI*Cbe5c_qXdakf>9#> zL1z)?ML+MBq&ZqjyOBz+v;WDGp6VF#JZ1-zGMs=YiI>A}^kR3gsA7U4A0fQDB@1-n zExr_C}|>s2=EOfxK7+5;wWbk-bP)?g$6`rn=|3o1RBB@&cz#wjVp;1 zI1R=IMlqW9wU=q5y2d`nL1l%;od&Y(gqCjzQ%9*;zfe}`U>HUJ=L`P1OKS{gspK{_4_i(MZ8J8A)t4|QA#P6^$ipx&VFAi|7zDn7_CJ!i08eKN@_S^_u z=pe&Jyo^}ne0;TSi1*9)RP6sQliA!0rUJ{t_qOlfmj4Vb=BT+|xzlRrBY3KK7P0M3S@KCbyw4?*+9>ba2+NLXeqA~MbvBysXa0|24Ew!KP 
z*v&k9?)*Q;u6NFWozkix-Rj)QFNYD`s0ZVI**sr9FkkgE*jdq1X()r63NMm+W*1vt z+uk~2N6*flWWaO^Cs%=XqhBXPNoiU|MTqT0tP%3{7uSC$(!Xw)U%F=+ zrN$QIL$kB6=1r>Pdi2$s}ExJ6QN`e07%z z+%##9y0!4B{>(vVZmyuitD1A|qd>y=?+dJIVPY~;)ZH&ZJw+QaLwrqu;sAHZraoN(+!2yF1?>CAy2^mz zA`>q#l)8x#O#frYiQrU552TVI1JL&mcqKr&$GWlGm|ai7 zcfa~Qu8-~A!SF2bWK6fW1jQsTZIy% z_bth`fk|i7D5tOts^C;pTiSCq7GsXJiRF0fUFj{Uo~AH0H~Zytn?REm7kEx?8v??u zrTO4;BG0Qrb7dhVg~j7$6YT6?>zPLjr;9Q^>k+r(VPL!gTSpu^@g!>6g|V>;%dJCr z#F*|_@%Fk&ZS5G~-UMK%-?)W?ED(UYa5|Ns_T=RAq86y4jggp>{0V`nl#D(Jy^41M zWh~f*QG80FIdwd(u@hoE6r!7@8v`Gx?^c_iNl9_^E2~zq)me+lvWcR@;t&420A&@c zCkJVfa7-^i!#yB{_Kz@bV}$k@eb0B*1r>Xb;?M zub~25l6B+C>?{{i^CNn^*v3FSqjrtg4#Y)IZ`LAC^1j>h@o;ZXv(YFzGt(bFJP1vJ zT6t}Deq8I*#Riz?8}a>xcsDJob~ENId4|24_#c>gXmKV={o5n7ahfZC{WiTOXQG>l z(UI5L==$E>E+ZjVX@*}d>VAHz>m>!{YCdDcf$%5RJC5Jw4q;Ew74UiOT)s?bX7{eq z&XsqK(~YbCVHxKw{4R6@zt;tI&sE}c^0H!V?#>c-^u zn$2MH=(qO~Pt1jw$NPu`PmJEvy}vv5kF>Hrh-c6?ur^$zO2EO218%!=Kq=w9wp2D@~id z3I4LKnkNgyrcdHBn4Jri^`0(HJ!|Iyar#d+a#CDFi9kvIO<|1K?ryc1k@B>OgQh4? 
z@l0j1SfqqVMTB>*i)op-nlhfrVEGCvl2I|2^xFPzcQg~C@d!3Wwl@l;(}mSE>SuNkd+={-}3iUOUfcEp0im5H=4B{Vjd`=jeQ1C|{@oJCjt z{=G$b|M^Y2%!1wVs%@n=#9ulyNT_NXU&Owk^EF+Lwwa$0DY%Zv-a z^}zHRh(~Vb@Jiq95m5}wg@uP-|G7t%OW{mRX~c(Nz`bC2demk!b)z9%E&n9+#l3iD z8FwuyAGcogmp!8t&b4;vKuph2&N+L+<%Bj-RaxHqxSeg&LDEldM5u3Jcu3MGY6e8h z)`L=Qoa?-C(v>SSGE3d;RjP3um_KDDkaW3*-C;22N9WVZ^cy-=OC{4vn07B0n}lXS z$4am8>tOSZ6*MLRuM0!k7jc16I{AmJ^{D1tQ4dmOo=REEZ0~9W z3#0nxEsHx6K6BMPr-7Ipv?G3FU9#GC=9VzV7BKP)tY(ew*PHRQ_s)P8Is$Vj_ej}d zBWNH-K2p3?RE4IQM=}Wm)C#Zqg^y%xdQDDErDCJU0ZpZfRk}V&8_6dwb*{2dxVJhw z8zN8r-mYVZyx0qbO;kf%R2Uq+Hj`!3>{kfnGJf9g<Zjc5eCWhp}xL2*!i)Wilbr1bg{1G)ZLKU ztCr^G2T&dP@S)nGx_`FIc(LI_&Q3u|Ny*Ej$LTdW-Uho-L6A~?TnL@=df0pP8LB!4 z4n~zF#;&fej`a(@dEcPyHg9KcQM*A3UZuFs-T3VHvqu*1z2DK#x*rUti!_3nE^&DL zcwUA#h-FP_^SENI(ns9 z|0v*!dMR&x!F2oEiNj!Fo^^k<^l==1EIh$uTnw>!c!ga{tP+*Q%&JKzSE&z~NZ$NM zH#76M0KAbx9_TzQ`+j{56C1C}%EOxd@4ge=M(1Yr8~M3hkYMgE8LPX};gG2+W=*x| zlDFf?Tglbq?t60U_t*(KHK96E26lfsnQ_;>y%9)WyGJb@KHP8O0SY{lz!*U2EZ%kmxk26g~(;I?9>NsIh309Vk0vOD<8qkf5o29$hHkau9fo0)iv^g?%qT z2Z`^w8Y1|B5Pq`HV^GjVyg+4hT2u9Fx1y{NcX3}c8V#(p-N{!YW_wnx3M?Nk%W4gO zPlP3J(j9AHH*Fku36x_VMpY3uUrh&filMi+jXaTd1YA@E5;3Y=Kii@3L>>+khTUbu zy+Xzo(3CY*qHWk!YmsNm>9lR<;xgBV+h{*Tq`T#V}DZ7;x z6!-w<5*aW!pSRpkmApr3$Go|x1j7VZ73Sw6XJb=`zbFYzpRMF zR(*YuF3CN^h%)HX=kh1kG##Hw`V+{BD|UADcKpx|&$$M2oax9#D9Pvy+7bzv7%?FI zPPJ!+77T>tM$S_CTBA~#EZtM+7vGG*cEy^q*q=PRfwAdNz|N1)BlTbdmr^;zASI^e z`EwVl<7gdW+dkn{+&r9qjfH!05U2Kfy6VTKiqS&6(6rxtm^6bJ!e}|)?frF+t~mKv zZrWqoiuy?kSLK%Qp!>rt4(xE&b)?BP8H?ZkZhcGFr+;Oku2oWl>G66?XQA4%b_d=} z%WDCUN9Pw8huG`ehwFZP${aNrN;OV7U5gZ8P7Hgz-S_R5UkA~8s?TZSr-V@ZQLthl zkQB>?qJat!{`m?$N-!%XYkW#S#2-+l9zIt8dRR71d%GJkl*sBS+qsXE%1Y$jJ>N55 z_ZQl$UTv|_aADa~IjII)F_V(@H6o?pHxiLtA|dr#x7jM4G%uMpQu%;W47o%II?Gx0 z7@CBe=HkSU5Y_JtED|5$Nzsk_L?WLcvgnVt8y|9+{jhCzP_8$1vvd9aD^U#jAfLAH z4Gwl@nA_zjPOAq!B1^P)g-5B{2lEYBwV{5OdrD@|6Phqv8a{M7pThW<*rkg+M#gZSkU586BInfPMNTJw`x926j zc8!shVQV;hqah|XVS`(rK|qu+>1?&*rD3Fif!pllWVP?|6NqeZ&sur#Wtz`b$nY=U 
zo9jnP@_!~k*<2d7GmlQi4W+C=H(^x#M~!TitaU;MY_|_CO1W70lV+1371lDak4vyL z8lGCY_u;!vP-f}1lYDGZ{Z~AuNTQ05xs&4kr8QYc4&PdmKV}R46>a?#AN30@NRpDa ztVfkBoLpDcjzCf4oV#`%=nNaP>7LZQ&J)6>U`UZEU% zl#~?wwKuB@iV81vzLaRTbh}-Ippl{>oNZA(oZO}Xn-DjuH&_JMYp4R+^}lg7GaLLA`6d$>vwIyTOT;*slTJo?6x#RaAPj<=<;aWz_u zh~3(6dw)MA>Zje|J{Ox&htc9^S57_QCnX@@F|B}U_40i~BSohMX_bmCXv9?;t2*?= zO)zUYJh~oiY;HIMB{wjng0Mowr}d9uVwcs&n?Z@1f)FPwXKg<`(KW zfrge?Kg5ZJJdY*HHx7r`*{1-#`(c^tQH&geuZg$AG!PgLsH>}o>)RYPU)Ee_%d467 z2P)+&ahGu&^s0Jiq8`1yzP^2Y6AY2o<%V)vR?i(C5~94c^vw}WFRQ4qFfiQjJnY7` zFGvb`ZCR4{p}?-x{n>JVVFS71`5&k~1N#!~=2Gdnd7_dve;ewtk8!ppz;^5W-S=>eq`Hrh0kdq^t$FjHoLFpG@cVp3bUs1$zv2DV$oI zaD|0WNE>6OPK6&2KB?usvDshriHxrk`>E(N>LjVu`t_uiWiQ%U#=@QLP&8D{V^Q=G ziC0xC7QO}EZr;q?W_0gDCv_7!2jJm&c)GwSuxf~mjGV=^wB#2Qr;NqR{hU9+h3?40 zU`!mF=_wm>E$kbGg&`i1l@|YuTyolT*#9v<1lSi=E@uLts zBV(jKe_R#uD-?6KDF#*)?VJ0ti&$nqDb+6CpZMOo-^9*WqK@pT>R>y|LF-ONVjpiEmJ6W_Uh`yc}-c4k45+qS0Jqm#rDE;%fy6E!(~ zj$Y_wVZ3#L$yr4y2Wru}KKDBvkxRPu7J-ua3$RO=7!mNkZK+9uMYCyGRL`M(fAySH zP`r35OrO#a;pi=H;cN`G(Re~o*km4iV{tJ7$@<2@2jXsz2!U?dUYiv3zkyTcq)|dP ze7%Uq=NIOj3TobQqWTLT9`o&vIG;?94);^Ao6RPdq3Pc-otD{fl_ZTRo?*9MrQBaudS7Mp zY#fg;QVe0IN+X~c9Y2`o7b6_uw4J&+%qxcLPdQBpuefg9$%#FY&vCRcX?azZFoLmG z!{C-?A_}>!+Tj3W6Zf{p1x&JD>#X46ElxOPL*So)BYe6aui*v14KVATw=Np=SB
;GgB2eG0G;g za&4hw;j`S$cOqa1PNr}>`Ih)Iu14I`_P?6OEOLS=D&C5-g<6ohKc-0Lwt(qe*~{Fu zl(G32m{W7PxX+h`Z`z%t_eDRPQQ3}LXh;?&OG)PrBC1C;6$~?}Wflo`B~{U=3aymG z&%BYK&+rI1|GfcdrAQ;@#tvMM*u@{k&6r}l8Xc~8W3ofh1rr#IJG;7&i%#!%Eg3Z( zUesHpO{l=J&K*DFXG)Zw5iul=lcGs}Jw5Chu+TDq#16qZBuG$>XE7tqZ*E$ZhAn}z zs~4o>x8D8zfz)lHbVkW{_b74VSJ0$vhrY3I^f0e!LUmP(2Xu&RXwO}qIY#9B;F*&6T$VjoIbqXQF?l(*<`&zG>_e97W4z!@wf+uX?s$QtI)>anEhaG$`njxo1XV zE*wtAsLCkaG<;Z= zS@4@I(c0%B5K*(UZ;oY$QApu%d|>}D!2yhKLUZ8h@{~H>O!ztP=W6oXw7a5U8@+}n zAwS$*RlnGqRG>S2S5EovZE=YXx?rzE8HzAHuFC5;t@PbY*b!2O(3k_;8l!K5I(oB& zKl_jQv2dpkODz_BpP*q-w{tW@!-{2M!A41Dd_^v1@g zksaNS_eEldm24hNgHGP);}5UAIF6R zg_OT(#xw2Aw=khD88|%HOFW2Wnpm9rTuh2I^Z}_@+T^pF*rSq|0ryLc-ifxL{jAu; zh19vHsgFa1nI5~Zx1YKq{1t4+21@2w>l{yXeT`KO9YJ13DXh%(z6>PVzh=;h_yoi@ zLGa0<>4j_s{Mkj?3u@CU>KX}|O0+>xl65FuWV7epS`IQ}E^kw1?xK^V;8nq^BBwXI zp!w_QeY?sQ?Qg!P;S&jpF6tWEnt@6TV3Bn~%%Pv|o2D2XU`7kZ8}P}9}b3H$t6NRVK9lsGxiEw1g9kJ%NM5?-!V^v!dTVIWwc z2?AS^)YN-!k%GLukK+{t*&dVvDMp_Y5z>bVNs@>0>>A9eFvLy#Z$VeKKaOv8r2~Yh z+2uEhABxtUa;CEFH{S&>CE_NZIcr{&o{LBUHhT%A+lN1kMy7|lREZ6zdj zrVf13UxelT9w(!~2tlHJb{na*Fl!#xCrUt&!CbnO6?EAbQNFQZ7*=jL6JFbv|9U9v zrm>-@0&yv#Ek3M`Cu^(R>f3V#Ys*Be`KkTlq$vf4@g#BLp|azMC=HtU+G#GzMFFKJ zIv>3>H%ajQdL3T>HvbUwz0g8Ea%kV^-!b8_6PSimG1$X(t_6DlWz`i#4%Z1FGYNM= zmwv}XX(kcnxo(tZfBT~%tX(lr_i@~Z;c4kBtxmeMx{M?`dDr=G6kG6&c>JMh+VL(V zTT$i@B~aCcMOp^SUK~3DAhhW*R*!rT{l+xceDjA2E~g%-2E86;6F{xWWF*l#WoBu~ zXMg)73!Ve7?_Z56(Sq-L5|Sm(KT^Oa-9mB>$Pl|LpQh@h*3^6*z*S{o(AFvY-8l6k zc?jk8YhJrQdR~GlxHV_TCkzgV`UrQWWKS)7W*VOd% z@Bn6b#0)-!5TDG)uOJ^k@uOa_HxYwAI3KAkng@`S6WjJ~oX}jU4iW*BN z@Fq2JqzXqXnS@BO*tzk>ncCtw;v_n+=}6JXI!&*W1nkN^8hE@~Pr3^u+<|vgIWL3; zbj$`Y9=zvW){Yh1K)6arB*Bym+OC%eb(5C6yZOvWJ}VEq?_H}x!6-e$q0EJxtap(U zQ&ZR^N?J-*dHvtfVg@#PBGcZU?aElVr5(TJVPu%%w3~M`*njJ?W^GrH^hpvAbTO}D zITC#o;#bOfQ8(O2$F)Uzys0^t=oFr$JF4~*v)KgX4@&y!g8Vt`w=c^7@XU&q?$JQU zlwyNB-q06m?{6&zQps0Yzy|=Sx}O9;s{KAYqgz9I6HVp1po~<$&sUES@OgorJum8p zQll8Yi~i8D1M@BGE6lopvRC{kXs@y^UPapQoMkI5@ky7Xjnr3quLVJ9G&FL%0ZDfEhg?FeSFzoy-G?J(6@uO-2xyusq 
z{kXu*6iRdof8tHvT5JkLZw6Vi)`iTV1r==xqa)z^0o48)3N zT2?2iSDGd&4fJ!|$!jZzqtP_@K)M=6KYcV)>z&w7LFa>AWj^|>6({I=8M~i4@GloM zH?Q zjXmukqI$&JC$u&;FPt>jd=~ zWmnrR?GU-7>YtnCRx?gEKoQ?T5gd4^kboMAReS zad;SEwYraJA>50Li`?A8R1s<4A|jTiImMTw-*)t_gI0!c(nf)Y?PS}=i__3j%F-*U zZbJSBUiaA*A+c^WL$PW++gwGgB$7o$9a^p<{Uh5hY@}Nz&%gSct!vw_3#12ECESpj zob0S3lI0N!-d?_foNA};K-ax85wiGnhXbq3X~Dlag}M(ZR=nVyS2h(%=?0f9EP)EY)1yY3bsg8rCO+R~r0uN)%(XwJ!AeVsu%yqD79cGx+(f%Si4 zl>v$|(z~EN8Lg6SzJw;`S=tVBB6TvV>ifq-MU2l3_eT4 z1W-XiAx?kidJ9|I$7`Il!=22@c37R3wC}<=ix+{b_TgCAYH+Z5|ihrX=^( zD~W0i8FGwEf*C`v%O=-l4*I*UEsnG(B5nsg3zy+9*Rj4_dzHOkG10%7k@DS% zH?>v&)bMFSkX!f9h-Qst9sO;>6Q`*|KUR10YkKa$a)t-y%`Y`0ZoDL1H_er^o+Bnl zpr=UoU6Bkv+|Sat_9J|cR!U0iv#eWxtz+lsEk9M-mATG1TyBD>4Jb$H1jc~A+2CbD zY)p*D_8)!VfqQ${lF(&xG|$nCh11Q%lE32ZHuDS;%-nrAlP_-x>{xW%cHFBaoq>wl?_;Bhl{K!w)gqh^8mR#>08foZz52p>y z*f|j`87Xsl85KvdVrdeJKOyl*CCN>?d8&ionozXq`GboyDHdWamkh^dW>n36=St(k zr04h7Y@$V#H|P5Hk-!EScO33Y8mG0Y+8kAznnE^i7us6|_u8!5H=gdg2~3=B z|9qNB38>8I`d-L@elRC#w%9(*8bmn=@8az2HSqUo8-DVa1NDUIM^S}R<#S1^V+@vP z7HieFp3BZ`{qKXM-WQH^=%}?i+XcP*SNCQndZ|u0Qn^aNkoPM4=7CUb47FNfQYGd?(qzJ8=?HTJA@vWm!yW>%<2V>{^9RkjUD*?Jv5@% z&+`8fT@AW@`s4@RWkhgrZxB$Sa0^QcXOa06lH=4sa%x{^?!690Xe`GxR4bcxDhkPJ z{En30h-bJ0G&4Aww*NU*aK7MN{CLDPO*<}jEprsXe8jtdpL=a_TOXt-P8MmnYql0) zAJks5dH!|nE#mx{cC1OUpa08$H9P<4ZtF5rhWNikEg3y!V7A&g*R`fe#KvoM>NGfX z@lo5PR24_fy3gsJqOV1s*4Mu45Ptd!h48;W4pcFJhUFC!#uwx*xw}Rvq+1=6&}jt* zOjBzWkBw%hUSYcBnJr~f&AAUR&(ns}#dafm)n{ro92 zgVFWi?9+ws^_PTW_wut!Y}@ld>7~SijI4^D&jID-D`bP$)F#(b`Z(73Cwsi{^8NoD z4V-jcVb_-DI>tD4W#d{|q&+(U8bap>`^N?~w^PYc^(Gxtn2l*96W?PU-*Q|>RfcAn z_;U_uX6}P4ag(jSungC-l}hB35>Ppl$m9fu#K_-8J`Ej~R?P%kwQ2 zl7E6|vJmvguFkzpif{jZ#F)qoeKN5b=&l)XAh=t1>Q1swtzU^F-}5R7|3_)}4>Imh zoSG|mkY;TVSwomYbMQClj&x{tJvjsUI2=UT8kZ@#US5KtMM1u;8_r;iJFyV3&ufOmmJ%KWJqZ;#5TFEyfOV(WBSJhGW-L4ap8)lxKOBnORmr_ z3jdeEuYW2Ty~c6@$MMk(-2cCrwbt_-fduDPD+MpJQ)|T|Ao|6$BUlrRca;!1SfxzQ 
z%FZmT7n{mfqWuBVbj81SE`*;(f1y(7!v=Gbe%orr70AVZ=xIwbd?_>YKS%iF79TiDpRpniqIe0{|-4O<6j>Cy%ykS{ugxjkIL%5p96jex}pF3+uvvQ|Nizr zust|9yLr-fhQI&T+6j1nIgHepuTg^i7ytQ||3LM=%quu5I$B8X?p7C?v(4r4=Z)`7 z6=@lA8}Gr2d1;rdTY5B-G!+ySK&it}U;jzZKPu{fNq=8k7q@ zI}=tgHe8upb!;!8eCoH|>l*n=$E3sgZGl}v)!KG;i>7e3T*-ZuL?xT7tyL*^a&@iL zoc@5L4n}oi$2#hrmA585Xj&09ZZCZmrfzAu_t10;D3)Eo-zaf^e>J6w*nA};@N*yf z7v|tgAj<}OCS{pvAPEeDp9eWWZaZeswtI~7?>Q}yG~eIMD^ke~$tAToT`RC8YAI+{ zxQ;9h(k23XlUGVe^~!g5_cmI^{T}+Z(zNbaIC7?kFnbi_%hcS_0>r}7JMZ1`A;i@3 zEhsWwimv^R*D$E+Sh8zp%M*&X@Nx31e?{ejpNF|F*`>r>&0Dr?SWT*0g zJ6ux9D?hhfsqjbawv|-v=aOlwGnu^uivh05>Cx?`L##Dnbx#P>V~WO<%*xEnu!f)s ztWalKTea8a)mI@)mi4WTmRlIq1?JM%*EjXlvR+!pscCz@w52J<{<_$8i4@~@u~!S~ zacb9Z`N{4iJw}5na-NpNUe*b&N zz2lzYP(eIm z`~$jD%3AJsYg_A{`=fwXgh>hl2w@RVQW`mIqjkl9xc#1%Pfs73cX}FEmXzqfb(Pkb z(^}HNU&UNA-L^X)*Umk1v}e7dcH8JsnmtX4-EEOqb}C?{L5(!08>J~+aa#)zeS(CC zyF8z}k1GnOG0pLqq(MzBiGg0Q9r-f}OZ%kK+31=otr1C0)AvJdKEYGGY@$|!_t!YSp5i} z-_?C{H?}+;0^7owKdvHlXmzRi59G@lNkVJu(z z%<_`onYovX6VSx9b`ZW!ud`~s7e;=fiFn-|7MDuG!)vh!s57pgpfqLyE+I@!?PG9!EwHGX4Ggp?zn+-Z6KO`J+ zvu@DG#(I;hn^ED>+NNRg`6v0dy$*R<%aYcPWo?!qG2w(tdGP(fMQDTr&vO2ukA zh~8bv`HRY*8nYQ4tQSo4$DsZXmV#jf4ZT5-DFqEAbm`ZHTY9NlPIrDz&C-+%ji%gY zFq*64)jE~<9yc#+)=wp&kDyJ-yiE>&tvyIxIM{8VKKKY$c!5d+sqZnxJIm8J!@|6Z z;;ie;24&FdU*%l87EFJQ=vtyQufRYj`B|)MJl9tDBkTg}cOqoLw}M4D5SHY?!Wb~P zT!(hOtsvjLN#pU9qlkWo%|3t6e|KI5(o=uZThcLLyGm0VLB}jm3vs0$?pd{79-lKc zEsTv-*xe297w-ddGc9e=Z2fE_+p=(u925iijey_vet)jYqQnBo*A{x*&fsWV?_pyy}Q zUZ!-iwpRIRN}fE_J*$h*jn`?D3K+_a5(jR7Ou0e%4%%awCc`ka%nzYVtc zm#XR-8q`YU8XFocXY1SPW?X_9AEC)6;>lj(5NC@#RF)oIK4xgy(Nl#GZ+&3nfe?WP zS!>^j6PFs%@rJ#{y2sM0jZP)0B5UtX;1=r~ZP9}g$gvnIQmCd#B)B?!mL5zQ5yPAL zNHm3a*k}3pBoiGFmdTP=KV(3G1(I(O1DntpAKe<=wws;o)lyG3$Au+A!34RP=BezT zClj)!9L_tf3y+9BgM61Mg3(UF){Q%pU0Q1S>jS$U3ubWFm3hlHXoFR&oaA|TGKnSw z5=*TQhYQE*32n!vQzj2fMhmAYgM7W&Hs?S;N8L)$*_bj|Sy|pNr;Hv;Dm&0X+$0Fv zkJ2z1)V*_?EG+hF(8lBNj15skcko*iKje}C+rHKCrR~G*1$4mk*GDZk zhYv_AL{qhsQ`61&;{)cH&m&UFy{Ms~0Sv}2FhXHlgfe8WJE|B#iB+*kS#?1Lr1A;4 
zoT+GO_Xf&Z@zt@E{UH6UT273>gN!D__wg+WzTB4KMHfD zwpc!J;o&6%4@J^|FpZzKn*(Jn_$)S#YaM<7CCR8=;SPF2^(wCScdCe*uG;aeT2RpN z>;}A;r}5oj1Jx$v`s=FSOBY*G*RF9z1U^{KWWB;dcu&-lL;pgim$nrfr+n4*LXpi! zgnR;7V4ZMn*lxv{>kUIfm1Tn;+)KZ1N5n4!oQ7z%5u!Oo*{KA61^6SX{!U=c;-~T) zMT+_p=S-qHuhMOM8qF5_dDh)m)@kV?Y|kXjCV7=v=M>nEqCoq01|af3t)FeNGYMtx zNDV>}dFb@2n%}uCNR&0PFv0N6Yx8#S$dY!iezifU=xm`!Bh;^e;-VW%7})d&ka)?~ zFL9!#CYA*SrE~Q#yFQZE&UJclpskJlcq8*riN??9W5$Z-TB@jF0-Fk8K-3 z%#7TNclY){r_XLRbOs2w$^ciORLTi~?h-^<(W2R9EjRs43%&Why?=W1(w$vr!Zfe= zom)-%b_;hGQx<2Ioh`H$c{*dVA;rMZxddP}U^U!4hKW~lthxpi;eyZ(fsM6R3y;4i zWN@_)=bA09o%Bm^N#pAXOr4&d$z+wu#Hxv)M1&zqt6ZdWKV zxF|xm-xW*TPRS)?pMd5PCLQB(*o~d*MJg7%OxeuQ@=tCT;YyQNI3Yc+1pX{owU9^m zX?xtoo`|Z~e_1;NlE*U{{?^B%8K^jZ$KSIvD;`6e%g!M`zno;|r8%44)gDX`M+#AV z-X2kNV>4q6{BUXwJ-N#Q85>WF3wtQ;YN~LS@i{TDYC2_PK3zWT6R~P>($!l5(eZ_! zXZth5Y+4c|?4&FNB9o|tYU8#CU#7;atewAUaIHL4f{LE9Y3X8_U zU-RG}bQ`;(|OZk10MrJ^_We zfe*^zD3|}|a0waXu<=~SO7|d+6s4oLn*J>=Ba=6=V@I;phNKJI(I~eRubqgpPY9?g zmQTl}GrI(TKIZj!@W4G>SwPP$)h4Y9^KfIkKD}Dga&JM!w`Aj33T4>kYAp z>TmX?gKD9T#P{6w;m6G4KgyY_3QEHFQqr8<+>~S#Dg0i``$_5QSg{(J3-z*`-%$90SoFO7Y;v!fgD0c0MO4MQor0`@_o+#orgn7> zt^*U5iUqni8qiOUYEWj)R~jDpi5>$)$z1Q8N78t?bv<9EkBDq|e9I>?#x#L%e<3wJ zF-lx#$gD?q`61vPuEsV7(wQVoY= zf`u$_87BQvw@QX^*(%}i_j;|4s;%_aR8SO&uizOLV;$x!?1Gwlaf-HQZEBU3o2>CXZ_lUBwE`*C;E6j3ot z0w(gp>Gg`_2a|;{t|f0|NH+QuV53E3)klOy7)PJ zE25(+;MPd~5+p!fl$t=I{vvTZ?cMTJ1hoNk&Waynx1O0&Db5;KnHN89w>nm*dk7=7 z0<3^&-W1qI>Mq9BPP6$&#D}|xFAksEd9}7ZR}oCH&@d zvZvkIi;7rJ``LU5L%vX< z=P8Y9!!C(YPt$VMWtMJ_tw3|-ytQUYq%_SE;<&? 
zl+DepScaCYM_xf^kh(e5)Ub&R1tY=3uzOl}E+M6Gx&Fv&RL_yiN#)+yAevQCo}k{P zqNg!8G41VZ(xWmmJJM;W-wisAc~4nfY!O$i(z#_l5L~7$ErAj&bhgEaZ=?#{nT4-d zHfx?Fuqa+-%fy&Jfykc&qIB}JKeLXxcJ~y)FC_5&1xU~_vwOL5zbGMoI~FFxl_;C& z9t{LC5KXdB*Vh*@SOx;NY8G@8@KMZK)) zE>iIb+X}lWRY0Ig?~(5Y$EQ0CR)xdgr5~fYaoDpnTeURM3!B*fdi@BWf}hWP%5VQw z9<$RD>r>xO`I?$yx`v7s(bUlx`aoY9`5E+O;yDX?-FB)&O5NBfRDj(&?`!7RJ+wL7 zO(vlYF~9#GC$5KQMA!2|g|YJ!5k5s9(*iTjJ50A9orIxisA$U`cba1HzeZ(lx~hZS z%~~pQ=@XicJ)61CIF=UPywAoK_P65 zD1IVB1yhA_-To;2U-~v(Z*GmXo9TAHglm_7@2a)8cHD^*#ZO7$ZSqK6;$cYPvMkm$ z7@e9+biE{t6je)UN;1{;M$UW>s`{rhO)0PS@il>B&4?z zB{`<{?Bp4J{ENRMl4jsE0njg^moD?LCKPvYu9$= z)t)7<=xAU6I8&n2?HS^w7?ZP8GUo&>av*$tVJyla~h; za>=?&`8sXqGBbK^7l)pvh)u8;vjR6)kvfU_^6WlyhpqIVxWSUQRenjk{V|FdG_iLoZ?Huma!>*{quya?+Lw~+>1EFsi8wpCn|92MW;JRuMEtw_ z`jVnf@PKMgVUli~hZvS^Na1Ef=&CTrXbm4Orvlm-^xcRUetYlvK8C@3(M zduTQn72Ozq{4WA;?js%=g2U0ryfG93K)fq{6*fF9?TeiH&be$^`kxoAQt6+AgNM~- zq{q84FCg0wc!!ltHCxv;>g2CN2lhm`Dzw1ujT!cLx! zn+9X1ZrNqP9({LtS&K!^BVesD7d5!c-Rx2Fso7qP8wl>E-{N)Xp5DbU`-xj(r$Xsk zidBGpF9*J5=RGBS*2}(IS7ZS?E-2z(*>+%ysWCTDUVlMfEQTqW2`N9XV7Y^9oP3!o_hp#Mlp^WZdj+HWR_!VUVhd;U(!r7p^-t zA_)<93Q-mLp(5Fv_S@ALjUF2tE?As_bRy}cYjb5|XEj8q)v}QE)B?E$jSng4c24*U zzw@@*G}s0~WO%IvuA7@1FvR+-rOVdbRGebm^*%W{TVi}{>?p5ChbfLu6Ev|f?!|`I z&iaDg*Gu&mpykCpBFcv%&-b9i`^rsUT}*^h#20|SBj?V+78UeAzEcQ9bqETnVm**@ zA8Z^O8$%&h_^jJvS8Awr2dGp=(?@=~?E=saj&%s`MOBTVqHjNoW^rT{uom)ex6XW- z!i!F0ep*$SG8D`za5X1Lp&I00QH;%s6I0+#C76+K+$%kMwQhMClCQLUohJw2tQ!)F zGRAJhCIJZ!-3zEukkLYQ`*f{^aaZ4IE9r{y!K3aE3dzkkx5>DnMyu+ebS6X+*=K-g zzR$x1>C>I`orrHbl$b+DwKEW*3T!Yb8F48GWvCjo$S{KeD}RZ1afs&fTOh&_m~_A? 
zI;}R+2Q50^1vvToLSnIaG~fU5O}5c0VF_tRSR(+nsA=d#)}DbGu;Efxj!ETd^6EL_ z-!?SdwdZ0v@o~R0WijPup^_gv48G+PQTCXfgIM_=R31Ch{%^iTp{7M7t(b ziUSQGyYn(|17)jW^~LV%8Z**}0*!QNmZ0Ux`Jvn|=(ZE_Lc4D{sT0dzgsE8Hsa81- zZ4Si0FEdJ#Ha9clw!QA{5YDp9)91pCO4GD=7u(uhsq2Xs`arz;GELs}Fp|%(Rs2QfVXavX*2~2$os~5JDbVf@xAn7^ zAD_TP_wO5m$0MGkMp2oyZu?GARs z(6sQ$i&mq_p^t?o&h@E#YyQHUr$b*|q7J!qn+`SA4=?wmTua;S63I}8a)dqTLuLrY ziGl94LA=m=xXs)4B;=yi?~rxr*S^;Ph>Ns*_ZGCseo>(!3@Wi4Fz$<>6+rnm`Q*>`k||n^cf==eztD&|Ig5y^fFJyC{o&A zsFKN%6BZUxRZ)qFi%H{lvm9z*1(q2i4`#kNkNJhg0?-)f=-BE%d`EWKu{)w=_;1F~ z7&Q3upbbys(p)v=ld=C7{o6kEWfaitzX^;UZZ-My?L;L1Nq zJDsEtx}wyC93W$H+`~Ksu1zPRN>VRiH$xXTlD6c0WdVsI*f9&HHd~o2GN_i$HO+FY z%nq|9r-a-mKFy16kejWMZfa;5DLnbp2;I(0_J%f#(K=ChiS&!CFXDom^9^*)5G2Rh z;%X5J5!%-GvnT88>j2{i`O;gipt!J@%VW11h;OpO`GelD5F!3w)IU~&4+wWj?$Vn*Gs_|=Xj)~ZZdP2e^tQmwh*nlPI(EaOMa4th?iTejsoe3Pq@yinS!vLaT>{ z!$k=10gGEfwOdr}w-32ol;)Ti+NjD#djX{iU@1Du*{DoC6Kiu?0D86%|6t+$s&fQYxhT=SfaH||oe zb{^{55W zk+hXotJbQjGr=dORJ{Q-gFI?-* zb&Cux4@>f5qO`j6!1aQ^pe90a-`0{H?C^P7A8B6k8K~V*?e*t06%XdmUbl_njiiL{ zFHKF)M7?iAEsBKDabF-vkbcsZHbMQ2qF zL_KbHrDmS4XXd+{bkf0bH%l)}s7$9hS^m1^HlUbcAW59(_n83SigKBBoYt$ASUo11 zDU4;!^BkkO+ux!y=DZ2BC3nbNmsq+Gc1?}zcB+ICW5LsumP=iUbDG|lFNGuW)fi8G z`()aGy}>5%h39?W^T&yOy#jtHd74xI8cd+b18Nwr@Pm~)EAHqdVx{O`0R*yrErR9T znBd#DzLjML8seNYvX8};{ z`33bPTvpSXofJhHMKe1zf&41L*-2TeKg|nWL!Gh%JLh9Q7pzmbp}BeLQMR|fMFH`t zJ{wE}9hr4Cxm)K}>;2|}n1Nr~S8s&R9`u*u`Ai8?IDSBp80^9|y}{I65e968m0%L|cK@SRR{Qm@gZ8zc9B#Vb6zO@!wB#9{GBdz9>?N3>lce zM%F0C-Z@!L?bw@3Nb88!TYZQysc0*=t^l4K6_2Zj2lj8|b&RpNxNSkM5dMfJHb@`p zGNLc$j>rvm>C3yBsuNUPWZ2 z3Ii7(R!QPwh>4$eGyr<&$+*-;t$Ru}mzZAu9jOp-`ZB;Ff3 z0V;!OYrz?Ls!6{Ar35MW--%cOG$RHT{w^~^69YpNW(otik@?qACYUh92D*mfx9sFq z_HahCE-Z%l*c`>lk}CX#LQzdw8Fe|ZYu3}=Xz~PN6vbAOEAPt%EyLWXVoATg!uWgO zZ@Cr_lIV%$%jW$*EGE7yH0Tei@dTQ)X|fAZihud;DtK^14eIsL{C-2}O`*G`w1qND za}fq}vcM*4<;4ak?M^r%n9HzF*2>Ph@2$RGPz@ep%e&c)1ZQ*I@K-nV zkL-_-PK2n6+GCv(`P}LO?)Z zW5_1(eH%~s?Pq_Q+}#~~gUo8XYCK`A`ivJBz+`>%@2UO!cCg-hYuJt69qTxGRE^wR z;>>U2&||ffuEavV 
zUfX-~pG*1mN0kLcx6j~j4$5WH(q0e->`c=5${O!r#3~G4BuN>bU5J+LAO0yEJTRdX zhF%jh2y??8p?oPrrk~=Ko-a195Qe;d*Ry^Mm^1^bG;3dtflS8dxX%b!V>7<4-c^Z>KL`=3RO02yOgi}#S+VB2; zme(FYFUq^avKy8e3{Bk7=AhGH3||d0depcsBW-%(fyfAKNqFw0e}Dd2W0jEhYcnya zMa#ES6{=gR7oOUj)&Q7dcwCQd!gCPn8bl*@{rBcStJ!jqqyP;dyGj*XGD!iw!a##m-Y8`0^v9E9U>*DK>sdReLANu)wh4_!*JJB{|0~4nREt;EMdyL)Zn)wo0vh2=sG*$R zc3U?>azAT3zM&gBL3s%aNs$)CQ72 zd_<^5u%NFBf$u)k|A1WUd~$p|DJ`zWb_y841{Y|QO6M)q>sIZ=tUO-F_G}FQvHt>cf=J&r- z(2NgjzXmo{OzsV17-q+6n#DrHIDbPMvHaM!C>lPh%QsHG=jph2CquSmp;la+b2}1z zx8NppI4zCTV^yuIj&QGIW0ylZ6KB}|bqVp4*;Ei~Cl-hOZ-^wZKr#-LuO{frJ=P@H z-CIZLcw0*ZGVU^b5bPrZBk#VzP#eT$A1AHBM6B1>K}A|oJ6tn5nB!=O<6VH03^-h z?aBuDYtEr!p&~l!oXQWi+2hs?e#(I1y6lSFzS@CCwPd)F z6Ai)FeD7RSbBhY9`R?-1_G8Itnzi4FC5uy3z-#HGWX|lU1xnZbMrc^fNNaahBTi@8wMK~*9$?RZCf{3X(gqFEw(qX z`v?#rF{V>~QM5%JnAlc?@OsV0BaU!fstH_w@`kaDWyNAt(&rwIkU^;GAM7cCs~&#N zteAeX|zRBiQTm)K?W+EL8i9P{uWzysV|RT zN7)3a5l@*a;EV8+_!*fTl|(ou@fl-IZE^b3eanJ_U?T0fo0Wd8Z(YcgD)e^vDmd-i zS=V>q@N-WTq*aDCg9YopHU9Hb2sYGY!*mmq!jh_?a|8uA5P%2!pnGOyrnB>5z3X{{ zlq3yYWag{ftfp)BLG_*?9uEh@W~Li~1s4G!f40?ZB&>S!e4|IEx~7&Hdyo^Q3b>R0 z(hD!L6t=Wn2K!GlQ`0Ha*s9=x1_v_|64Jqb9aTY}|0m~ZOvqm36cDZiNF9W}8LGG0 zPfSkb%F+s^JiKYaI7B_(@FOrSNivA~@UHqRhiXJE4@=~C$^_{=WKeeSQ}agNm8TAo zM;4G;uYJP5@hnxo&dvV~l(4vp>2egha6FQfmFB$a!Q-+|wMhM16&|DaAB}YLBm6qA z8U^!*ZQwP{r-NQBqGJ2xiRbXStpD~qqdqt|05*TX^w7y-adm5pYdFGLLi%;xB^?Ci zHyqr5<_sUc*mVWn+scU7uz@^t+rcQn>*JaqzC`V#ku}{cTX)%m0+=^dbi{-{Do`s> z$Slpc1*DS#WjL5j+0fDPW8&g0xpD+&Px=SSJ{L^>#uz9mxfvRw7YV}$P_c*es;AEd z3RU4?V|=D&q;%T9>CV^MQu=tDLHd)^CEer1OOnFQd@njK$??3m0>~xkv@0~m$vK@4 z?|Z;rJyuyeEDev+i7unAZZ9(uaV*QzT0jN3R{!Jw^c29VS|C z4wvAMZb~ljT?qU4e7NXa65klyv%3I^%{kl>im{RVWQWyx)Dzz-VI10ysPTU08%c&# zQ0A@?=X$jJeP=!M^-ga_vdCh_jFp}7+)C?m!$GZPCuX?erS#LTC^Ru@tT$yub_Z8n zXtIKeh^}>r6EtE*aG$*dN3|vAe)}0ek%xwoS}l)DVq<_?x<*=3)a5*Fnb+~y9jK+~ zBL>#}7P2-Qd1+0O@N>-rp7Pr}4taaAycv4JHvuISFxvaIM1H6 zzgOG9*lOk<{yy1w=w1eq*L`50!TpNy(|g1}aS;i}=g}iTy?B)7cEon){Hc4htmW}r 
zyUttRt-(ZIO1(?aKU3|oFx*4vWnjK%w|?C>(t01$xrVJpL7(t+Z^w8zU$bzYgm;H~ zxZ?Ddh3RY72aQ`f{QU0nP`$w%rt~QX6<5lM44a#zy^>xBx6%l>m&G19IO5|~ z)zPS#(e*04@tbaCZhcgF^b`JgmQMyNKvmZA`zh?{{=nnNX2(1Za^vu4QLPa^kKF@F zsHBI{+5>b(SlV!SnESTz(TVl_C;SenA`42no!6*2%9CxCHt(fl948|s8-B}*a%1ok zIKDW6CeD$R^ExP3?g02T{4||C{<7wKarGVHO2~m}D=isyMG=Qu{a!s-gUDQ{koKzF zTsD5jr<{0g03b4>yL@yb+MjFQ1|lDPJJBX`%5Um!m|xM_2N904cu{`5tEu9SOs`~m z>;DM|9VzbI-|0E`pgxHtHjW451%)gTE>&zp|I=nrk1)1l?<=dVolYDHs=T&#r&`V@ zoEtfufc~|?pjAl)n6Yg_vPFk<59B(+f#Y@XbfBd$mnz>owtOFEMhY8o+N58dhb0JPv|ndkc=k$ zv?n}NV3?RO{AjdUWmXn!7zHl(o79Dlw%IRb$I>S8Ux)G{dQ+RDkdVB#yPe$KAD1}r zrORf*4=f#{R;`^&?PT3q~U2Po23If zH{9k6{bE(8wNN_xC1aas-bZqw;M8Xgdi(%)k z6Urs@C9cg{Z;`3cVJCKg6Bj`TVyw6n#Z{x*`R>Z#3qW+)bUEDr37)2j*JzG>d|UcS zD(Yt_0?dhv)K1NC@D3d}y9qj2 z10wuNx=id#Q)`TcM5bykgNhs&)A(!t!Zcb);imdD(4Q@s5-F3Zy*H?U(~| zBO-P7iKrZUMF#?@5qeMSUoMyD7oPUlZVWyT_v=t70@~>}@@-pILE_7CHh=0SdCSQ8 z**w_NH~x&+oX_Ng(BWbR@ocFBnyjC{nRL?*|KX(b5`KI_iN1Ky9$HnDOkVNSGEa2? z24q<{^K+;m5lhqw4=P^E(8S&jH(Gf;OtB;JpXVnh7kPbs+UKl#cz8H=zGs0+1GS6k zG{{vUrmRlx%P9Q_xum(G2zn&3g!Qv7CJ;Kjpo^(2ogNPclL#3cMasX^D#TLUNWKeC zX@w7&kFvJg+;}!k3rd_n#O*U_W)7?rQ!UY1*Hrm8&v#HeimjXgF!q zQZH2wZ(R87>O(%>zDWX0(ho5FApHp%rRFr8m;1?mhiizg4+)&ZfQi6!@F=VMF(V)^ z?<5B@HVAp$n)8*}29o&VR9Dq{#^c62QeUc=KG*di4+M4f1z0#(ZLbZ*3GX<==H}y^ zSNjPHT+!l;CZ$|4c9$9T2u&}r9&QkFpfd(ye;Q3_FW+c)5^<+pSIXpEU;jbk@O5*z z(f3FE*|^CIO-$<#R`{OPKj#f;En0{kuA%OWw5Iz9`A&&d{r1R2GvB9j4HZt)w)VC) zw~ASsVrpk<BMuw~?87dwgUR4_d09q%)r87&ewHB1qXSd* zk!+EoHS`jUD@Ntfhy{YfrNlX&K}=4a60ad!PGyPZ%#{8JgX>qrkI&5sg)(6e+2i>u zY8TZRj^jgn2PREZT%m={_m#1CbXEQ^2Dl5wZ%7c=;L)anmTON35ZmGCW~cp~#L#j3 zzEN9J87$n`v!bi3z^+<1_<>13P$+ZkOGq;*u`f1!_j(r`=o5iUlr+N5T$}b63R!q* z{DLX%m~TExSPFwbWhD&Rm1M`#%WZ@>t4Pa<>_{U?0Cg;cdlqf?)hQ zM);q`0dB<}Sr0g>qoX^owzXgTa_AGG0d{T?V~eGwCE+!l>(k?_vvs{tldOMv8laa+ za@ka0TM&3x`O4aSu4sM;?Vqy!d4GL^wxJEhw*pJ7pdw3!+!lIraFhqfUE{h$3X}ZC zV+OdWnuG1G0JICNM!kKlc-G^}NxIvu>x$Mx^-ev%e?T8eLBhyy$Mf$n&OBz{_b${i ziD=3q=~Plavzm_X5qygR;UY9Kz-Zj`)Wm8Yn$l%dUe~r%SaU21%=&B>MCV-{&Ru~T 
z60Q%WKz?;~hVJg-XT9%1He?v*xYb#b|C<-=gP}8aB^o6~g3rY3kN&-=z(nF}9~w$4KDeBf z7@DoadKDM0sS5MzYHD$|#9dMPH@kaf2^Z-AksjW3*^kG__jDQ*J8(dN-$1@pP+$Uj zgO1>_g>Z;jthDwA7+aE>^f=S46t!2Z|BUJavVhA#x7ZSLh*@vdde?C9!@&%{P^Pq# zQ(YM1Sl|X_W>(e}D_i0a6C?Y|v513Wjg1ISuCKq}kM<$|&$+kqpni_p?aW5Lxd zni@>vg$t`#7)jj2vfB;P(z-35YvaPkU{{89<62RD2q6zOoDQH(?^=j<(fjyeu%M&* zEv0scf`fiYWE=Y%iFdF|r`-fE1<2sG+InUbFjr=e5f*K+eYVGQB!*LYpKgnuv>MC< ztAeY<3xH|a&jintmb)WzWatSpGQBy>ePeGB$gI*?i}*Q(kY7A0;Nj+0iGsEzjw_9N zv#+ExmrALOpWu&3G0Sa{>THpSI8r6`T`2jOCi4zwp97q?!LK2HN3{)J0x$yM`1p=J zw_78EQ;9a9%(wLn0MJyr*U8S|l3O?dFGHmL1kO32p*?xr1_lS8wYe~8w+s#sAO0EB zwqi3GD=5z={_T%64h!^yg9x`IP4yMiR$ z7VXZM*qwn8!!MmgoqM!SPj1vbd0IP;`7To&l^{4)$wvP zn(-FWFVl-F;Jq8CbPwWwf3xG9d8Qxa!CIbn+Ix0O9*M-+3w$jPizylHNA^A21{-lZ z{fC0AJ1jcQ%|H9oOXU7p(Eq;YzXn$u1AOjhtyg9=1GzV@YNKEH9zO~;pnHHK zzE;bNS1XxnDz5`Z65FQI6#O+X2)*peqXs1k91*46@jjk5NL}&X!YhcpbOI!p4kQ9j z2!j9gM1TEZf>-}4aq}Yu4ZWPA5=ps}0TUEaN8ndV$$e89LM-+Xk#x7AYq`9M8aDP@ zNdLCT|7mFtC4-<4a_MuhYHeGvYE+=fkpN~ z^WJhm08NS=>xS*Ei?6JTP<0OZ)%>PCK4RVDVZa~wPZo;8ms1iXx_D_HtgUSWli1S- z0HDUuC|h$tR1g{{bx~&Lw8dn0;=V&(hQY;^$Q3DIjig(eSpFEpIT4dc(O5#*D^Myb z%ht{>r8?M)u2R`Q?!{jDs^j|;2Jg~>CF0V)uC9hoyK&5(ucJ0jg4%dNJ(*geFx3Yw zF6XT;+4}IL9p!>r5?H8b`sdYjxY-#@MlbN~okFk45w$-QC`>9TD#XU07GHlpL%p(m zC@HDIAy$ZpR*i`6+dSz?CGgJyx{R5Xvj`1}yMx9Nc=~4)eOFP=T%y{6wlola#)OUoXy|j>W$hxguY<8cbQFJq_63?&&^HasC$Zw8=B^LNaoXCjXCQ-9Ja1{Qq(UISFBNIu-Wi+ z^;j2nVV-+DvAAIuRyciexzEqA&%~q{2^yxTl#1KKzBMmg8w@E=Z(_qPzd2=wpDxbo zqkq6qv**ex&6Uggd{$zyO+j{IVQyDy#4+=tn=0J?n-=^jHeAh-0gA}p+@FR1 z4ZA*#wQ6wtQ8N~&NQgy61-rZG;KwwUn*CK_1-`6Y@@w!m#h0`U40SeE7SDeBWM^}Q zU!O)ka|X4f`GR<@(<`vkYoNe5%-ASf>{qHgp1LtR($i}BkMg;nGYn7xBIfnm8Q=@H zmkUY#%&>k9MiUPP%!xu2Zq7Gxm{??GO9m=l(2#W>sj$xn1fWBWDTW*{+|?gkgosZXf*E&y_Qls+bbs}YbgP^9be~qCu&WC$ z?`Ha2g0G(evtOScqqtU<9QAv-5n|tC=cdMZB!S-P;jsn&2+7=fw_EFa9=}XRW{R{= z#^v;!X?%|!wyI^-+ub%HluQY96v0xtvM;}Np1U@e*BJidWh%4~+B%qjDS3w>&TIEg zphl(MozZUV7H0-|#`<>G#2k`M(R-Xe|` z3FvT%UE3U2+^gd`cb2Im+w1QF?c*fA)m%&8@wf{uSWsaW4CM-f?}lDms7uJ#We-d( 
zMIVpAq6G@kNo2^;*YkMZ0(mdeVQDFqVFf*u)Sk|w=Bw6UD6NJsy>;>-SVMk{szPV% zxv2Wbclo)r>sGDB|E)ej--^EcDxO8`dwUDO|F*uP;9kbd4JTnn47fsFhP*HA0FLgG z{>YiK`S~jeSLX~id1@sj(bQR*2g8WEw|*0-Hpachy+2RHD3Old4{;$FRu_dn7KHs? zJCI-!v+Z+rLlFoe=3~B>Qz~9balJGEvz?S7r%#_+y?ae_)>*#e43qZy_Z8+i53{~a z&W3u$*!XUW%k`7jv2uM|ZRLOdbH)FmSwvlcik&@IXM|Xwlm;$ovKZPeY@dG~lR8N9Km+0AJ4%L-?M0xDq0vbczvrk8vb%mKv5 z@BfzOKqbn$XWW#fyVZ<#z=k6wApBWH5ltO>QIoq99#U=RO z;;#Eygj+zA4CJ~8LV5AK7*)o}4<)&sJ-u2r_N4H%N8g$2m%f2gAO0L+9M;4rXw$YK-GZ0-BUjG8vb&!=jFv`iZ!*-KGL{gq!M zRQWw_v}?q3rcB}N%5I?i(m3k#N^?<(6_ho-DUla>J*d#d6@^W-rWFgSDH%*`j04IG zGZo0pkG@zn+Mc`=5S<)jINV2uS&-6py?GO2idUm{a1$+}hlVx&h6Q#*9S&PkN!Vnp zMxtOS^a8ATKuBcJJf1QL4hIr=mu_z|B^2KTM~ji2#-76& z?Qsqs;yy~?w|o#bIT?H4MiCfKI|K_-A>_lg_mY3Si>h*p<`Rx%oGt}ELm3snQ+uVN z5Gss7`vDg08&5D<*?{g;YkQv~Fi+(^P-ht_j1hKlgM4b-8i52!ekBzY2zqtEB(qI~ z&hj9DfEid7CtN-*DDLdH&u}US{(ENf-|!BA7%B z9A?__YuhQ!E9|GRuw*N>@#nEsQJNdoZlHQ%@n#AC>4)sZ(wEv{ABL{Esm{46{ng^k zjV&aNkqpO~@7n83kWKCn&F&9Fx$w6(IdVIjSRZQ2zpemw*gvngKx7Sp{TbZbj3~Xj zX0!N-j;#+ze^qSHli8sxSE}^vauBjuTR*$OSsQbCT3>;w8ag9re0`}hf5hI7K_R`-n&}cNC zW7sOOPmi5_3P|)mbyKwcSJMG;E81X3Ubgg_`vrGv8**z~1kM$C*qOS&&)1<0}e2q*h$8LuHHbZm&p6{IV&Ntu8c_uSvVu0jXPg(1}?(28iI}6XuHu>yKS9h^SF+b>z zE@(f?JeCiWzeC1Zx9#Sx;xvLOK$3ClTLWNG5bDh#sz4F{V8;sUWp$OWwXCjNr=QHc zbAQ3X-74JsyW7tQr|l=edm<&0u8D^Ih?*i(*kp}U{=gv#D9e;9`&I{jV8%H9NmE804)@!Z|K!#A^KVRMA z$X0p;Iw?UP$5o^%$PJwf-yLS}*J*bfp6~Gs=K~@0dkoQ?v2XGvX{%rgb!>ayM z_nN_<-!FC&E|-3cl*+33>-A-;lxg>++(7{v>`kbo^aW{-v1N9Ub3m22KcCu~spTpIDUnm@**MtI+NVwI zPX-|b&vTT5zi=M$##+WW5+&QLlXf2^*l7Rw?-U}#@Av-V5Ir$pyqUc`gbTU$rebc+ zaFM}o;jGnV69K>2*rcwzY>G4R4eoe(#}>!dvHXBiP$})3fXis>O>92Sw^-jyDOERY z`sxi4rj`f3h)F5FYhcG!EY@ll-|+69t@=e$A!3$QgB~5zvkdzpNlzg@`|kuI=L!2F zwSh$Wb&IF#d0jlow8Z0lW?Pl}A@_T=^AMIbc_o|u=gTE?*JxvA9gheROy9PbP)gCW z3Esm(^R|mQ^yN2_86JVWv-xJT0E)};jWW|TDjdUCSxVXeA|~y(fA9IY6RD%(qMT|pL0GQ54>Xq{xEyy){(rBX~W&8q!*-1>u z0$X0U1TGhwChj^3OZA>YhTe49>6u%aK9`eU?3*Z?)~H@&jjP(~zE_5P4(>t!-t|Qx zDuI16{J zG;S}b`&D`gmJgBVd7(EPPO7wBJBCUKcK^&3<(2LY^_@g%eNB@+H39AQlq 
zznZi86Dl?S!jrM=6Ek{EzwRRJFsXK$JH~5e=1-ci8SobRuUTpd0C3Y^-7}4i z;*F@09v*hSKDJ2U!gAZ5i1FZ4p^XGs5rm7gKA3egP+$)`eVg`nYpQzFbKWKLb>_%= z{RPGfWN<0<5uVz%);m4p@MKiZBbhXTIi0N}irJI8ROa!@x}ARbxD2vfBMfE(JMP~l zXvBAVj{8WQ<6GH|Ft(s!bL;mnwGr3+tUeOYg23fDrvoub8Wqgv&@jro6C5AeH6nJs zGVWsE3tV32biI^TN>G_L<&L_egpEyX+zmg$4muCSLI))e`*@2(+Np%@ODr!hLn_g{ zNq&zXKUOc$DmR?}v}etY`6{@uuK4tUm_7QsqUgcsx2%=m;2bL7Zh*l>*_~6|Ln}=q ziC<@P%KXVK@jBwLGt(!v0cnlAAiyJ(ctb8-gkX+#r2Qze*bKP`!Y1bBD=4Mu7wiQ# zGOg4cxqG^sH%twbfG3xI^A#>lOUlQqaS3ZI<2p*Y!@ z^@=S6f^vd%85`^`nnO72expE{ZE@51UOfN| z^$T>=rCd^~pz84*GLFY$?(V>~1RO)af@-FidgW>bWz4Ye6AyV$UX1^LlXiG}JOH-7 z{`YhUudq#v9tBXmj$DpFs6=OX@5y0^;;of$Ae0INKc66u=rVC^5&KzGbh)&sSm#|= zIb9mi^}eFK{86oBli15p)?n||P}d*0%l+PDC9P$y_EZzfhq|guc)oA$%3EG&#Z?3KnvK>XtW;)&8KFY=)CzN1`Zymg(yfV#S zE%?qSAQK^yv;YZAJL4{KG$VlGitpuYI35=Kp#vx>9aJ9H*!~%S-ns0{FZ_4(j&X4% z$_u(0o{aY1O|hWu`qweyh3#_#c$9p*hIh{%;)3|d|HIeG-R}s0dd5uLEy5T%UlGgjEFYH^S zQe@;%q@wuVAwY?}al41%YptrhF9E!=?cP=1NDQA9V!C|2@P!*)`=fWdGFuR{vXGP) zqvT`d-|iUA9z4zTsq~G#OC^!3rP^bRd~zVBZuOL&CC(&S(5mnF=ef|b#e;8cc^(9` z{`Bx<$biqb^e?ski z+ID_cJWqK(@^a))Ods!jNc%%@t@tmZ(1}#azY&GF`|0Dpct;fj9Vw@wHM6y-{_R)~H_o)I228gZ?!TtG_fb1J`^Ubc)XOG48O5~7{A|p! 
zhUuTR()0F$Um@paWkVjpocR3mKz5!TVE&q&3-L4ew-cNVXJ1Z|Q!*~8e0tA2p8c*U%=&a`{ z;FEiM+|3^XeI80m#q)*ufj#Ee$u!~so9U(Z`<8b6Y_gF*|1bh5EpSo?lC%`&=d+4) z`fi_;XZPU)QL-gMq<8r!Kac&YFUJvKqiLo+3{(ReuKBa$17ppXTTp1@ShCL`G>xm* z{&&pdw*E((6+hy+S_c@y)cKHzI-kdhO{J@~CJ&O{y9p0ZHs5ZmqdSEm7g<|7grvGx_u&C?1DCB@U^pq074o0f zSyKA zBcHOd#$}!+F9_(=1bmj|V6K_E$v&;*b)V+_);)s1s9fOe9%0?|DN0C81dN-4?7X-`8yQSl$2eYckS@s z#_EkPdBKTx*Rf%QIl0SMW2nqhwph$r5pinl*eVFA1(|I^CKix>udJcG7ViaoliUXh z_=t~^^w|H#voQ$w+HKe#u!Wp^06e`jazjih3YhbU+27CU=oSfyG*sIN<@6Ao8H^R% zEjJ7Q5&3SN%5rMBASX>z(|(rA)i0OcueroARIf;db?TsBq7@!stL9D}BcpftHge?5 zRHuM{>(NWsR|V>de~nefkU6m2gKKg-GeGo|i* zh47An(QNUlijOeVrU+OUR{Y{_E2x5WzXU8{fO9FsY42@xfmX+)!iBmImuh(9Co@i5 zgI>o&z~A}NB-|OLx;iOc(2Q^1c^9k+MnI!4&U3~n8hn7b0e5) z_u@=$J23K!132k``03dNbO(gD65PEV`Z*LA_ip22$NriWOqteSP&fRToz<;#hhmcL zAcgt$dm{49>$^sz&Q1nhCVsKJTf|yLy+0E(!5r&!mfv^^vL$FHcqSX}x*BSvlikpq z#|jn@xR*_=Aj@rPwsL71tT69oT^e=v)F4lJ%-L=Q)=nPSRHwTkChflS_VDcy(D~mp zI7@C>>If>R`RYBVx>8uXA}&-=p{?o}zsPmRZn)I_Qg_%IU}3`~0txdVr?qO0B7Yp4 zW;CP(E@0|P!w>k$Pv-0u>VlqUJ9^XV6h0383T4eWXTOck;D&`zu{e@r8&SR&-pl(W z#hM~9P^c=nJ{||7>y8wHe)-w;T~S4{kdu>-642K|6?A9ESuZa!I}ouv7Mr=>@$qlC zyWVgEv*2q*TtZBcTtc-|1pED;!8Zop$PH_jO-sZufd?*YNf0k6)M8 z$8VQ2srcry)h{?8q8?s!tKf3wl=0 zUh%3)tI>jSyl)90ss}WNJ;$X1m%EN z*}oeu&P0&a7mjS|Um$zrr5QdeL`eQ_@x5VVy=~fXrmM?=zFjn5?CfCb)Kb6LfWO5x zJSpRpCTzQ5>%=IkrK6#yua@rYERVXk|*k zumCQ-YR&y~#x3#sf-yhcFGryk$Lpxo{f556 zp)ooQL*2GBI`1QK(N{hd90}Y=cR{YFYiFA5(g7KS%^=ls`E$;ly@RVQ(P1+*FE0-? 
zFdtCcT`r;wE^6Vm^M>dr{!f{iht?DNrrfns?uN*T;#Aspo_xU4Mk{oqtTqzI*f=#x65#@si%Iuu<;!iT7GIu z!0_|97Ap3n++~cy^?a}e`uuefS9%u(O5OUhuD*%aY}mj?|AevTed$BiFZuh5<#i#y zh77rvn9kCvA984%&ua9j>iKZYYoz{~469@SU%gscC8=gWRYmEW>!8xA+$cP-P}e<9 z#zX=)qsv{(^v?#yL=F}XOY8K5NB*Ba-5G0Gaz)NgZNtyxlLUtnB))_NZzGF}03~VF z6$yK+=TF$ST=0fdPdCMmDGDjM{$*pB1SyUkx~1?)NV*IoF|f&8Cr7{2M0}g5GA`xK zGqt!!tY7X#v_+de9w89Cr29438GbUwa{dFi6jr!LJ)eAa$fvI-z7umt`%#HPdA%v% zuQKrN;e~78H`z8BnP|GmAQ1C3o~MvE_*iGLn0*WF`wRl9T^9xc_Va6NYd`GG(f!Vw z3r_BX#Rm=gH&Y4^?1 z_+_X|YB9x@bocg#&fXU_l8{cptAv*qjon5L3C2N3^qTf~_Do{h$P&0;mhm42PEJ2_ z8HJ~JNzNz@$ywfwaZ~gh9(pmhWcrHT|7j7&eTird5;+ECrT`~0p^b%TxMjnIYk1s5 z>ZjIi3`;>p$JcB(g;}H0$1Y;>Xq`IgS?y%Elinf&tqu8^n~IFP`Y$y&>U5xNj8qC+ zXA1iH@=y;CJQ_eh7}r!*Q7JV#5B#^$xq-pGyGBJwOj*ifyV@#4EL4nali!}7Um|Tk zCzUfXSq2SR73UT5n|a>LV;a_;r{Z8?$tiN$JmTx^xnjcu2)lsP?6x%#nUHWYj+$_>DaNh}7AIZ7(=go(kp|=PghX#jx%%$D3o_(1_m}+=#&wc)W(f0EtT(^4N zb#)-|vNvj=x6USeRxj0o(YP;xkH@rMv(&OL#x}k|>=;3o0-axHN=*?nuBoks>g&%2 z9#Mq4v{y&;mcx#J8&LaR#IL4$Y{cuB&E_FC2TYUn;Fje@Md%sq>`bwIWpQ}`p;o-s zjT<-cZV~$v?(OZ3zaYFFkbe2$f>LMR#15RH77k&xbXpk3bMhs~+oC;7xpV)~iNh`9 zQaUP|6UWNZW0*;d6js4QeSN~(krdvplHnL4H!YZno%kL7{W;o)9j#nQGH^fACXe4M zK|WX)W+B9r-#o@+xPN)yr}lFX)oj~kU>g2I!MP9ygZE};*!7W)t}a+wnvvw%=X+c8 zCMahLx*Ua^I}x~-j$Lt5Qc|K&4BOME6Tf_3PE7pz*~{keG14tgWT_&k6{CN_sE|-~ zn3KgAWwBLNoHVx0N4Is6r9-Q%%XrVC`1C)2P^(5VyfQzTff!))02rW*h zDobsb3JeI)_5vR5d!74xd&f%#sb^=XR78P}hPrmPGVJCJf)n?>j+t7w%6L}iJ+lzR zPi^;^v`~SXDD;tLap*wy7N11vtqNUUYM@oOAfv2cP1WM)%r!$-0wR@eQR=-<(%s3 z>PUSn+}9Q5;y88mmGtMcoe~`nqJ^-%>y1?vuH_Yk?-m2Y!Xi4zeCw(l)+Wgee20oj z^fB$#TZ#1*$~?p&Jp)qV$8tI>WYH^IuQiFMotza@ul4zquw@#i(i&t&Frv^%xO-zZ>w%|K;7-icF}iyPu6qbP>ii^!w`V&O->e|%IdE}-6sWR z+B=#3-A^WS$_uBq8*B_n%DX`TQ&SL?YW^5&Y{A+tO{FM(1$Li|>iz}j{s|`+?Vg;B zFRg*=>P=H8Ty)nHBZo`ETXQ7AMW-O-6yz@lV({Yl;)x)1oI(e>H6V_nCq{&8lf4-- z^_r-Bsl5I2JO8RtV%w=Pe_)`rtLgQtC!wJ~(yq;Iyq+eTIj&;&U+a~{bXebg#21WS zpdByJ!!Cd0MA+yeHcHKLs$}Cr;m9eHZ9H_9V_d_}-k;JdV*67l0}3Hadi)!@%e%9& 
zl7rbM2Dewh%yEvzeLt8f&Z|Dt0{D$iOb$5xKHZ9x`UA)QxKx{@T($_|X1)6ar)dx@ zGcKN)&hj5*NKr2@Ern7FjJLHhh>QCkwD*An{a|Gv!vP0J;P>*tyW+o58cs8Mr1 z0+B-;wAJ;$%F<8#aqr>XyN8zz)Oy_u>(1qU{6v94^~R%sLO-zn0o&iP7U0R1@BFV< z{z(@3R|E)n@oxzaJo#%9cl9n?b5~vf|0v-Ze`NkA&Xc<9IDd+QS;p1&&oBC~cmG`? z2mklC$rwEOAN|?C-mOa-xWY!-)3!-1VsAftz-c>qCZ{2myZMd@d|Z3{$sr&+mc>n4 zr#P*O%F6+sxs`wbBIK-V--J~mMI8AkbixZ5120zitd|Xl?ACF z#8DY`xW7-%C+xgY2D^9fo`&-Xcm-TMi+vPi6fWjZsX33C?i16NlsYjgri{iU9@b3A zI0sI?n~g1l@Gn2t*ZTN|<-qA6LBz1PGsO?0Qp&z_d=?+Vrg!pNT*Xn*yTNI!`b#?1 zj*nBzss(%FeDsQqID)_gcv)vdNH8 z;^~7gCakf;+yV&^>UH*!SpzfGo!LvO{d<0}p#YO=Zdh_<;py5nl|ypQ&GfD2k)a?& zv>FMt>O_IcHR#otEN?%XAWQ`+C`1@`TIk7dr{C5WT2z})_LVuTHRaP7-0+1BhkPv%4DS4Jgu^u7sAp5 zjdy8w{#NCi&ewX>`&`Gj)t-m=rEbas1?nkrg_x~2<|*@el!4V*Qfw^dVAy2*tzr+a z32%bF{;sTz&30eqo4unW2WLkj32-;oSoQqKv5awW(qR{mt|-kFL1gcC!bFk8>bH(> z1|2?DK^e*lI~lH5DDL3;j>?f4<`p4-t=@vSTS=mVq2GLFN-kGxnZzs(R)wU@#3HQ;ib>z>;+1H<`3GFu&w8P{NziB91|lKJ$cL7^+`J0 zF1G#xUR4wycS%sKK|wJl@q3I5V}{>!eenwHQ!iG)TgZD?GAv52BOIR1%elJ7U%5j= z<=xB%lIXUUO7|$vf>10PPm!AQ3!Y)Mi0sUT>GhH%)lIXI$)v;w+Z*DYZ5h%->o1SH zYR>`{tbS^xB&tV8zNUr2nqEqfiwk_Gu$B=0IXp7FzrR20fi3VkKOG_$sKSYuWxs>p zJ~f@rlX3Hng|<1^#UAfRel$i#1@=|M9uhpPx9%_-3f|mFK6EzH(znrbnP`Mj71p~j zwp9F9%l6Ee-tpq?=j_rgJxHAQrj5QG$KfznDPpRk922D->Cl_~>j{9atFDS!Crl4z z6{i3CZC+MguP|cOTK!u-LBB;!%v&}J`+(8*;C8JFZx?M$_6}Vn{da2$9DBO8m7(p) z)f6$8{ZXK@S>XRrO75y!Y@LmN$tbWA__G~DB_?7&7@NUGnL8=d@Hi~0XNwU-p{J+8 z>i>%|m{-9vr&&tfOn`g?d z>NuagMY8HHFqJ36`x;8N9td2%Xh2mzVxiaw(?1~Z(GNna6at;&4Kv1cWu}rU(=u05{PSjGszPme^V}j zo7D_jVw9_JjbJ><5$)i*k-u1fdxy*NeM6TXLo$uquVW2|mCxCT-E0@~yJLK-{Z#~T zskEbL=4geTYqrTw+!+1nxBY-6?!yb0Ipw%!fx}J zh42q_>1k?~y*vK$5-*5_g~df!uXAp$c4ZniWG=}{kPbY12Jv3BljgjhP^f$=m-j*@ z-a>O!sK{?$g5W!EIEM?9*;rV#i@i)Fn$xx z3h7jcTmR4ySFYQjp3~0VyIVe!Ezlmk%qd4Un>zh6c)3I0Bl*CVbx06~3RNaPbjFd|Yir%0ORsfbU$I}8bB8)~ zN1o@Cp{1MSh2qinzgr^2?Ok1M5uWr!GK+7h$1TJ-l!XKY?3&KvKm`YB+aQlF92n?9 zRXL1&Jij$-a9A&|y*lw%&F%hU?zMU86#%&ehlbes5|3Y#ArCy#Wh5?1KXV%rpPC9% 
z3gl$7Mt*8lZXVY3md0Bl3PU0ke|JF9WxWh|H1CYxf$#-Emnrs09@-w+9>9LP*nyzo zg1)nBTp5B=MPhz11zeiA$#Pd|BPRdc{7X^MQA|Q|F`q@;i2R&nRgBK? z&ZC!%ZP{B0NG%Ixl0Tt`NdWrirI$JO8a8Zu)YwSw<@s@Kz=2u)=(M4waR7!z4B9&8 zYq)rO`F4ydV-Z`Y{W__p*4OzT!~CX^UN=$JMWgmUuHR?%P`=MElO1ZUl_`XTgdUl3 z@6?|b-t)(Tv@Nn#_P0+TjF?H*)NVRCIwFctU%%R$4+Wts$v3AKk}Fa2G77;-Vg=Fz zRONhpTtmfo^u5;mDp6}QkhSgrYz4I(g~r6}cspOTyJN)UJG}Krrl$;+bz93{LeCx~ zpLiKm@OUtLhQ&?yH_30)M0JNJP4lFq{TCnI(u*#<4V9*mv@kJJ-69jMJ5hYdUCK)B zqBV6*e!z&oNq1!~bAMgO`F*YJTmRk{7I`epLd7aGqOJWh?B%6J{qhz;v$~lk;l89Y6khd!Q zqAlv)Ij3=f`F14U(jH!nD>vGm3mdVG#KRvw;mx*E$JMBH_iDhSsbe6BGt-_-j9x!pY zsGVe!?SX8Y7rhk{EU(O7YZa{R3my`{$8{>vz@^cBtmaG3bI>tkRY8SZe_!Ojk$oCK z7(miyS9GZ%j!psdz}NnnDh+NVvhmYtc~3{ObPSD+OpF50e0ezZN+fDkV%urp^wx7y*hn*7`{m}yfE=joWdQNaY?>pc z)N<`t8YM1TF4k*)>*vW_3zotFhn$OMp^GV{=Izf~kjXJXPo>}mlv*R>4&K7uFa%1s z*sCqS=zRO{g9fQFQ8?aS{w_~|F|#f3@bUJON_p=oq>z75?`N^PaRo;7{8&SiuJ6|2 zMU;?CX}c(!zrEwzFoO8|PVRaOT{HI_+sc%DNkPddPO^<{&jl^yF+rK`H1zvR9=nK+ zU9R#m#-{o;H8*T-u*l5!T$VOQs~5lUyqQ-t2#@CQW%dKg86|i3d|lWOLA-K&jIQIm zIB=WUPM#4Axmu*y3d&|F<0=9FY_a4pIPrDtcz%MKHXWlRXkE#N2h$IwXb4h^Iy&#P z5b#mj{!oTdxzAnD1xwqoT%I2xtYri5K6(tN%PuJyZd;a>_tDbV_xWwV@RC4$!kOUw z%&Ud2y+g92%+rbQG#r^YYti|>B$$}V3%iZI>6g}`cr;|>X@+X-3$yvw(wVG-8HI-`-YEau^+R`;dA3 z{*om*Rd&^kE0ICjLh1tc>CkWIMcR$^(=a}ga7*vX*^Kv&7ddM`hBBnRtC#XcYq9*> z6{S{)9l%Kjbwx}dwxs0hN}3A2hzE%ygw1_@M=Ltnt!>*|nzXQf!(TUhvmc$s>FXQv zk{6k~HxP9UUmXaJhllzqa0m0jD)qQ~Yhn_(#Yb~rD1(4xx#W6}r$cP5;L`ht$?E|w z@3Whr+-{*NT%GJ~B0t&vonc1T+wLB`_5h4Mi#+7VI+rSK%w#kIp`HS%8iMJ#_5Fsj z;p4|>b;zk;gf`9}kM77uP=N=jlb<9@ezyXL5Y;Nbp_ha5Pgu?9i%l2>?+a6O=lb8X zwTxey!Uu>KeI4YuG@m|ss{OU;rJeV6^^-~*-rlfK5wrgN>(;86eOd#c2;2lWk7u3l zT_td%U>Dux=C%rjZSU|lOitgy^x>W)WN{Dg2FCgh=R|CW)J)GDNqpYeAnzZvT1eZi z&AU5R^vb&oy)<8v@Fl(bn?Xbyyc(lo47ce1;Xs)`c`f_qNP7zeOv4==RQcq}1qF{c z>nN8nCVV7zMW=Cb6-X789dW&{$IS!i3y-b?UxU)BUnQ9>6fQ>;KQ^fxIZPC_dwmtQ z_4W0>F~xN8&3aGir-fPe^gqg%8=HMmz;SLsy8c12Q@+iNh%%Zyb*^j0bFsS2J1LcN zWohYwFt@YWbYc6q(9lxQ=5_!6B7|`E!0~{MG$be}DD7%q(fmmwqnIv(vi6p%o0HS| 
zPAxj*%NO!H0ELs7T~;Xhe}&0MeCNa!QOs_1J*WRJ)bM2O zRKhb{zjCt{jJ_tDsTE}aJW79&p|Z4yUlp9{4lb^#{2K$SvL*GX{R{Do6;9-ymXwQ( z2E_8EOkr`!#H6obk5oWVWuDiGB{ahMZ7r*==uuOYsu)~O1^cVcUeb8_vTGKMhmsUNQOV$ZppbTv4rvGkBL$bmxFLkJtqrNKU?6CFqJ!s`F(2fKu!nC(NAV91u1iBsoN2CbYknaY>uR4sP92oCBUzNpa8;m zhEvT))dC!acN3G*-I<>UFM0OlKn$NQ|vHh`8li((BhU|K= z*F0K{aoy($m~2xsQG9bDM?KI8&R%M_0-I+7o!WPG%u#j*(2hD{2S=v3J4gIA?*l@f zsgfdMN!&43aI05s)gw17PQ4^-l?699@_PIwX-iAVBs#Z9Ry>KB%*^}&EYN(k)@?h* z16d&PSr6hCR9Wr!CV$@h;*$rt4J)Vh#_q0>xNn0LwfEPU7#F~DTI`4bvF;FI_)gpL zK|;7&t;GC!3ALP=HibsQo8Zt$J>aFT{v^+i>p@V{_*YGkrz=3wWt;<^v#Vf=13(pK}xTyNolDCm|cI>`+)5G%Hjga7X_y1<;)rvf}xW`D+cHHkCmV|%BLb=`_u8@ z=IZ^4iBhSn?5o=zk780>UR^bKTJF%b zyOaACm!_<6D%Q^lGJv?++_aZQflTd&@~|5^4G}xG-ix_|)>zY=nw_dT2{&ui7*oHC z9`TGD7|sI`rrG83nS3nCO`1TcV%ee$V{FN*;$$G|+Fo4g-hS*)I2H&r=Tc=le|y(q zO}3SU8q1iBkTvd^$fWaI-CHCDaO)Q)Id9cglbtaT@g@To{hH>nO)T5+5G56znXtL2 zKcO-NYE=AvGYtG&TCDz#X*Kj+I6Y2d7%d+IpXmA0M6nV3!IK*|0{CN8l!vtoX++s) zT$5oIEHSb!0XHk~=C>E={9h}ITc}X^KD$UPs<&|^Q`Kej-`zX1(ALz{bdEGQOE15x z4fXK!kfr}t=8TM$QB(vs5tIF`O@)}WlLH&^dlf_j!eZ*|VrQm+SF1+Uz@V+>!Lh)ii~`}vkgr}~D_n$`EHyJuH*QHV%jV!LQ0FlpY%I@5 z00c|wXAs_b0+>KgEyPp;b8>PB7u<*U+ONXF{_Z!b|<@+QoEo#V?- z*fs)<;cuL`e%q%=8Ti&u>^b?ho!Pg}#AU6lB7Im2z#rha^g=3OyNnE$1O2UEl^@V(|n*b*4mosTmB zWwiLtZD+Q8m6q~72M8RtJo@qCx<-+{in8*s^Wfw{gt*6Qfg1!M~CpnRUK^&3ok^VYG-O%T1fFCBC+q`90+Dxo?WeVc|QPcDlH79}wz6<))8{-Zmm zawY&Py*z{To*3PjKK!{x?c~ZXpiT-6m@z@8SK@|re10@uC=(W0jU0sW-7qV0J?qf5 zbEbIMFT?N<*t~wflofXEZIiMo{tm)|zeTB}yGdG=B^-0Y&3S@hfWA}K*~yoWco^0` zIQSN%RzycfQ$o_sW?eT+Qsa<6ou;db4^mRayPTNy1g|>y*X-JMma!U|V z0kuN1$XFG{_%~3LozjuV-CHw`VAI^)zcbJvXfve>uN4DLq9fUT?ZS)K38qnH35oqS zKvzY1o$DznIYdx>3kKMuc&H#{pI5P)cbs_Qb?%!#S3^+QZzpPQA;4f zW@E9){@V00XZ>OG0jEML6>OwBxvy8Z(iC1)G?45z)8ygqJ?Uv}Ch=;x4(4Ptn>aq7 z5PRuDQS;gtKcuZ>XRsn|=GN~T&nY(HlqmhTN_D#xIlKxS{7ycbdeiD#C=rk66P(h< zw~miuWFSX`qZxLZOss33cgKz%PHoT*_>g&}CQlDln~*UDgoqio79@qmDYg3Vnuv22 z6%`TOp#U9OHhAa=5zOu>|Frc3?bFxMR2g@tU8T>x6qC^(MRO}`(NKD*H 
z12?D|fpCO&VPh|^W^iS}&RmkkzZsw3oh98Cj;+w;oEn=V)Ns4qcAcpLd$?1iV(`;% zVl)z4`1u0srgL$B+PF+%anjJXlT|Fy*|R}mTZluDyL9fohuWSVamrhkga9^yI9toC z%^l;0Ke3O!pL&~pFr$WL<~z$cP;)PTb4i-M4j$HYrBT{D?;+|_FE;0SF7y?cu5Qud zRT1JR-kJK9>FJ9%>()a@z$_1K_M^je^7dtZ_0uq{fk+z<` ze%i?TO@e#XR{hCSq*0j{M_nCpHI^9}#^Z%rtE;O})VZ&NQhr9dx?XNDXpxiaiRD05 z+Fh>5>i}|Fa#9}iwSj(J%xz~w49%YCZeCST8qSc*-tD*rS!bR6?2qB7}&3}cKa&+=t`xYmVB z?><3%Ly|W^UB$|c8;o>LmL;K6D0=5&X>gt86%(w16 z+`UN0{_)2dq{5iPG7i*}3}>c}Ad9XA`sJ@+&ih9PP@8#F;9bh$u3aCQZP8dc{j0&Q zN*FGUh*c$>e(V|SldWxb?k&(jm`W6t5*7WGlqsgWtpDWKN0pbJ0bQBgS$>wTzccz3 z5=dL9Oh%+pSe>(O)Ivs`^F>r(e|nRh{kL3()zYDnm5KTmJKx^+!QIY&YeVqlYvg@i zUL0M@;L~&nb!7lyvR2o{_ptLof4v<1ht#RN;}mwj2Q!HA%Bky()DyukYk_24>+U|# z-+i%345DDVKMfAv_s3cxr*I03{%@K!4$f{+v1=bNjtZRp@v`{2)pf$Fo)@6l+Wv+a z5iJmhQ;QedATc?q9Qnxva)<$sIQx_m8x z_~r4}DR2g3x|a9Vtkj8SYgMhT|2EFxqy@hlZ2wF3_aD)@>Q6`lka{O4Ay3W!|J9nW zKJEWVnf~Xq{UeC~qk8|3& zFBJBxpZ~9KT{t4CZ0`Pkq28RF9H;P8tM^dad?dn4K%#|?7;C$@iSvQOyKADh03LT2 zSgOxxT5A9K5Unl=uhUhCa6VIHLx>a$uFVmg)g=HLh=98ry~CI%eWctUA1IhYTKjI| zoPD(8!9@eX%?u>cxN01Pzw_;VMI+hSiCb@oQx(cgZxy=z>C{zhKO&)3;0BJ~Jk?@} zE;smz-JrYZ^n8YI;G#H{)a}2y<6^9NYFzv p{I64k@bC&x3AX>2;YUngUO!AY{u&}B&2se?87T$HLJ7l9{}&(?VXFWD literal 88958 zcmcG$1ymecpFK(xx8NQ;cnB^5(gA|IYjAg$h7cME!993zZQL~k3+`^grEzPl;Z^Sa z?##E|`_G!0x8AGOi&a!rpHs*8Z|{9NR7v3jCK@Rk5)u-ow3N6C64G;GB&5eUD35_F zceZDRNRN<^q{ZKdW*`H3Gtra5eNvN@=D-|PXcg)Xhb z?G7AWEj^aV;Dn3cf8o#dFGEOv4If3#51in4Kj$x{=z#b5>-d2X=esl6`)Ov0`_J7`p%yUhyQu$sx}g^H zpL-}Tr2jb;i$HL=Mp7uR!)A50%Po{Lj}zdPjr$&m;eR-Z*rz|2&Lz7~uCnL_*|(a)5<2st@`g4@0no z{=!BYWmF_VIYL5;`c3c%ac?N~6cW*6{~T#&d34@pLb{&zVbp0>6$uHv8y|3tg>?MO z0C)6gJD57?DbnbHYZedtmJ-JxeRntcPoC#UVjO~I7!)&X1sD(yq)#Fw%hKhfw5Zy2 zNS`tk7%%E>(H)Z?A&uIU^EEm5i+Lg=?ZmC7pBIb2sFDKm*I`FZA^|cwHGZ;6frQjZ zC;Xq)ELAQ=mn8($saKaE{VLoVNc_5l=cf=7k{~_q`Tw!r|E=twUdBBAryl;#t^QwB z>;I#Vh}DOM3E=;!#J|(~U#0e6bdZ00QKbJ>ll~8i^>1na51#(-`9g*vl^2GsR@`ON7X{bV`wE5@+yY^7J=HhIb+=yN3T*GNcz zf3wB`mI$5Go(l>Jg5pV&Mwnj?&IPOT+Rf2by;#a5U}0hpjf~8AYf+b+JcqE2&Y0ap 
zu4O|yF7&E!B18JSyvL4>&YS&=B_dX5Pyv^1;pX$X>51M*GF>$-ZliI?w1aZds^_kB2~wd8)hL zDfZ!)WBK>8`2Far`TpA8`$6W`TBqFy?Ni9jF5lPc2mb2T(z4>rh=H*6%nE|9x+AGN z-*#E6h#Utisc@GsRuLUNcApIBn_tuO9;ivk<3>n&yzmjfQmn0Xwbd_`o_3_(w6q_) z2;LIpV(9Ij_W8DA8uKGRgiv2A!{v0ZEG1Y*C}vHE&isj)ri#li zp#!g41?)#Fv^((BNiU`rH;%9$64U(GyH6Bd%S=Pko|dx6yKUm5MKC&7Tws|vUr{iB zM3Yd}Ke?z@k>33-Jwm47P|D8S>ZClTH?pNGa1&|n6cl_kjc=yKfAbsXK+n^kd!6@F zOjqey@4EX{0|wS9WtIwysx!xfkL`uYx}ghBbNv^IA-4xgeW)5!AXrid^&ofXB2=5` zUhvL8tf$>ttxA^=s?-P1KrHqAG}`$dx*dm2(R6i$7q`rt(ppI4;c;F*^1*L$*KVc| zU)_pm4}D_ll;OGNOIIq}T^{j=GMQZshN42D7(KgD-KveIvJWV zt};we6YbgAXNu-2^;hm{Nl7%07w`X!((f}TaQFw6C%-f548r@F2X2k;g(!L1o0vMm zpwEklX^Pu{fnmG-?_U(+xs{wZ+>0C zma;S4!Z^TZ+?q$7FRtHZZRkp)$H9vGStw)IVPMdu!KSw6SCyQ%Kh0x=v~$l-_dV%i zah4wm%4fxXVBE((Cg)|cjj{h){$V!XWF@nu$)_)#(CfN76d7M;F1F|h)vI1q`%y+G zFU9S>=VGKqK6L+5LUS6QZ-DH>^Wh4-#$O3Ty~gz{>S)i?i!xhlnEhvx4Szn5+)hRn zY`vzhyq%J1>>QN>>be~IN|rxMxxe%|79+%Rs`#gi&^KZ95F?iz*0R$4QsI+anPt-h zz9wD5tf@j-;~ZA-&!@vrSjsb5B1HGz+?)t)x9Rpx@v&d_iNDC0e-11Sqwv0j(>SK> zZHV0XcCf4DB{t>(k>jgXIYyb3!-e-`&I4zfgPtc-p|bp}>tvKJMTlmA}zy z#90#fU@JoRXjWC9kT^Jv%Kf18#Kkwfw`SQ*%_q4Rco_fOR%VV(ub*ZoT zGRf^04Sr}rpc5F8hKBxkH72P6pV6D<=eyNX5FF-&ts@cxWmQ(q{wDC)8wosW$iNmJ z$kW^xRMW&HxEY&GQQ%LyPry0o;9O9zKeafkm109B*eSOmW^vX~cgXu_-P#$i@8A$r zKZ%x%7?r9U4Yu!DpMzJUk{f!kG&U1v#ZMOzypbKDcI9$Gj}p_CPxdQo(q&S&+D?~b zrTYtFeG*Q~sZ>Z6hAw;Arf6(8Rpe=5eeGVSoJ`S!tejVe#3?>m>E+PG^#Ss=p|CSSYk5hY=|G)fGU zDJ0rFNKP~eM@38jbT@;C))z!cCDMGBL~_cYozwNRW56*xC`a+#xHKO6Vw?^W7g4ho z96f)0r%FitqKdWQX>eH5FY z98|f!tz@Q)xTKwypM&ElgSxUR+Zi?uJ<(i;!S=e1ojxl4HVLD`(?K~JK}DfFwDHji z`!)ET6O~#gn?<|UD{f}!0+DMNdFi5#imCKI=xQHgtfyR?mb*)V5NnHp0JDhSzGswQYjlUSoY8CLa?%CxdFoI zAWEOa=w>jP>lS{H$!nqmJP<1Ge0tS4B8s>}V6Q|GQ;v;E6wnp;UfqWTK0$6jKb7Ss zWxKb&!(Ks*E-o$(<0v0Y)Mk}ES|D%8#anm`m#+pXpcswLUmf;-pFpK{P5>oQzG=(|- z?h7bJGk%m`%V2gIK#_d<7X#xFNBn zjOZxlXs54%Pb!wOwD;XObmjRF?pUK_kEh-BB<7}b$Ov?=tr|~Tk&AOU`jZ@Vj24x)&t%!hPc zz^1v92YFyKJGX@L-}qODW>yv(jMn?HH7?JAv1uUp+{gAfEl$&e(=yyIvdubzS>RSh zR-4cI3R1X 
zlKON#nDIJ~z&_`A?Qxk2nQ-Gqe@9v%?3G%IMh@X^OIhB;LudPkWUt*XAsP1Sy|uaNGoq2dHo=PMHIM*5LB)rlgA?F zS+tN+5A0H~95SvbCbbrsLj;CI43xxWf!4XqDS}sMY@)u$l5H1pr!>~4YUCv1 z=c{k#&3a;SR*KD>*ezc1+UrCFgYu>cEfUQ1>R_U!bck$_PA~5KEH=4weaEeFKAxgoKOn0gl}&DZw)Pvh|)QA;0|wqjaD_;TWMjJUr5&(i|=4 zpI0fYg()3&tMWoaL*c7WpJMa0`@OG)khivMb|s`ka2V2g90tP_rYSuJXu$OcY)dv} zB>r1p3ZQKJK9*zemT8fju!`bjSg)rs1gi~nst-1FgksKWzA!dZE~8TS=ijUi+yc^J1woB?SKNM1=tM{|L7{!pzl0D`6UW!Cyyd!t9o6uCi=(2c zLMAmG{riir8(hGG+aIUELP}>&gS_wqm75Ux554gzjYQENJ0@awAlh5+oT3p+J;j%|i`QD1hC!h~92y z05k0z`Z{5IJ0cZchgezrqK%-qDVp3X;ft#`6#TBlRPZ$XP_NIv%f!VEoNk@#mU(#J z(J_7rEgIQO+(x*rt&FFjaR}AVFo>Eg%`H4FUA#jL!Z#Ex(8$OgCn3*bix|ciz~*j6 z+RZ4Fo}7=}9}f>J>zF``Q#>h6+rPzT8EQ~-UjOX!!;?H43ceZ(0cD*Wi%lEDGSKJE z^2{JO4pa^~q%GiQ12p&#!$dwaXRUpwlJG7rlz%y9O zvV6KwrsVWe+~N2Fg^bTsVF35DJ+{^PRaZUA7%%TklE(H?=<;vV35TuY0$>bgXmO`@ z8&&f|qI=qr&br-H6g+KF2!@ZBgghI5asjVO4kfQ4 zm7j_Ti|pJYCNyz;3iOzf&o=8CzFvkBGBvGziUP0AaPgU;Z7-oT*EJKz&*+_y8+prk zADOVkj>I!&)L^HE{^65;ym!odmow(=8amFvM7=RIvut~|6de3KrZ8?LH@_ZI-(9ahTQ-?tAZ z?7LqKVP+m)kHs?Ti6EYVe!)ZhtMPP1DLHR5U7C5*=BunPZPLr;`lnX<%dv);$3 zmPO-%SXm>G%iiqEO*Rtyj6@g8V$>Rpf`x!35cVr?C_>(CfJW3DK~&q4{^W5B&HdXO ziYQuQLCeQhZUgN(watgMQ3Jol%2$Dy3(R}+8xno}wtj~}d;KyFHYo6OE+_E?=rZ$R z;-{W4^XpT&yJZun1)^|pBOY0q1~;Gjh7g7wryk#mmzeZofcw^qrt2hwx8tp0g&a~W zio(0x###0_^hunGqu*?0Gb>^TZc**+o**IZbZU}PTyzv;v$D<;)59}zlvY9{Y-;Dr z+wTN9I2=~9gV@>GF`ftFIMW{VWcuHQKG{;<`e=1KmNr9{LdF842eChh_cxxerKwjN z9?A6C(nt!stm@vJnlYq1RcC)M&f2=@8@cq@(VSC4cnq@FN-iaAqv3*fSgU_- zOt41&wr`C%)c!tUEn6*XpXNdFW=QU3B9F`~AinLOfs7`T%MWb3yyw7}zp*&Dh$X&7 zQWP6qn3T17gC0v54o;qW183XN4#FVDOL8xEqv^YoJJ#N}405qQwDCrmXDho6l)ec4qdAlFZ3>7j9YcqXP}3uD?=a9vkWHab_0T$W`iy`mcSP8j-#3$o6;fZ(vW$b!BkGzLkQ6xmqUY`EPMTe3y5#5$qFV)^fO5d9gzwr&{i-2E14WrqePjh&^2F&DQA34 zyS)CX9budVN{keph6da7ex5z|Njy#skHA!M8eH8lpKVkW?*f1W0eyMa9?{wJQ*V(q zsUKgy$s$kM3{!%j@$m8qv8y>O7A--K!NJ(WRE!Fbh)uJlM_OKB?#EDgsOuyT1z z4H>C@3Hj#2R-+>y*Ainfy?Q+SnyVl+nGfHLG%|BZDc*{~d9*eE%`DHH=CiUkwLp1$ z*1oMT`r*aJEq-;pd}G#PUFnQ%$#q|Dex9E~@FF|6E7=ZEsMQ4HENe;g()vFZ5e%tG 
zuCz?Wf5oOmLK6ERARvSbDl=@i_oZK4)YH+iKJUDEqV1~gmeBT^)6>{>MpJBtN<2#( z`D=)7pj^||PUbuNwsl|HCWAM%FHPngSP~I+G_|keT%A8*UlZL9UxGd^RdUa4odAL>=BQcLrU>~LtAE`Jj;(Pi zd#B0|N`1({>orR`VR2w!P9-ZjRcT1PoAtV}NYH6?Tx97ndF_U9%u^~w^s3RNm-RK~ z$4@iwm9VIq?47D~2TH~+cvN01la&5+S;*V0R~w2rZTk8(ey8b<+h#>sl4ojzu8^QA zB{h^n2cn{*>37gz(xnGB2sJVv{*6|`Ws)F&5nEbfuA%TZnNJNt2?9MJJuC@%XliPb zE$<#9>R0?PPb~C(@1^Ql=a-i=aAH$pi#aAHCtZ(!bd;5UIxINN?V=-pj$NCnc$YI~ zX^?U(ULUXLvacv@lAc>>zf?KAIFqBq)V}=S0#@zpby;-qS z$uSBwD2pGH($G*l+Rd+7GOvaTC%~jC>8TwV85svhdAg80sXIr1bArd#Imx)QDa<~q zl8}(_{?UE2$AP`I^|)d@uZ;i7RdcS+IGKXjs8HzBx;mQeZ;&TLr-b0O;=TT|+)R=U z%4|{QQnr2Q(>hMn+f-A`V0yAtSXusuTW<)ivF{yu%dw(~i@u4=QV05jtMQn1gf+mO z$yWtStQ+u{oM60BdJK|ESIZtSQf5-aY6g^rqsbK`OVjs8Jy1OVjm$Z^@^WXAv!}9_ z(Dtw)uSw&`*+$k?&%+o)w2=d)fmFR-HlyS44=44~)I} zWGFsyxy9{{4=Q8DhDMHL)ch5!r?|M-4{@WerbaI8_}bW0 z+~}QA0ojM%xjC=sqwkpQ_OF+m=Z`}{qqh|DUmH_WtDLt6e7VjJnTlF_ z%LfEDy&|d>W_qN(tRT@{E!<=NeEkVC8i_ONej@&j(F1&>XA3$XYcIwFr0;2?=rT|p zDWr1TcRwh7XdlyO{Rktlh7rtr3&wTA)9;Lhxatnqx^T68uu?;ED#5|4I|^mJ(>-Hj zl{vVB=id>hB^>2iZEF#lD__zGnCb|(m--j#9ah4Bk=$PH!}P*=mRQQ$PKR}mTJHB? 
z6Bcsyb#?4)Y;Qe(e+QLm-Q0~EI6wZpY$e$h3uTYlG2>{1j$A9Lu9A?H23EFR zeJyC2`0ibQ_^P6VP^0_${Rpy;gqAM9bqpbn&OkS_v1_yBT1@pY z#;M`|+LokBCy={7LGdXFO4;8~CO26ZAjTH}=% z;6tJRG?2yAF~UW~R#s@{L=k@Q9_tTs_M|Jl`IxYpS<5aH|EJl87kDBGX6XCC>h0 zTlGXl4*Z0X%3- zBNtVf@8amH)$DgL&8F*du&A!7%SagZKX(^Zjvr*3~qO zoP8y^6F{}Ub)(3IX@6P7k3B&vWg|}~$20KopceEn}~^8at5o0z6vtFznn!V%?L zXAJpA|0;lIh0IskI){a^$;nAFW>zMqr8c_{E-qY`=t!TIBM*eCt6lD|Zw0;SWj+oVQMcgzrPWg!1GvVrb{=9A`d4sXr@ghV zC<3rxW@dH^O^Bg-gYC0tmh}pTONf)1Bbpd`o%K~~syv;Gp)5>oGp`(E$Pfy7iAqWN zroN%iT4wyuL9-m9I(C1-gYhj{WncmMOFmnj&1BcW;tK3JYIEM$o=6ou3`I(Z1K>1ZdIPpPXEsznB+&(eM~GPhTg9Mdz*a4Kr7TeLY(7nX?2&7~1i zV6rhiVYR#c@LGTk^g-XM?#D3R2S02pE)CsdS23*19hj6756vj(BB3(*iD$K?ON0=@SqczvFRH<1Db$LuFDKl_+Vw z-Po*V#1?Z|Q|-gg%n>=Sa=0ykMN0d^6B12}H-}uYGLP0+j!^*ha$}7>G9tCY@HqVp zWcKdjqPjm{F!6^D`Rm9I8>cHC{M=+A?5-?{KYoo*ZF{GiT2xe-gM))zUAd#9(Yyl~ zSQsxMg*?`vUoQ;}eRYFA3`Neq7a+K%8u1=lnwRD?`=&YiG3ilz+VYy!x}dk%w>DEd zRYcFuf%G+Bun+c$%jxy*Btxe+TCpst>G(6F3E|T8RDvP@uKidR0@j}Nr}p*|9eg2t z3RJ=xzgFR~b`Hf%YzhMO&gc~vp`+GnJQBOtC&>E2gQgXYO^F~u`qUvG5!9U85F-rmc{ zaHXh14BqVIBEouGm$z7*^H?{{_oW_o@zEqiE34{iwAmw$(~WI*=`{Nlx{s8L@iA`F zj|~`@W94gT0NLJAjq@vdvl|cY@->c@DDDfFpb7oU&$3gJt70YNya$n92m7{|aP(%s zLz-_gS4wMi5_sd-A`+I%wdnH;$D=)mi8C0_U-)un=3X0leb_MFD`2F zB)S*QPj#{rm=D9L6Aabw{E_`B_#9{Ve}}g2yrE^8?x&p{pG)f4dL!_dto(IGg6h=J z)1_wXJl2Q)fTlG$5m!Kb{>C1 zCSS9b#b>^L30FekqXoSgwUcVhRVq@c5s;NR1Y&l9A3Wb`b{yyBP*$skP^pD^GjX7L zbGFgu%yT}6eEv~cMo)U+8Frcjx$>L!rGX69Z2OkB?7j2q+?J+bgJR{%ltKQ04>$?K zql(HrQ#SF=0k=>n@Gg<^)-Ohn!U~vRf-&)+|q?S+%2^VPvF#ed%oMyO#y+%rWyaci)KBB zg<2c<1w;7 z33N|kgMx+adzNCRIHt`<>`!#s2{r_GX1H_{tmy;YZ^_&NUApo~=g}YGyx@fIpitmhzmb-4ZI!c(wfPN)>2om1?tUa<^|~?fDIqgoYbZvaJ%`w^!BWaQB z92|TA+&{h4sc>v-Zca5KiIC(HaxPnqzjTn~${V&9t#;ipwU)8b)KFvBz4;l&@mADm z?P<>3iYJ0iNT`|Mi0z1N=8`*jHS;0J)sBpjCxV$8=36l$MVc_}O&BDjfD*$3(4K+^ z?jE^&O;t-4d{_L)Sn$;OW)M;k2qkOu(3JO;>vPT>bAZN}if7(qu>+acq^T~@wAf%) zN$n=>cW%G-Bt)tB0|&%1348?XfBbl~wY8OHl=X0jaIoIH_=NP3VPL7Ca2Rq_{sJus zop6XKtoz#QTGSW5UN}Y@(VnZJ*4BLYmN)e+xQE-u#>vU4FW?DsxxTCxVu=sheu4)Y 
z?nwh+|7d(sT*1&c4P8~Ay~*A3_M4Svmc$TjviUK=BzB*&q9ScR1~sF19eYzNK3-mI zIyIjL*45)RkDqyz`KhXw``?^c+&}Ug$S{=28GBJiU#i<|zz|0g;cs;FQZ?Vf&Cl=t z_6YoI`;3S+&4IqIe`Mg{EULY=MbJMdI+VcOwf*k&tUQn2Y{Eie;sAzFMh4RGyD=*Q&;=-iKfrhBluL?Kxna@i3Gbg$%O-k*1rZcTdf(>f0MH zZa$uF@l5d1QbMNxb#i@uk`W`3{`ZOArc1uwctM}FLE|-s)Y*Aso0A{!o{VHRe~g!6 zW@jI6PN=sW`@DS2goA#NjNxve9h~q(L-a($3LwZwzE|rcwa0kKLzS-^LrCO_P;IT$ zbexCtqVG+{Ps>gl6K(T>VVIxg44=cKynNF8RPwS8Ma;y>GZIk}tEhV&kzd)+FB!Vg z;}xL$$tZbVV<`rO+BNc%jD?JBl^53X__!^Dp*V`ws}^eMc1t3W5TnRc>zKcmW?A+m zMmjo?`we5zm)6$wxH#4Q&Dq;%srlo9=j>6sP>?#+B!aO?z$%1o^J5MK&liz?# z{0$z^7s3$-9uGF{B=Q!|I&z=R@Oy7T0+nyd;NECTk?0kcg!~+ztkA0-NzJk-6 zrQ|%HByPwg`9=zWmq#Z?F{$i_LNVSMou=))MyS-eRc$u|Ped^tcj{dyL1=7N-rOdw<8eojPYFnIg#6Lqpd2^WL${AjbtO((Wa0kAR;$UqBGy4|RA9RFOQgG^@ z1Ez112Q6vBy5C^PGxP{fykF}v^f!48xCtIKW~S)rOr@kOf2EeUPwf)hG#UEO{V;vq ze!Hp*=12vin3m?dR?&wmn`qSA*X=GH{#7Cx*t)3(x#nQ`^2;F#N9Y zq9VE=nqppIzz`m2aHez(n6LIb%8>L-+ho7z6`eCOi^Lsui;_s%R80M)Kon^J;^8% z@ZJU!B7T1U4|aEGMFijLa!`xYRi$xztu?xAFWfbWdMw*^9u;wu+U?JB`XjFE0y@f~JqF3MA03-q@iBq7yAaBOb)0Nd#gLhMyJavyb!#Yas zOA*EoiigGpM7<&kS%CkRunsi8jz1^=I5=V9qpJC~ zw!m3wS}iGNkPMV#0Q#%V`hY+uhN8f-m=EuZT}#L~u`Ohc@^n0`ye8vnf&)fQPA-TG zlVj4*)J*#J?R>Gy0@LA}v%5PfkGH9utK7D6X{fcewQ8(tBD>G9KXOa0M4#v9&f+^fM^csk9 zao-id6VdnH*;7NJ@iaX$q-Q?>J`nAg&P+F)=34e-Ro~XiM*9j2@zjf_=VV*!!VOtP z8o#QIN0Tw-YnzSz0H8z@#J|%l^l0uC|`^RjY zt0+=FG_*7{e{(q#1Vj}}&6lR8rk6)Y6E5g%EG&!=PZHPRnUm8~ZaJ;^#CYpk9bH}B z9K*?j{e5N@W&$+&xW3oL9lgDtwNPj`1gEG*8opGI=k@~B6G3Is@6MDUvsAro#VtrJ zUZm!h?IdT|S#|0R`eIRXJKWN8*KSZ~J9gLqm6N zWO{ScN%4!%5cE;vaNsL^8Lyr^6?Ke+;g4s~BJSYPtS8EA?X2Sjy`NC?tSlN6FQ+rI z5(G5+tBntkBSEhYsgIy~)HtLoFgn7pBaLMMYAIC4uDyV2z&5SOC57LF3QGD-e z-*~{n&P)UG^uFLqp%kX$_mpzfoIcL5AmsDli}`h0-hS_Qosd*oTIy={O%8Br?4NJB zIslse0({XFPgggja}^RClp*TzE>o(1V1R>})%#*9$)}G|A}gLEbU}xKDI5rP=quE1 zCe30$BZ%%vcSwc3kLA$vx@qgyxN;DSAyvs92SNSj&aOu?W*5ySezn~78G`4b&eE$V zEJD_#CHvyTsvxR$7av*Aayj7+mwgV$_9e|oJF5>h^C^Q`3(+LNGy_hMQ(?tQoAn!+ z=*bS>gKgV(nyb^1uYXCG2$a+)NH0-{sKZM6%pd+o**)j6t`!74PFJYw=PRrf>EvF* 
z2EbBBDNncSr&Mdnb5oRO7Uo2fCod0)-aIO`td8yJc`hrveq^)-^+w{<@!LKWPlrTb z_WIh{S^_h*jh?NZyuADjS?@W|B*MVX%4n>Zgsu7X$<s4rsKSj0IxXKPYDOL>F2%C7(zcgyPc!-PR4@)|S?q$9 zA(_L)rhg|>_JI~3POiu?g0)0%y>>NyfIrcVj4r$WI$IYLOq%U7V3#S!&Ir-c(n@(@ zB2*I$7p|_D+m8~xwme92lg$J#FD=>q{vPli6pTU6|4HWHj;pP?emJt{z{kES6bj|@ zKL0f$dRtXhCBs@r1JUF6?-(9d@bEElaJ<_a24s5+hVqa1Gw;SH#%4xnZ5N8%gt^{H zJqRg|yeb}q7BXc}-cP=T*(UNGM$Y|MM={6n`HJJbbIO38G3EGBWD$pz(Nfnul)6Rv z11=U++E_d)*wB5oB8+?lM96OCHo#k2;Oie$k5H@tT-H_Ft`-J(APz&*idLCsgN~ZWF$j zrV@e|Kkv&&R|ag>J27I|_stv=rRO~Y&qG$S4y7(O`BbwF98J8~eD zh`#Bk?c->9l#@6KOu(Q3Rb~aX4IcWuXS=n0ve3y1<9F=L=dpgWNDX(T6 zE?__-CVz>K#%|C+EMUVxz47xp3`$jg@|~M|%A6=~kfbtv?vz*ektY7e+f!@F8Y7xr z;BP?=Rp9XW+xQ5a?7JfpyC&*5Eeu|2@qEi=usdyBM`XBXu!4!&Bt9Salme>${!90g zr0Fg?ic!nSNduhaT5Edht}A;XH1(3HMG?h>E$)QPX;Xbpejgiq3LQlhr_@U{>fB2H zYG$5!c3VPO%N)x>27I)Hj`4_9s4_E-+8<1cG|qTr!^Owv{}2er$n1?zi*ErhM5gjN zki0c%AlvoclC`!L*PzK^#3Q@6>M{T0bzswhS02?2Pt6T;`~U=RramT?6+@#dJyR=_ zb+FcZhOjAfE*ZM?ePXkJ&Db`CVb`zy90%w0TzKJ?=v zB^0yn8lqVYLh9VIq6{44ZoC&Dabffb4b_!J%Z>wm#d4ccX4}$156n+cs0TI(0Em=E5W6(Z?g+|>Bf3kAvh|uMPn-wd8+zs)DXwU zv@+Iuuq|O2;CiGTyYSjw=HZTs`*TbHlpZM=S^NE-KcJ7ax+7B9?KSHs=MJ`2@pKm) ziK2)F)_z{2$5fkdZEPe0E~^Z26?qc^YVq0Wt5Hx!g4`ILDtGk*8Ejj_3h2T)FUJZu zM0?w&T0L#FETB0z!|k-_27V^ufbzIP|E2Ov zj>%`7|=?rZpUvLSltF5Jmm|B>T z$&U;UzPXzi>x%)59&Ag6zyid=!UEGvO0+LmRndo*Ym`@26(i7JZ!Y=gbAd6Bro*+h zwU?R=!S!nturIE4BUvE`yEN zx_8f)Q+=$h)V`h$%4$${U~svtXT_?cAw;1XIsVY-=|nRF=W{BD%asNw24RSiIXjb- za(^z*Gb?kDuK0Re*YI*AD9hB0%zvZ0x>|x}fcMA-#*P&k35nj{C(qdhEW{622P2gF zod94hE+#a}>|K{n;m-R0U5Pot-QGn|&4MpOyQ#hwGlG;yB?%rLWV+*5BO zO+B=3=ikSBFbkMHeNoC@0_F?po9>>UWQmAQhYz+HL93#Inqt)WOE&7NVD)~I1wgV# zZx*s*!fNLgGMHLaN=mh4a2A&m|FgN_CM7(45P0*~fqvZe?Z~6M@7@I@vFjf;N&qm* z$NO@j+QK2{-AMY~-5tO(1$RGv9FUoLwA^}2_afBj-ew5fru}9OHvATzy8r_WytI&9 zeZ>hyD{E^(->Y9s{V=~4d1-~+8`8bhvE%G)H!gSM_8LO~epd{EDL93*@eU?rZTYL^ zp;N46t%Z1J%xe7(QFj_Ei&yvINf=Z}1u$Q=sh89cwP{&PHRhF4kEJ8k7;unLPrap^ zYO2cgH?V!!p9>BaO}FG0hWHT^p)TR&s^Ee2J zh{zP=6h}S+%++wZTj8+1aJ8$0OjM 
ziQuxPwmY5a#z*Y$f)CbTUB14tps%2X%!cWacT?6S4l5Jg+a~f1Q(U)@z!W@zy%Z?a z7xFxiXm2ab1kh%M&j3<@FtP&>YK_P4|J;%9CUSp8T6{oD`1*RyXSk4F2S7(RZL&?r z_ITWRMQa+X$^JadLBBlkK{OpuJ0Rlf?J|hu;p00J{q7H|2X%r*GTW8Z)b@8gw{gcp zQKcc#i)nhGUl)^VA`h1oe1T7%2v(kN8~WaN!Zq3aeY(#{Cc2DE?(RN6xy4pU6>;;Q z3`p!wY=s}*I6G)f!&{_&{&;nKp%DKsgG zF{D$yL|t;EffHO}=y`IDy=+)}ci>SV|4>eXO`i6~?X=@bh~NxAgU0L_;*D_^N++0t zpmt?^e66w@U1&xGdogtKZ059#BeN>rVfr#b%w8fu&HZ+bx$$K>POM6_eP1gWSx*A& zCIPmJakv)QVVYr|Sf1};69-3`um(2i0TV3^HlL&u*#p6d`pVR$k zuk@0N_qhA5@MZsPpco42GfprUU`5yMjS0i``HNB# zfdL?kn)>}bc{0sw%h%}&QLW@c9?Rs2E=AF6xrdvOqa%z_9Jh}SjET$TnY|B)qY^gV zI=iJN`8>78#>Tdb;Z&I*WBXi=3Tp0Qwnof{b8#o6pg(LMb(M*ZY-l-MS(*WoSORow*g{sq=7xfmK{Wufg!BTy28bH>~ zc6c>Q+^YW|I6DLnB;lx~)DD%*YvnxIT`_ULqpa>6m_C?`Im6BW9)@93Yp_w8ESuq1 zoKrb`9&Kr10blZ1Ih<{CftM{^Two^B44mNg45fRxSRO>`jyjhO(^PtH<{qqQ1?ddq1vSh%OK3dN-ZZ($Yn5t~!5w7t4ta zwiv%u%+H^hJFrMRw6l9pD~a{6edPbX6Xdzo3JXXl+*&JOXvNFKjnnr<=u8VRCzu-O`w2@Om3XcgaU86+rWSSNe?W9t*mS;pi!WJLg|=_U|tgo_p+p z)-A4pjmu~hJWv+&FMz)QhnEx+6Uot^u3OUMtvw#7Nkv+3FzZnOZj|{gm7wQU)XKwR ztA_)cPbewks$s7)#fpl7zz^*W^QiIX!so)SX!d!Jh@TvmXZ)V1mo3sSQ5OcHvHe397Z*&vBYco^I z!p4Ldg~G?jSN^J;-~HSZsLWJ{)&RgqfbvILuX+{{DEeN1$|PYqu-jPU%G4-yg%u(f+f**SUt zcSibS^f0d|j$}@ctkTBo*#NK(bo!k1D>^#XhXZ$4r{wypB9b%0lw_xDcUvpl!uHJn zNCSwqymhSCcw}QNcaD{XUD|^3EcZ98A|ful_*~@5SH~N~a4i?VZ0kM6}RN zsb9F&-Y}sBWf#6I&-CzL%4AL`)}X;%nBBRbw5}yGQNRPGq@>8{L3HwHAWB8?gqbAF z>P1S6^`!r*t3O-E^}CF#)6?`FwoG>m@E7|x(SvAMDi24t0J}J(a}yUdX+2d$i<}n` zF|p06^lOuW{qq2qMdh4VBb$9tb3zXWMb!&Y% zRimD}=ZZaH>RZ1(48pKHjS@M8r&3X1;Cqg)w6=s)=#;fOv%2+K?PTuLdOrg=u(A5@ z<4-SY8tz^~EbgvfxViZTh!v?3Zf*nJLFa)zLV*@CIWt8e+AuQI$>q888uUd5nm;D! 
zID7l-x$3LL&46eoE2yq>%({IXZ6m| zQoF%xUqrJxIXkuKt!O(o8ISvS!%z0cswT7{y$7QzzR1YeSXkEf*5fY@mZ7P^7!e<< zhYnr-`E`e<^>vS;%a)gTf1A4lL8`fehG^jJJfL0(rsJ%wZbC+31~Ubg<7)$tyL88WR=q$ZHew|L`4 zO3>_m{Zo|8zbPYaboeux{ZdOpDwp-0I9ky7%~ILSzpC$#jb>+#_M40w{8Yqe?;AAm z$--di#apu}b@il?UT)sW8L47Ls<&O6$<1EGjIrafaT{;uA|^MQ*(9iwGKE~;(E0Kk zzO=0to3Z&PNgCAey%%SvMXxx{Y`qLqu~O}L!NYetp{Cng?b!SC!^YK&@|jaj??;i^ zkqE<<{73bXf3&%>>%7D#U8b2i_-mN`0OJsm!Fx&^$OS!7Dm>lHSK%6`krCuS`FU7;g3NV%*7F!k8;5Euo z-h2ycY`6#qY{bS{6d`tzydlH8S(d`?i?hIPN7Oim??HY3v=XKFezk`e5jAA~_a}@g zl@1|rfPe~P>UI9AlDZKg%oR|O;BEkK_@xZ=X%AYgo^ATC2$sDU#%w}wdKOrnvM)Bc%^pQYT;jB=?LtsSi9bMAHsfr z+eNot)&ZE;n{)iZBB$%i)d!>r*~|wZuu+rFa@TJ($YCH`Yj6#_22N{`%{KKZ>V#rU z;@%;b?gQq5RszWj_3k)1~Ka*sh=7>pTYqs@ETX ze_CLAZe;@Jr9H1ZW-sDc@x4RlBII!W&F>Ofk)QQOqdXla`SAJKVX%p?D{5yaAIZG$vH*hN@CKwxU9 zwBhB+#l@uda}-oUE}PR|%GxgW8zIS{a9GasCb0vS=NsCX7C*?Hyfw6xba z+)_LOlrrf2gUe5mF*(o+8$F6yZgcxKxtG$8Dq8X9EM2~NiyVwFTZPpC*fJjv=`VAkOG5lFi{drDnE z_c8xP=3Z(oJ;P?<`~5UuO#J{d0i@Uga___bOK*^}+Vzrt;Fi8)nWEN-TUZ4*M*ezN z*N6su2phP~fM%u2z6SZVa%7^BLLsb+RN~-ceyd`noLY|3FB)FIp@Du{o7=Bm?JTI1-veU{lwJ=B^Li>{r#cGl4IXNEKx%~6T&u%P!vPz6<+46Fm*%O8FJ@~|)b(wWlRn^;Y3OAL@^v=Lqjbd3| zpl5J#P-45X3{s4jtahJ0AB!5x&!-9&u5h@S@kqJhQb@av32SU@Jf9poJ!iBYwmP8q zycc$$;6!4ilbZ8niWMn!=5%|byf$^}d5@Gf8t0AiFM-F+!MI{o)~rks;U=4&!Y93w z8Ivc`;z(&YX52o~IUm)oGiNk3X$oi^UmKhTp%v{X><-@DL61K=-xQww>W*8F;m9SR zk^dt^I3u3TIl+HL3^!!pv}2iE)DZdVqrmm zg9Yo5m$6$`YDHts*dqV-mpMHuCMG+KdTtI%pne^vmf>NyPu-Zhq*)}3Zbzs?Zo`%^ zoTQsJZlZUb-TZ2PJb;XBp|^i~P({}4sd`o=+7kY6Y4w<9vy=1CkN0kT{5(9@kcB%* zDKUIJj;Ri3RT?ZJp)9#@0@t>gSQF85YFNqElmA93k@s*2>CvrS$_EBk|0J=LfU==3SM)RKuV9udW_Nrh3Az z;DpVDjMwLaB-B(?^DZ~EY>D`s)FU%p^|_@KCfZ};1L{^HiV~7E9 z$zAhpYE9_HGC5XaVq$D`^cFpvWXVtmD@&6*SG|!@WR=guz}9Bf2s*5p9rE><%xM)a zrmez>wUUMU^bv}OjlMuO@pR8fK}~16hs00!bNH_)u z2Vc=^u<(W}sDPl;2Gl^&i{@eWJMoh1;N4H%4ap>v4%c@qE#^wC*AS^uC2kvU4Ig`z z89?_UJ`1+DwY6)xZ;kJ%*U$rZ`Az~lK1bzwPE|}yjVE0qhw(BseN(yq3D!-`36LA; zSyx`1w2U=Xz=52VjCPKl)qvDP+uXd10?`zbCC@ZOvQ 
zO}homaI~+ad2U!JVr#$%mKV^yQl#3_WprjGdZFQDsY}zi8+c0~NcloM73Dy9gH6mJ z>F4;Xzqe>6u(puJ_O-WJdg{LBGTS`T?bh}<({C^PdLT>{4Mg2`DjlT+YBeAy$?V8{ zrRCCkh(<#YT4Ogo($RtCRWUX(L8c+WK()2J%;eI)393sq&NmF6OKvyHwxDSwoXGIG z%@Wsh6q>E9E|2TU>TwmY2J3%HB&0{0z+^8D)B@bENPzcyfJTm-%FGj0$}&~j@@eJz zq-AbQSVnA3Qf+x8Mq6gZda&~_Feuvh6t=DUW+!|)kTusfb`8bbP)5%*o1#*Xja=16@2c1xeC8 z>Cp+QC%v`3Ek3))+;m(8z7&Z)+=G&y9@5+Evb7egRx%G((pur-qZ7N$>IfE2G}i4x zsR-CexcQRYGDu~cue=XK?pwlPBG=C5`vm)JY;5)XK)bm&RZV;&Tz~ty%NOW+B*}=q zS=iXt^){~0HndHuVE#ENn`!pcG&CgMPOh%FQs~I2$jGRSNpczr!1LYI*c2r00(VN~ zu`VBYns#sZmMW_*Bl8x;%`#&Hk~$?U^%{BcL* z`DbQpyMMa~t9vU@mtL^Uz@6q=ELcnl&nm1eOdfBSg$gb~hkRCvIXfp`$vAm+e!A#6 zw8%~4C;&!E`#Jgg&(Rq3zsrOoQQ}zvfq{uE#u0p-i@Ur1D@Sx%jZ+41jkhW!Q19?q zjE3?rj^;OP9B#XG{v#p<)f~ee1s@P{WGz10Pq19^q z#SV$4Q)y3TwFO6Kb4F+X=(sf8_}lJ0q0QjLjrgb2hilhUZUPYoyP-ebd%hj-0?9#P zI0RZT_<#GG+0AOzhUXN*OB&3jLJxSrA|cd4@wXzfCd1eoh_}G%MU$u4;go%3f9%&6 zM{Xopoh75vvv$K#>Eh-3I0-RJMCMPj^3qC%gmm=)4YGqRAvrkwLvdJ5h074;67-{q z#hg!6#wc-0WsS{V!P3oOtCF&+!`b(nq4@d-0NeCCBtQHmE+!q$CVsIyUjgh-?OU;Q z8fx`s#yiJ^9;d*vO9Gs>EM---5KqOMPveN-L1pe%(E^_;+Y4=)@qa?NSp6=LzIP;{ z|6ArGU2V4O>3EE{|9F;r55jnwrr6k@ehqFc|Pi?I$G^pfR zwo(QKk?K_4?oH|R^SP@Bh|NPyP6z&HHd$uyyt5Zi^lztP?H<}lB<>&+xfMSm>PRvL zp<4pTODhmlw{+v;M+f?G1DJd)ZejkYAL2o%4>i5;r#&7Fcq}%Y#cQ#WTIU`w8>f|G z)nG8cklcNtmN&i`YTAWrtQ_p4^bMK~B5|&3IqG8)< zWkU<>F)=**LEhFN{Xyzz9^44^e!g&NvlHPuK1@Ky5GtVc4_;Q`RCDopJrMHG3U1!~ zaM>EkV$|(M!DAV+s*kOiY;JvDqQ4Po3nOvb=@)Q%dy1mPxCgW5o$$m;j#eFhGeG=pRm708UI+|JDTwd% zmf$-E#TB&Be2$A}u5H`yS4o3^5}QzDrY6VJbblT!Zpy4WiVM~;O8$ehPjYhd=boU` zAht;&YT`cd$|VWy-r0iB?DtFzo?8_75tsV4H54O`4vzA32SCYvY0Wg%37;N#hXG1R z{7O-;D=vUNb70UHwl0T-~GM%9NEnL!yqFvV!~Rm?`Xk4{$HJ`h(Wd#;kyp3 zb8ig9vc!tuoTliV{Y`CP1`h0MGU-~z*62qPd|zR< z7Y1`&M8;g;l_68SaJ#IbfkBs0cV!(Kx8tsvS>s77jcPR@3xbQjbUFfp-r0sU)BiqyVtt}uRz*M>9nZWYO ziha%N(rL4dYSyMUi@$b&(f?u6-8x&1)t_KfpiQ|Xn=I`7uBMZg97fI+YezBU>f|OI z)XJyrG_Gxdpb5uG*1De&TbRxfHZUMA|_>5EcVh{>^s~{%_R=-o+EuhLx;B`EY 
zky2MH`I$SMG4FI1401jlQ|tMn#wOSE0HJj96t{HD;L%|NmNkVcovSD9B+-ka?%Oo<%XAIlf;m7daX@HJd(ZPXeZH(3Jb?50RiddBpU92sK zfd*YD{bECXJqjv1xAXN_XJBmg-c(`W*@edGy#WNcC`Kpqv$NMhmohVRRh|6=cu4h_ zuu~y5m6fjMSoIV1y@Z@oMg6ub?E*OBD%owTkOs?c!Gj=atOCTBOL5av zQ%+8`0I`FDjBEjg|G?!J2iQ})B{?G%qeNZUpXkWhqrby|Qls}G1)68AR5v^03xyLc ze4r9bt~Am*{CN*nrCwtOt+G|tG*4EcK|?`t+db-53q3Knv}`fBNhnIJ0qw?&=3*_7 z9DszctsBb$#>g8I5@PD9)?nUO4HP?*6Jy6tJp7(5gcH|`>jpd{t7%?-XapR&u1)KH ztgu-|w(I6J+m{Zm@Y9ZT!uS7+k@-atv2M1_^!0}o?NQX2_i8Am9|G~N=E|nbyXh=# zOC3^Q*JGHYX%06HPnEJ^QeR(2hO%4EyIqZ}2_m8aT`pI$$Kv5vEt|Jau=t!uf3zND zbeD8!F4@#QL`1#b_S6PKgX(1c?@3FGix@9oDyqr#t{k-yx23vlQ#OBD@bB6ZVy=da z_Sv4H7qH({7>~9jf?%}44<2x#1kyft*Yjtm?0fr%Dar|2A5)54d)0BpnmD!JGAfup#uoQgz_7It9d^`d?0(0HrCnrw!kAxN4o9q=*-UJRa91L>)6TF8hts@;Oq*nug})n!w^R-y}P!zW%Wpd=Owvs;`HE zGMne^ZB1sT!rjE-vuBo#SM~AO4LcQpr(vk4NAR7?D5}-^uLk=K58PiUMuDoTI-8Al z@BT*#4`eOKs}(7cMc#Mq$jHnu?R9M{f-so$L=P*m<=HJ~5|X3ME5`$|Xycg7%iq2E zF?v8^8!fJ{YCH}8dv|E^5I(02t+USM_wQ_}a%0g=mbuN%9YUjL5WpHyQsyC|1)GsKPjcEuWX$36Kn08wnm zPslQC56?KaytZGt6@;Uv)b!=sHuK-AXuq}rT2#dZBEC`eL=XWskMROz7%KfE?M3N^Clg})H+ zzTI6$qR;M*TA!>JtsOMxK!}6zICTbQ4lZ_qa}aV~a(B@8xxGy+kU){@CfyG`^m5mB z;ZR9JxN=rX>%fzeuOBAPMOO7zC}sP)eLwr${+#1 z#pP9w_Ja1+XLBj@9bqI~99U0Iff{|dKpK+Iwxv$Pd~ zLh0bW0a#Kug8=p9RX;8c0U4J1R`yd`@AkLg&v!E#SfBa`Ff!`~7Ik8hg=k+bl)cXw z1&^{@hqJp!=#2ZD5{67dZ^MW4!dpLaaB`!fq7!=`UR@t}YVhi=pPpBUCeOlMwzjYV z;ShFvg9v~MK0ci@XZ(XfNuPCf2R<~S ze{R~JL0rXUb~p`)?&EFZ8R(zVpqu3XdEli;t?hdA@@BkBt;oW2_>a$s1j|mm zXD~lPw|nyr{A5j8tFoGnpwb2wWc>MaTKy#Oby6?De*lk)y&@i3PO@-mWhEYWaVLhC zb(t~>e9_L@fi2N|sxB7et z`R)u*v&?HAg#-pN*G=M^Yfi(#l2R+XeNX9x zo^EVu6)0@)&(wn>8Nv@2cMQZJ>KGm-Y8>~9v|GQcu@evlS8r1LwgfY9hyQ1{Q7>rw zU6`MTxWKk@P!PMAfNB^PPnx-gsxJ#qSA)EE<)mXhg8^scL$AjRwv^yogNHGM?@EB* zJ2EF0s3hgNii?Y3@D>z#*^3i|&5db@k@)-fpOqax?u3F05dy-Tr*3!f&Gub0zh7C0h05&A@y-D@1^A}vKwI+-HANfHQ?K`G791;|gOlBIzm3{jdCLyg zl6c32H%`hP>;dgZ3eSaDvNzONXV-d$Rjn zK4v7ksbyDXSwlC_Cr5(vzhX(WXA<-E4!b~5AQEzukVM-*EwW2!Y<-A;(jK-L?L4f 
z%|dFt9|jL0p11QaJkQj2bnLj?dq!oR3n)N=iNTu@tED=5L@k^s)qvx|!c%Zfw_*0p*Kx|Z37`FSm2 zK~{V|yUfg5p^N&)MxE|+&30cOKVP4?+FI#@o4#1Jy=K>&U1pOpBpn9f5qCa>h=>TW zKEv$~mHiNhn}0rU_D|7E-oe)4ui{xIERF<@z>=ol8w3<8g@xCPUj;V?Q(DyHfBtBF z8?J9()nTP$AZ%bDybkP!!2kRN9X;?hpQNLqJiEB0CZjZ3M3sPWui=)&9qb8i-R+~aFb43G2D^@kfxdyr$+EZkmRMk;!Tp|+TT);@h> zW&dfXy~}}iL(Dt15CeAzV?LnDhTw5uzC-)c8_)~npPP4fj*f8fpYUA*{3r=Y?{~~^ zxc^xISvDwb^kC{5RZCOTI$eISg~soHnz6OT<+T){wu~T3I03AFcFYM-oL^`w`qf(i#fXPH_XRsd-6G1 zeG$(BH#SHrR`;Q2fgCfm}<}>;6Gr0))c`0etTBF-Y8Qx)9bv+JgX2#E} zx{o72vt$i>rygC`AH<;4_6H{i) z?c^bp(Y@z=)GxapUH-wxNFo_~7~-@LQ4v&H;+1sr|58YS;A`NjWQ_b);tUWP0P712}qf)z4}d}k@jEg;eZ zu|;Cbr}=@z8gSZlc6F`kZB)`#JC9C?MUp!-@rQ`&smfY!oKpZ!?zqWpU0q&X-Lz4_ zrB?8dzTvRqET?_QZPu_68X8(jBTNo(g`#TAWt7nTd&+naRI(Y8B|A+uHpc>eD2zDE^*k^ZxDDg9tY9 z;C=m*qx0>LsC<0$ZXUQ$cJQUw2el6SDzixXd~*smXM2>;a~}b0h951T z-}dzM2(rt!|3VhMTM+z!d0LsS6v`R;`q^+A4#kV!^JH}a0l&)T5i;-HnYx?-^+-JY z`+R$@rr<8L(}Xd~MoP*&r)F(MG4=LrgWo%BsfaBtIWZrJ=z~wE@Z^cW)6WaDlef1X z!bR@vD8hk^UMl#6`}NeW{46xyuR}#r+#sa;^Hm!c3%6sayY8N72xV25Rg>C8w8Baz z*V}=2%>5zuegAAiptx%iG7-;TO1*)@|9p(LFYe=qj_(Z1Gwku~`R?oMtL-swRns=l zH=iLK%#bv)g(58`X6$y&N*9|U@#p1L&8BRG18-5XGdO8p-`0Ivx^BL*PM5pbTi~@? z{36{yHLO}+e(~^+Yv`N?YECLAei3fywMDOZ9}pyKseH_B@Z4+0M`-sG2_od5uo72N zI=95V+l;x%y8GwagAdqQSFz#W`mWEi<-`DvE^eu9TVXakfzLw_iUGPjcF5mHa|ZFI zJP$1@N~g?jOzA>*pslz&rw)5lS>q<0cfpw_%MuV=hXTeqN#egIWlimyE&4v+BLDFt? 
z@}FCap|~-1b!~V_9lK(*g#`jFphxDZT19Se1LFOS@q(iwYGWg#%guHf(Ed(!cJL1j ztnh%jb0gGh>{FLCnXv#&`;NGTM8na)JS;bj7}zzm^!gol|?bw z_k;NSRaLaxB}U^GnZyri?JW)<+6sj4S_u+9On2Oy&1dwe$O-AhXqrl;q`YoyfFAh-X31WdELo^)09R z>`xy7Sw%(n%b{l(gJ`qW`=w=N$S634N(JCqHkiyySY_D)p7rJB32g4-_jR*|KYH{) z1-~4G@CD;0?OX73VNNctbHc?8TuN24NN@yaY01EMLR;(w@lTvIGWaODKoU(!>!oEcSfv&FZ@2?~a6S1?d#cMw% zN)rSb7<9q&#+7Y7N$9OJK;VS(mjUZ>7|7EBt$#^SBovQN9O>DUNB56!l`<-h=?SeQ>&V&sIg#N4??VG`{0Z<=-M|5 z%?#Oh*9nt2KEprlCiH}gN`OfvRWl&sOy;rfEe*=bT0<;QLc_(HU+1zrLa0@5w}3f5 zO_m0Xxr)k4k{U}Zbu7Zvp{c1{`pDSW8kt31Ro^o|@fgxT#`tx#uHhkDa zaW9V7aKct1mB^dt>T*;dZP82d033%|Bssv0#14ubtiajB&KxgkYnuX;G@f4;K<*oq z;xfPWq2$iFsG$MZc;#we5r{>Ab9A5&Qc_azS0zOt?dND8GlP|tk;*S9fuRG0_3AoM z=G!+ft{U!{)Fx1OS-S4rvplGP@G=|j7rN4|!3mY&mre^6XY?0z{YA< z5{k!J?Xd@%H!AAt%8H8iM;T+W|IH)-@^zP*zf1wo<{u!y^!~8`+?#!2WWVul_wpz2 zixdfx&;K^v?e+bH1q80e?dXEO1&@o(aNQ{^gfCjS!g{n@!X82^2nem8X2HLJ=4NJ| z&Q04g6C1|G$EUIG8+4y115}fb&xgny$+l_)1OXj-6k;%{YU#QgCFlAtKpYO@-&4i1 z!Q>Qoa5(w-`ZYD^a2j)mE0hECDll&lhmVB}>k7Ym`7)G{zj(6vvy{|c3e**(0tHkB zp!^J!c?JL>fA)DU29dOeW6#9EPTw3kSlqGH<|!eIEKQ}QW?@+!-+otKBB`;I4drCe z!Ds$AW^C_F##792_msd*MkEUbM zNj{A9wIfA~-@rVzx3mm1bM8(q$XL+qC>nS2Sn~(SJrxR~Wh9*f-y0G&KKwnZjDYgr zV0mp(BBcaj@VH zrQ^I9*)nx)O1gdsLzT~qp0w1Qb}`v&l%=0iufxGAFwULxSSM%3VxKZ(fk=LwRf&#s z=PO{)AFfrt!h4$ap9ytX3L5sNcRu{7@yA*R(+?_IL3NA*7{Ph(WrCYIwA$kBrv`A3pPjXKLX- zzqh3}g^6c1HChw2GR*e1H>5qy69GdbutMBFq<+{2O?d}KpF#$M$mj#3VoiQ;$KO*3 zfoQIbkB<*@eEj{z($a{G!uTk;sS7GAPYX&~Ku-ZdEd%p^0mfOV98X2>{ZYL3jgfvd zmj(Jl`=V5eyg5+%djj?mxDAXeG;o%>o<-S{he;NEadZ^YgctaDs~|z2N#J{M5p6m&|f%qj^I@_ z_Kc8g@IU%3iG5cyd+kY0=y_mT`S7P8(XC0NQUN{_No{ep#YHi%NmYVyM%9hEV9`vO zyP@RiG*GC1eTTuuVz-q3h=%~Od7GCZ5|YA+59VT67#OUt?NxnRa&vTK&&YpkICtf8 zv^xsHZhmS1spR&I+maU2(?fa}>_-*bVJ_jnOjLv}0g-9S>>RqU{|f?Hs4-jssaBM@Phi2$TwMh)hH;k#&Q4>s4=}o` zOPVD$U#*39FE5%&NDDI`bNo@HM6ArRWgwfoT3_%?04do4ezE=k z)6>`z1&F*q8aXz@DLIU4~hx=JxmC+uckhMKmDM;AfuM8?C@yWN$@%_dk1|x+Z za-yU>`gZuxP{*hje7Ji32D+~Le?WY%p-$szVMgK<`?(e+qfyL({yF(*tRa-2tHCIz 
z>5*4jZd+H#nB=bqZo76Yr>CcjoGdxf4lGa(TyjeGWGVC`1qm~*Up+=O+>%(jId_&C z)<|?8XERgk+d?`3;5Iu)EaWE+r>x7Or3x&H~+Rz={Q4o%Z> zS9;RRRQRW(i#?QT0C3LE9_nET2)QNpqy?%$v^13m4Q!?)i$y<4WQ;`E()pYcM#eZs zL#p(c2at$ZQKK6m7Kf#T9ApgTHRfQn(h}!+i8!D68hb+5G8HZ<7)+X^Dr~UO1)3V>R>>c<*G~0F$DiXRIsf;iFL=gC{mOP2N+*&6ffzT3)gdC`Jlgwb?S97o4!}$bD6wp?*276{HU{ke1CPs?_NC42 zAbj?9=-R=}RkwEEf_p=Tspvf$>dfcBV5Ovm@paJn!h)8C=&!J_p)huBPt(aks0`1Q z6DXRX5#HQ(k7HrgwB&@~@}Cc|17)MT2>Ej{XfrLaV0SZY70NzfPRGpHWbyU8c~>PI z?ZQl#a|ug&*0+E2UcOh(vJ1`TDG;sUVOi?^42tWx5#xNNigG@dCW9fcof!dTY~fP9 zz$N4f#Sph_9D4FvIoyip?$YuFl~jSOI@))P^QRUKxk~D>iJ$gazJO$Vq~j}Qf!v@1 zS;{9XpXHzas6JZ}xUY772MrPeF}}J!#1tI$)n>DDo*Q5A)U|uEY4@6u@r9#k&7|>i zd}oJz^j5I0OB_C{%c{(d2S6qcr_vx1Z9p^^<4-^!Y~|$W>ZDAeKa4C(SFPm}U1u1H zoS&cv- zcmkTkHgy{OKLHQ)7ijXPdVY?VJ^o+_uw%aL<#n~SK?KaO-8x&CP=p51pmK0?V|!R? zfglSw7k3Ks$4&NQKD_mIJKhozmjB}+qDZPg$gCGn4p59$<>eD2lFIF-ZDD;V0!X3V zuIurnpp(QqIS3j8AU1)}x=vWt0ag#d;s^PM7&C{n+ym2|KEw%TbJ&e*xxH2=i>#XV zLf-fK&&)5VZ5QBxeax#YeNaPqhCgH(7kaYjo2^C!33zYF#xlMia0QHA#&Vl)!G{HP zxN8KyclqnS#|3^IY!+?dp~mCezHOBWK`P8;%o3lV^`#^yOhjt8p8Q6(=DfAl1iy8_ ztXNqRBZK~rgA1EIlqicnt4^t21*b6%039#|nQt}_)9mgh=|kwv-JfvSA1jc@%wne1 zl5evd_3Oa$@@GMX2zt3=39L(OyCn8YZJpo0BXQwR=0geRF>UEY_0Pt*0t$jJfEKT<^M|KG zSa~;F2=83FPiBBcqp--*QxgdlHLs{>;MXs+ic;mi*-<1SF!w4L6%1JkuIGo&L51G- zwcNrQ^7F;DeS7q9f-}ZA0UMDyD^4yiss|{IgM>3M1){{Gml`@heDKDQDe(}t1-(2E zhb?As<-lY+pr`H|9W|=Y$uYFA5&~B!rL()c+bE!_Dh}gqG}}_G`gZ=MR|ALCfJ}yf^UiW;e;G&iE&iZjJi#_2`wN`Yx=0_c=%5lBX(Iz&|O- zsyhRFE=uCb9Up8Uv_aX7zoJ=-YKwPPE8gkb#?(R^cMxf*f%=U9P4^LAlPS~e>qKp} znAxyc9ekc>0rMl?c!4ZdEPVxSenVWg0Z9r?xGsb|AU)X}!=u@FFNZt@mjw5oAr-s|P{p;qnd z>CJC%SUisO_d0*Tm;k9|x|1)GG4s6)9wExj}$b6{-7p&u+153YWi z*!g@0pjVK61oTVoyc$iSp5SSsmrUe*;j6Y!=BTK&&EL*DQ1*HV2mqZ=UjdN<$vrF0 z`b9nQnWU4YQ9h|RhiXkYwO#d5qsLBw2Bj|Jlsjdk{1%G9_wu^=!;J@u^7zmozu&Ul z&GhaCI4q@(1o-veG#xjy0p-XAzZ2xpxwuEH2k2gE0cpCS;b;gUA^|5nI(IGZ1*_pJJyKg9IG5GmW@Fbti+XCPG~tBW%1@;qE{oaW(5B&Qu0+aa(Els}uv+o=SgjWdT=E%4*+bYI^#pG)1u-Lg%r;&O 
zA@e>9XlRli{T;`-fEourqfH06k>45)^!M}t9;D#gJij(>uR#)TqnY|x)eKoo0bmyc zYEl-gSYr!SPij>bEAQra?pjQf6A$k#I^RMbzjNV;wS&++M+f8ePgtRdid6o1#cPgM&xMCX7=e;IXxu88xwDWH*Te93!$6 zQH8r^kQl)LJwwAlp&0b7#cAj`2l^v|*(3}(O>Lq@9Bnhv?Y=FR#|3LL(*V2)AH1`$ zTw`O)4gTyHP1QMn(o!(idzGQZk`R8{Mn~`>VD0CUVowgM3F%~)W`Ppxi!64m`O?rh zPx6B>2WeW*@B9xf?yyknj*QR8(4>WlP>z)&pa?*h+tJ zXGE||jM5GiZLtV1JUvK%pV7OVPQk*p;2b7gls^!lD86IvD=FgC4GMG67zqLG`y5PI z*udP}JgybJ=--f37QB5ayso&Y9zg>TNT@#_E+dZs|Ju~>3WVOhthRQ_su;*a*%F}! z#X=Vi*5WqGgs}#Oh9XE}4Jp~FBVhK^%#Zal92 z6MG)#jWXK-?;W?FIUnyeGF9j0PP%weQ2n7{MWOZnuZ;l}$MOOm<_AQX&3V2Ua`Z)M za=CbHC9!6Fkk(l)r+`Y51_l@TBOm+#UlaKEN0QAd7sYaF>W<8^!*6={Z`J3Awe9YX zL(SiK8~IPL=?MomOS}k>8j8veQ1>ys1=|D!FWzIBT(OxP^IG*wbZaz6 zRn`Ca>67yV?#0zrK*DUCw^fL%rOjrS)@C9#I~v^7zU6{>-V!V+32;{lYpNxC-WViB z|FrPFV{vD!5_Vr)UWkANAh(_b6dVWP4$Nfyc$-sP=F%D$Tepj)oA>>a4O9j@$8>LR z+Azx@4Kr7j(gM8i@Jy9Ms!S6t8EXYDuq`WZutbJj7R_UPidKGgBDGz7T@smMSX$m* zpBWxHy$*zUUfxo*%m4g(gP0?g3fk}QHrhn}1j_s+3rB&+_69)9^YSwos|7zhoW{w5 z!{lS00uUQntpXDrxp&ZEs*oqum}EX5GMLEpoi1EYVAG5ENC>{usaj6{or<_9WL zP%S!c!*1QH$5&`BduPgOE#qTjr>iRxSafO(j`Mp5s7FVW&6m4<{I}P%oSaubZ@s`| z8Vx#j3JRkm%s^TiHJf(zq{Vq{EHifxn^@UK|4#QSr6tvZ0zo`Sl((hzbm!rgx}9; z*gq1?Tx&w+u0D{I6wNIts3|Xh@_M%gRPdfY4ZO!kk=S(YHkfOUeQlzRQJ1X)(-%5A zI=~DAiSV`3&r2uk5LO`ePS|lUtx-E^kYW#~tVjfDGMh9Zu{=wsVaIAz=C}l@0@Xm9 z#_q}sj79AC%&(cx42)gIuK4$1b&1X5Ga3*b8}CN8u9e(eN7up_2 zXQ`fd7RJWY@nKWpAI;v%Z?m|g{v8n-1+nt>i<3kcrfT8fU>(o*zPvO8ehl(ERKKV= z+a=-os+le-oM;2m(u<4N+k1Eb&lLOOZ6kleKeL~J$8U3ldFEb4L#hloEkop(Vi(;& zFb+s>5U8tT_4LMz{T#sYP}cG=Uqd|b@%8<|is0h>_I^?mM$kRC31O(F>8xuOXhfh^ z0djdf*4N%82XFMkUA%yn`kp3m3QY`rfb`FhDJK<4FeU@=YqLnRmYyvE$p|Qxb%zj? 
z6c>A(6tNF+tbXPDa;A1&RauD(NNXN9!IO3}&JPiOSMb5R{d~yc(oYc{z4>nJE0DvF zPfkAl(@mRy0e!us(y>%zh36g6NbQu|lD~KY)Gn)=j2T03{xz2B%luCWWD){-wKr72 z@H&VW?vRF$KtQ+2g#>=`@7Hhi$X~w&lDfZzBVcIMy_@^5*F9b6?-yv?|J~h1_}AwH zfAw#pq!9MM{+`}s(XCfD4BiCk08F0zZGic2KkQ#068z2o{nm%2KQfSi1mYx^1X5Wl zPDCk;N3*hwizn`^iN}iG?h!+6?lgR0h`{gBIt?Oh@$2?aakG7XEr>bJ2+_2dmpiX<( zjf%_iRk$6jUXy~gthJL1!p{N3;0HnS#nn#V|xp1s6=Bu2?uMpboeVVlRg zEs@t<@g&kP)M+BZGC2G37wPZ*A)GdwzlwmoLOiReXzwt*9k3YsbnIuX%z!#doy{_y z!&xU)k_3pU#urK^nwrBMRv`ShjT&O26w5> z8b^<(UafMNY*`(Dn+8<&ur%3&T5W16Nf|MSk4aSKa)d zsbAEjFjg^)s_M}%#_96&aRD{P#qLaGSlFAq7>0PsSIV(q2tAm)o_3=Z*@X3t{Is%s zrnjp5wx6o0%6jWg^%&p_95{DoTD|436iMxH2KpwyXAyI=Z5=|3VDYX z5r&9%CX4wUPNzUWJ4Z6HO^?Yk6FLP9EpuzNcSX0U@$uL*-7g+KMjL7}iegKU^XTfw z24tLC!!l~+Vz=_`NY``AZ*bu}CHE>zP2sHpYcJn{Cn>ojxh3mlo}QOAtJk>nA)ale zt9}*QrWL;J$T=qk0rj;wewZ@q@#-jNC}T3=lxqtFbBk0CN%&08-D_T^p*LsaZH~d~ zy%d92EjRpNh)RVteNA3h*hxsUF!gfGOYP7gGqLfQ>k>iep zg+`dTI2b>?&^0hfp49azUJfBG5p3&Zd6n=6ZkG%VO4A(4cP(j=ZCDrpBq#`Qfj+w0 zGj2Lk_L|Pz(Z8b0@pC*?S@!i2%WxC$5hU&Qck6)Stq|rkK6IE?Pr20I<`??9S?4NX z;Lbt|b}h&9?92Q_pez`#rrqz^-QM+qUB6aFG^}BvUf0ukx-J<1h4n`NKGTo0T{%^G zFih@S+Gs;Ln$+<+CgnyK3~8vbR;b6Dh4=fmv}B?LABevv^b5*BSergh|2F=NhZSeId}1lnZl` zD~jMoPx;BhY*}1FRcEbLq%h@#Hf+N3$e+Ap-^=G*YfV)-KrVx~%>Y{?AT!pmn>bt# zpjF&!U=09*qexgXLP)zp9z+N-(e461KDWceepTjSaRjca5m>6sZKBl@sQ64 z{Xe328n?rMtVkoB8%R&w1D9 z;)iPuOzhcl@9Vz8Zo(9yTGf}+w)OX9hvBk)gpPV5AML^>u3uCMzXG^m?P|fh^`l$t-QB265>1M3L2~+9MMNhaHL|Ee&BNhlV zVsWk66ID=+mO^>YH3e*;7&S$aU{*UEDjEhR*8#2`lj^n6J7CSMZ*FR6st>_LhqhlB ztJn`O|8CPP(R+u3u%Te@01Z&!n3Zho}HkFVozT34n0bAnQ9+oIAs zm!T&p3*buh+1XU@N1iY{aRP(nT~{}vq-sHx*QBY{4p*;@%LAU`tB%#v`Xy{Y|MBwFulC-`#KdK&kxWM!xNy(cnCR;0q>7c;J3EJ` zk@EYtyN}MW*XJu{n64(ztI;ZrwEBK>2=mH|BoWQ_gT8-%avrDLdOFJpm@+)SRub?H z$}4N{fCmBJV55B2j}LkA$PG6&zKqiga4@IesLLUxJJim=UR89+nwcU-nz3s^>uV3~ zS5pYzt?e*pmbGPbqEd1MD%4Uu;EKr@y7c9HO@<$%dU#*&4;onXg2BUnfy~+w0(Qq| zvUIxo(OjB~3eLnF)lO?iDsMiBp#*xG0Xl^Mdr_#^2sC`hX`WjWKC}o8=5?t-hqSI) 
zR3$bv3VtxRc+o}A=KOnqp{2eIJ-PA7C*^nYoxNL9&0Bxf+{44zJXu)x*~3e`(Wf;#-zdS10DM_$FteFIdz7Kya5}2JkT8s{?ZX?#{-H9}dv{ zmzH=WDCM(yevApsmGpr=v^_;08yjm~I6vs^Hf3OC_48TSqot+A6uyr(9fLotwxV)k zOX-Q$PdP1l?4>V9FK=b|MM_Ya{4n!Ao8WD3ZVn0`W*=7MSXomRo!rc?d8~AaZcmL( z{j{gA>sF|c26SSrt$uQjp^FD9K0ZDsCL6oKnrpHmL7pb6knd<*(hPwLNRR?C9vP|~ z38f0T+*BM~-c+AD6=(I54}Vd`P@C2h<|LF+8!;gY(9LL2&W-0(w6W#>Hp=X$t`-q)BTRIv zX6<9t%N0~5J*?+AF;5SH{FJh$N*SHdGqgTfoX~q`219sjChp+0qrTPm!kh_5Nc7Ih z!GSVH2Iy=(O%51u)fH*?7CQ+DB8Z8Jrdlcz`;1wjHRHgn0?6i%+d9DBcH_EM`CF9E_x0H&Y+lTLhW2~e>4`JxCfBQha&$Lz<%uR{m{n*vzhJR70KxA*o8 zb_I{C!!!6De{%&R#-VTTf=kq78U+dUGHPDy8I90DqW^dXGkyNwWST`( zyh$Jw$8)N=F43FIusG#dk$*?_)*VdFHCCGaAZKK-(2Koyhc*hktYks#9$e;kCBRRcj zzAs4lh)*a1ye+u0xg5w%!`=1d=~Lu-Sy&>Q%^EN9jdu%RDY_cUIX;ICWN>+4XRI6b z##&)OaupM5@-)7Sl3516Oc>1OMBc%3qu$$3`(D@PrSKc8L+dA|Nm>dqiE{ohgrbt+ z=qzB#YV*}9$8?ku^j5R~5Qo1HEp?>uECccLWXUDwmG7R}*3Eu+Fe=+G`i3{ocp+jq zH6IA)>H-EiEmH@3NrmsbOY$78j^60P`KskcR1@Djn6&Dib3C^put7VxU)^&Y!b^w4 z-clE+%P|h2WXQ8Yu)a~>ecp+9hQH@DG)BOUq(ESDNqDxj?Nqf+5yodhfC546U6 z25c{YXpGOIL%>5{#IHIUm1UyA&}c8Z|U} zo<#@Z?xG_Y0i@TN7V=ZQe>SZWVwrRt$eltL^nWvkBQy>HsNg!k+OQW*=%mbQ^5CIs z7d2%;TAzF!j^+}rmPI-*d4+-q%tVHSuEK36`$k6CeRb}EcBaaW2y7)?l3hc>@HE?C zK034fuY{<;pkTMMUFjW=FyBs1?YTCxaq@z630p_iQ*f(hW@gUK%>`4$KGXYB7Ooo) z5YLdWR2WH|?^-&ylefuR;47nD@(l++Sbpdw6A50}ro~!NRW2uc%+?*Ycyj%vd0TrY z?xMSJ^JQEm2|{4~F(-_pUO{kuw^y&QcV`>#FM25X@dE!l2X_6qSxIjzqZdShlPtUU zACGuT5+hj$A2I#kf?$oDl*9D56gqW5Jb3!lu`<>#%&O)3N*T>d2dkZ1J^n0c#F(t$ zS#EA)Lf%r%T}%A&k}NhMtnn>JvXx}nDd`jFDz{%(MAcwSNJpn(HrZiv5`2w;3E@i| zB#{TtBuRn&3_W+COS7msp9?Ry-?M?BJCUb_-I!sJqu?l}hpfD+0vzdcW|`hnv~fV4 zN~f5LD=M7Pvxd$~TpvR zWLB_K*q7OG6=faCTRNfBTY!8d6^cBP0Bq$?vb8cyzgfZZ)F9d=1~$yW`N4~h%B(|? 
zmlz<>F#gGa;s9iMCD)(`j!aBM1b(49qWT9bF)?g%QX@ryXI_}gd+aqu(jV@|{Q9=& zKN~n(g@$Yd@87=%DKN7%Ap8_W5sYEyt#}A-X2@zSFu!aqUkm$_o02XJwGn$fN&9hi z>CLRxYnQNu0I3)AXqVO*plfTtQTrlfpFGRhda(EW}JXXIQ|iL1g|eqPSq9 z@a(ZPJJ)kyiM(j~`d+Z{af)G|;ACvAWxU0?f zq4_Y(yQgzecU;5jns(Q52sZ7hRToI;;EsmFsN1S#M0_<_A#%`&Dv|xYyr_V0g5Fx+ z$^$9gWEYo5vmhEwn)WlNx0R+tw^n})4DOG@y5#ZXX+{BQWA8+o958uBLG4hM?Er<6 zLfk!fd2=&KL~eG>GROiR(zsu|rfbl}`uzPvfv-@d%T+?o-lk~IoSAklVGLzgH@9un zKwGQ~gaYRUV+q;E)Y<9Vu9ql?333Ib>u$YI2pSM{sVSdUyK&1Ke`50qia%xB(lzudWsG&=+I~zADS~^tAbH`1f)X zJJYkSeqLYY*^7blrSb1bpRvqDacHOJx(5JhP9sx5GnygxoCyWSo~npHnWRxYQLIdI z8ws6eytQAd&0|`-2pNL>p6c$_9DF2$P6F{B$~+ zSPQ99GJmV}PuPLoPN*dVA^}@l#aJPopa1;_PPQVTaQL&Nd%3?TP8CDuvH!9o=cKs( zPHQB*Fd_y=(LPm5ij?Xx%B*JGmCL)zyV#XXd7x}2bq5!X$(4BZl3-o%CmQ69?2PFe zj?w0}+v`nv&tAC51VIHP$X0=xQryM4nn0_fFyBebip&^SS(qWMikOlM7by+_A}lP- zc6h*r94P}%)tZoyzR%)b;a7R+H_Ka@zWPw;!*V||DJdyE?sEt(9`57G9XZZ{7aDN2 z!q%xR!nUQB9 zY{Uycvyn%YrR$!J(Q}=&o{7sFt3t5ky|~?&Qb&?JsaK;eZH!eS7WK}B5)=xoi*4{- zN>a|g))wo+ey)|S3(F;pMlTixJY41yPC!MajqIxjVj!PovLTg1}-yDMS(G>jKa@Y3Pqa3yf#={u)#Eq(hF=X{2W_#{MZY`*&^E{705x}k|l_2b0_ z(4Y6Nyv7th#fp(>vKeu=C&55|O_zGj)j`%==@pQka&mh5W8|9it6_yq%x0?BwAr`)}AUug-}NgubM#5y@nWNV zbt7|s9|y%gM?v_VX7e1fX^oShGeL96V-mr@O<`I5vgSj*G%Ip?n_w>&QiRFqdzjt9 zE0?pq5=YPWdGXKcirkgwWI=sr|EwU3OMf^ z32I47ac!oWWVS7^uO{3MceNr9QBgYAE}~V=*dE#z;H#~zuAMLT|Ly%NGc>GpVq|3{ zPk|*TFL!Y$D;FyN_wPVeS5Z+h`^9N9I$9blVEC=(Wv9hHzNZ>S4yZSlxYByqn zB9xt7ZrQ0~tUnzUYcS~aa~d~O^+eBjb_rOXCOp@cw(-}eLPbGjEd3dDZ=9goR%1Vo zROkDxjKaI*US83j=Ay0+Gc?y3(J759~tR$Zoubkf4Vsb zzC9wA6|F#9Ib+?CQ{LPx3;|+qPtOZ}(b8P#pTWUNV%olF0>PJG=sk2faO-dQs!`a%t5=tFqm?(Ap%;X@ zRfbgg64h;mlH3>DXxY3U5zrPs2iWASn98Ct(QEb&nvk)14>)$D!e?~rVr8$*ZLNQ- zlN4JrbbG!HdJzR9C*y_N&dh<6*r3Im|G+Z!upKm>fH(F~nu}8zwh_)6xVJVpUhQ56 z0BMk-Kqi;X?0|G#Vqzj085y{+9PYGe7d8C41>UlrbKE>U7--Mu8@) z^7ccN6q{nm3JeK?uZa)zYrHUBSy@?7*bX#)A|{6P{uvl!c^TFPxVZJ{OOcy>eoC%` z(KJv!D6pi6jf-W~uj3Oo6nG?DL=sJV)Xh~D<~>_^*RRKs+n?24A&!B#%1C^UmSgjF+CA 
zJ;o!-r+o>O3_IgAj2)zz4D{+l<&6(S6<`_r{{uCqbK|}dpNy)bB9N++2o&D!W=tnUq;wr?wxFq zV%y_{>KMiG!`{s?SmH>AUX)PE9t~ zZ02$U9l)yg@vp*Pz(zc|j^bq5Kfn{_``EQL)m&ja@WKjDxU6?@u|$5@9$_N(%fCOvcsMw8%FNpq7rab3w2eRfh>3as-f(1w(+r;#+5{w&iiChx2HvfK z?`>{#^TNPDadq_tIIBk3M?f9@z?+^DQ*a~gq0JA`YxRW+mdOC}rhS+NKOf)q&E3QO z*bo3Hfmi_zNxa~L5=N%b@`|!@a*NNNYrP&BCpVJv{)sCU9e*>`)j;47CdW&dPVu7O zK_~UpTV&3s8Fjy^Gl}{h*C$@pI#>HNc{j7i^32y=y!Fx&t6ofW`+_adi|U?d{n>Yzsa* zN5OtyekRGg@MoWa!zOUZ`FpMa>Fws;S!~azgl;0ovG38MaVU zC(OqXuqL`SLm4)bte0Qr(ZT#bjK!Tv@t}`7)PG?ZvTQtS8=^q60%J3l%|Zxpa!RH} zD0z9!{96kx)p!4trwG}3$jf3t{_*j#CyKPyeI*-+pR22(^`0Bq$ap&!K|mWkQ|GaE zSv==lK4ZDSgi~bEpX-x8jKO4pzH^=22wf(4% zCv)U6?_*LA8O-m;t9b9743C^!w^i`r??R!>e=L`ITlwzT#V2pT8Q3Ctfe&e7+be&$ zAIlza_86;qi#Z`d6+1cLx0_$7>nBh-G1h&tvr~Ao7jS3sBhaO^7=|spF&?6Jce#%f zB7ug~F}QfRB2Au=$!EaZPD~JCb70MDvseYP2|%d5GirmokiLve@2_ADbiM=_CHv)e zOwO!n265|*`x?va;^WqmFPU~Y>AQb$6|Ne+X#*95UybWMoVr!;j6?hf025WQCVd?$ z=PBn)b-8r3z)O)iEpvTzztK@9)@*jc&_`Rld?3rddOXj#>}o>J9Q6`pzS({w$oGSW z96j_~PiV{DKHr^>qSH>9$m#|bm(yuQ8*<-b)fH*E1lKixJ@qB3n-{#T)}^OM=F_k? 
zEAxXhm0y5r{rVA(!AW3HWbl^&nRI6<3<9UsnHUj<3(h+^_{dG z)}fa&)MjB!S>$06`AAY?wG#2G?G(*g|BNA&gRRMET>@nxZe<}_Vz@lL6%rZq6=~~; z!=sLzzGUELN>8Alk`%WJ+srG3&5S&IegW8f=;F14l_w<-c3Z-pek@<96xw=u zshkyoB}M?uZBf`_zJ6rNBTXmbf_F}kzP>I}VdC?#8QN%4i)#Y$^*IIbL3hE&kwcmb3guTeN2;$7 z9NF!|SKD~+RT4yGE|G(hzbhukyIjjXreF;f#5H}|`H-`kYEAHwTJqCpD|heSCjto$ znjc;STVQW;yR;u>ydC$Yz97HoH63_|q9_#CLAPZ2X430vpcud7J&NxLc$F9&O2lLt zv8&IW^ilUtr;IUmR-IPSy`HRF>2j=cjy0E~dvBy^-`_(!I(+g7EB_v53xuGjD$~BF z-E8T5RpCS5DsS98OC#|lMj^WwS5+tlj0xpeF#Hi@m-&(bhMaYORx`8SZ?o+pN?Sl# z?&lEu`evPM*Zc7ky@jk!D;gQYj4dp;_JkaQXGDBWykOtoyY+?3%4?yqWb~P9`NEfJ ziV**KKJcU;!>GBQ;L-9Fnl{zZ?Iay8mauCB#QTRDAxRKwx*Yi&l-m?`#}~vm=k7p2 zizW~zL;Vu;TAug=nRiUF(^ftCFPK~vYknXT$;q$ox$5j6*)8eF0m9kfXDmz&eAjG$ znMRMuo6b9tVP!o3dr`5^A=HQ+c0p&`X_{BL_lxuI1#heA6GB&0_rcI}Ms83aY57f| zV8?LXP9W7Qr|^5w$FL}(sg>Hb0Een~}O zp-cUj=HIcR{0eETp(HRQ=UtOFZROy$KTM|f;S%`DOgH8V_dd><53PESfnj5G$Y)hl zDI=F~>b?1Hm=IS;#J#Qa-u0_+ItUB`v0qj4;o7E{3O?S}jEHRyQ#t5&b^rJDpq2A= z>$yL{hUngjN2%BJUDS$Y;HS0|#yL3uP4(ikNC!54(`PndkrTeUuJj&uY4Y8wuD*N% z@m?QS?&k4@-#40tS(*vH$nm1~cfjS$DM_$(>)JSxYC! 
zYWRvJOu!N@&_Y?c-i{(-^_Rn`%$azu16N)m=}gJWK(%*#`*%Ig88N?)jq(yp@Yhay z%(L*~6Fo^bsT?aK7S#W}aYQ~TY$E>>GG7NpC3U0sP^&=IT#~s2M)iJ9;IWLTSV_xD ztZ_y~zE#F9IvSsrqgpR<$1!g6!K30}S zs}{tD9km@tg0~Mw#@w>Hp-4nDf8SAEEoSkf6g9So(sclh%M{oE5XN(sG?Zpca^?70 z9dlRWa_Z-9A`eb#j`v(DITc`9sizN2mo|3JvGeuY(PosJYh_%29E+}} zCUGJD@1uY*3i@e7FPh}>J z;t5iE8+B-I$ig()=}h=T!%aoaRj_@*Y7qO6wO8RaP50!d%B zssCFi%LKA0Xarq1+Z)AmL*8zMOtCK`_-D*gHc3R~HlO%R7i=3;S>Mp8Nxpku)|H zD-QFy6FQY7pu%`%5T5uiZxEbcH%;M5h_(~HH%d8Q#jR_OC@aBBn-KnTJuHcHP-w9^ zl_@t@+-O|k*3|mPSjYmra~9M}ng4Bwl)ojgIV4=1EMyNZD8u-*y@+Z=zC@{Q$Gup@ zxCr+*j5>qnePFPi^QSW-Ph>QnO==m;(#Uhg*)7~jxFnIefo8>9CDl#4_XIQjj>BuS zA!Pk;k$eSb&^ffpcRu=)M&4ymX`EQzU~-kS;d#!F6|`_>&0eC}Lc5{&yfvEmA56Xs z8rzq@JEI%F%GM~vfZog3Zl%-rd@}r-gJX|E_umlwyXReX5zKuFs$me9OLbxnZgjNE zj}ByqQ(~b6`5AY1xM0TJD%|=)sxMuVr#@he8H@RF_svNEg2`92f>SBiL3#9zl=~>v z{~y#W!R(6i-ruqNFE_M>_8(q*;%E4@$RT+Vw zQG!DG6vRw5HZ^4f3h3bcj9V85FrwA3vv+7~mp}?|fRk{GnNtC(GbW2cZsYous2?Ec zb{$9de8B?KB?}=WcL6DpWYS!E*sY0LIrw*4#UHke9fl5QpXk^E<9G{~DJ9p|h&GKW(3aAi{^(1Ac0^`Hzlrw&n{L*-{o|veimYqYU)$~m&$8f;BQ#q`( z)GnF*^@{DxN_Tg6x}fVoatvFlM6AE`!VLX%t($?;;KgN)Yly@X;O6DCb$D=fe4{d= z60ILQPHRs6vae=y@V#%PdpFNRn9J4kvFAe?Jb2%7g%w^$7Dgz4RsQlaBG;p~rI>^M z?(_G?j{b39sc8)}BxPDWLiIkJUm57KHLaB^cpDt)^Y%8DvMjMJI|4iYi@&vUx%Vq6rK-JL zXh;8)O;X7<(HBH%%h96+udQp0j1G-IQiYgw5>Blh>~7}c)9kT?yilT!|1LC%d5Ay~;6RjplkU^i`e&PHd52P2iQ0db8t31<&u*XL{ckR96qo*E=yWF?)8c z-ru*A`(5AkCluN>KhTCq5ao-i+8q%Ct4UAKL;ttINx$yeRtuCd>VyiPjJ~W=@~1g{i|r%-PqdXyB^Ns1-7QAw6Vhk+0g|^iSfOi zua&6F3H6@JpDI}QT(}RJk$!ly@jY(k)DeYfL%=InSX#AqJCY|?r5|>4F>rr-p8cb4 z`B_#5`^RlGPdY}T=T)ryXlq^N%F*3aLxID>YolG`YIMr2HA zjO)w_Q}5mYx4^5Q}%A&!IXA1fD zf2m0$4m`FdEI-b96{`0qh8uMs#%a0gr%NWyIS$TBuue4(Y%|q|x)H6t%TlqvcU7~u zKPI+oF0t|Bvbn8FPTr0Zy@ zynp(~-!&eWyf{*F%9zj{T~MJa8YC{W_wwbKTa<=LwjX2gP4f^jS_iQc2(`|AtNpYb ze&Ni}G1_T*k5N;T+w|kI;V{-B>xFdvAgH5TmAzdG+5c=jpGi4y-r~e0s`zz!dq3W! 
z_C0P*m*k2%Zm_!&ml6uRvBSTiO_|1U2?+^dn^IHL)ltm&@U^bWs=xLjEQ-!|8{oDYwbyQFt2Q%=DTyUZE!sS zAyC%cz6&%N82*6fLNKu%-rwBUirOHE#Ohl7rZ3)#a2=0dCPnG}T+Dbeg!jmQOR}Vb z<9hC$IoD`x)RVXF^U&iTwYuZL|E1i1aI==v!B6JTrQkt4c1qBuo#@C_9Nmt|#J--#7{?2~J>o+5Gk_jL4 z*;`(Dm_7M~&#LnrDGFaODB)=b*~Z(3o`OL- zsk3`N(Ot{GCKw77if+CsP);a?y`x=l*IyW!XyV;4V-1c@{_KIXR|c+j6w$Vi=QPpu z$96$tG<&%f1KWAL`j)DvweLQHvxY>FeK=^aXvNL zFZ*>u*7#w;#n@36oC_*L(+FN^X@BPF;^a5KmX6AgFD9%&d3DG2vNY(mc@91QBvk0X z({51jd^)`4&fWxCP7yEhiS3^Ff9&PF52}&-{8HaDSfSedt7li_Qj9)p>-_8tgcn(Z zy6YKTyLshf!2oG>P`tvRz`6zbM+u`vOR9QsP2b6b=iHll@FT6=ghcyNSeip1UdX|Z zc8Vbx95kNK&q>PxAZrIK2598*U}t}=tSm9fgyO+T*_D)eK`uv6!@mJc;em5*O6$j# z?`rm!`}M$h`?Pv$N?Nf#<_QlwJ1QDlo%2qGK2_%hd3&JJ3-6{ab`?@Zk2FGA(GH3(^E{^Ma`8=rjN!l%_tkW5pi1!HU*A)Iz@f zd&f+q;Rc$#qT+zAT@!djL#S>(%}iende(E~=i$B$NAx}|fa8RDaU-j0>NXm7Oo4U@ z;PHQ~&?;swCSh@_leUo|#i(8H?BT4*Qr(^y=-QlzMU?$Y4FxCiECL7QceoKhowo@* z%FVfYxByKc!J@mG>ZR<|A6Dhd3=Ng^LBmu_&qlY<*~`59eakgLX5$eFL09aEwu&>` zR^LG(iVO*({>Gd8_AtFI;)fYo=RDgeBuI2vV=o#DB$`l@h{}< z2hCnqx#~zT9buo|kkJh)DN$qZY>ybRww!-?L`O>z`XV~o)O3%z1b@T!X1O3glR8EQ z(3_TQW|?ASq=0LseMh7JP?quFl?LFl@YQI=(kpHrfpIqjV zqh}*^0vf7c3l^LDt>T*3kiD<2Hx3TIm=0T{aC6&ZWTc^7Oufc?3K=UiYi+$hXABgZ zIlh2%4TXYQnTy~1l+j9mq0;N9BTDE8+{}Hyb85+!jlKTlDWhTcxu(8;qth140{M5) z_W>g}KQYlYaTA#CNuSX6@vGwVF_xR1{LC5aNqEa`sViRnwZ#dm2z>PO9Jl>g8|6F5 z_Yd?4zlWCmeh1egK6$66pnK}75xL?!egAsl+hc2K%Xk$jOvqz&{aCiNrAYKHJU=P_ z@~!LPbhG(Ld+wja@Xep-1syAiY1coEb@Yb6{MZjL#_pQv)+|t6SI6$vEkiD6cuP_Q4FVOuZ|)NX&BOr|U8J|kpM!^9$e9la zs5|XyK8g9EL9ghO9kI~9jCYXO$WSqTDFS&*5$jG_IqNi3Ym%AA7U4lzDd8e{-!4BH z6INi&8Q!rL+_Ax=fBGAk2?A0SHqKV?SEMBdL8E2z9gx`-ApT{^cS~{L&vr7srOMGl zrQ2Z>|7+M&+q#v7x0G3Jpe9Cw_`fZ4>M1eH@R2*KsH}8aT6|zg2$i8GB3lHJ+Gez2 z^(%YuNZJ1;7DL^gRA-z?Vjg0>g zA0HST`&p%L21pKX8$GXGY!bxpM{|d<+q=uD$w~rD%Bx18XSfvhE;{K4YG6m>-e(lTl|Iir64B z|CYi+MnS4^M2A4$fzR#^or@QRfPetF$cTDvGR;@mRqSfG@tMo`4#puYJKr9jX(K~Ei{&wg zdbB5Q39Tb5=O0mF%f1xhHQmH;gaII*3HwF3_9iZeIyK(D>|@{PxUcKAoZ{H#2`t^m 
z7`Hh)@jM4j-fV4J-7;78gXXZd_r$lYw`)4*tbbYSpM2lEpVJSq*yyivR4_LsHhwW#Fcm@H>G4`#9y(a!m6bNfPa9TN+Sa-*ISxbKZ9N6P-;3J5I5#>ck; z*Tyf^KF2FwxAd^+34s$5fHx0tM zbdHq03G;!;QGV@~FcQzvDZaFyI${I%>x(08Nix))f0~<`&J(q?jsdMrx(F(kKSmd< ztkfL;TmX9TtI%q7)z+2J+|n9g7ofQq+X6C4z!_=G;Q@zo8;^nZ1KRlf=o84{gFuCc zdmZ{|0U)iq%XP>E|K)SviIVrA%YN8m3H23X-yNZ-{VAUGD3s~)wpauGw2kpuJ|i(O z1QrpS@TgPF=KyzpT#jmKs(ILb9R1uU{q+v!`-WDf`>VO;(j%c(?dgt8zMjQIpH!1A zn^mBs&>2=N_V7n#5*!+gEo2+UBF^5`3oG+L{FJ%W_wNL#dky~rsuD1`Kdm_nk0lxE zt6sCBDi@!`vAtfITJn8dABzGiY>GI!+Q-U+9lFeits~|JBWm;{kZtny6Ltt1#IgPp zA|v5Dci!ll6y^k}37>*KE%d$LFqkf|ksQl02zMd(2`7!SY+__lcOwl{jd`Djy^L)r zLHzwl#Rbc8J@CFMP5C1Gv5xd=_A7S%~2R{b3LEZ1t%2s zfs1EcsRPM&ekM0Y&&u8-yv=xPqSw;iMI$Xk|fY5YLfr)A+A z=@9vNA9<*^iLju67i>tvgBQSFVCWtvUSN9?2Mvr zcDNKweCbwk^;?`APs2=)o{y8xJGXiUvKDh8*LXz>|NF@vtO;C5r2fpOc8)02w4_+4 zb%jlRae;vaB)OCywatT6MMIEsCf%XQzc{z8HWH*ZGx^LNo2mr2YxT}k__bSfy3#^# zv)R`dyyKEzNjx8z_EEL!W*4gN79793xx2Z!6Ta^i2Q+D*{GV<;f`mxuJPb1sY~G{k z*4ipJrcTbxpc>)w2ftc0=T(==?>^z6~Mv77-Mm;q|N1^f)P`XwIMXEj2 zi_if7&9j#w&&I^@_97yU?N&C|a9x{)xaYlY7hWyCGjpacal(pG7||_>b}m$B;AmFP zLBLMWjh6*YLr@p9KTm03dMqF{MOJPR6R;(_^?p^!OJiYH*;hV#x+B7 zneayw1kj*El2~#9@^)!=`fy9U7omuBJt0!k3^}w67`pjwT3H?{^_ChheqxIyJHUDYG#XIMWOZx_sCn`A#|Dp5826= zn8V@BCfOJiSl@6{P0{QU^n%Q$Y>X@{Zu$#Z8P`LNq@zmpE+}GTxP*S)U$qijT3IGB z8(1EFRL)X?e6RbtebibLZt8b9dMvU^AgDC)?IV-d1LIg&oOfFLwA4HTDW5>9sa9?yO_P;OqYlkIl7275D8XD+{!~Bfa19}_F3h?U;0>U^vBti8Db6{y zFaKCvQvK;Tc48pcj~vcGX3CZ@E zQ)lPo5RJ8hhHd+%LrNYLsrJmd=CXYZE$cmCCHzCF(4nJyXas@Ct83CxqVQFxRO$HC zcs06KH9O;oNDL7OTgvOSUnvq)U7ZpJ4e5Y+{!9S)1yZG#HByaJhuGNY!!w4k;J8Zzw#i=>XGZ(HqqdfzFjM<<=K(Il4l2uitPzvac zDJdyC=G_ZmD`jSS13bo>oaraJx@Owi^Z_f?OAzvg>CNa}yabhnt*u5>31i`AZHxDK z)SvEB#~@=96I6&v?k$XknK?cA;mwBIS%)khSjnXA8U!EDf8%k4Wb@WX$uRNRpYRep zZS7>qBc)-R!@HJ1uQRqld1vT37s8!64oY4NJLwHQRGt5MxeCkbS(s|$-gCv~9NWia z`nvD&&afM)!gbXHKgf5i7jsTSx`C`eU|LHG^6Y$HD$pcLe-={B_5Q`kIdPw^v>LiQ z^7%>f^Q=PU4OLtp6VIPxQB_~*U>;g4Q0+vyUv+agMY)AD(-Rs3rU&=p(CUIS6W5Kef8rI8=sh*jxmaT 
zULskuh<~a-I#%uJmSP&Nh?rZoWEh@ab84b5Gt|ux81j$2?!h*l->0*(wk9FKb^o~Z zuKarQ4Gefp0<+0-ha4iBfrXBR+1Xvk-Gp)Zqr#ec$spo`#sjy zJsOj1@5i=);7@r{d)#S+_fg}svqX^Rr!T7HJ3pP28@&5uAGq^rl1e(nuMhu&g%%y>z16EXWBYTruVs7QWP+#pL z`Rj}TmvL44)&da^d9__KifHTn5(-swP!KiNWSumlq)Zp3o_w;A_t06n>F>^NqjM$f zj&d}DsSFW_CdnFcNQ^|Y=!w$CTDx*eAN*%=adB#DtJVN+Zf<^lY;VSvESF1Vt(vSZoUb z`_^7&_@E$-5+t9E*|lCA-d2|mD5njrr2w#$OvKw5Ty+60c7j>MKyztwG9x_&9WtY> z4UD%TZ}Z2Re45_1vZrQA3)fWpXeJYvd%BHA`DI*F4RiO;PwNz@^0165i7gMv7rzMAxuf{UWMrS;S8tN6s#+%~&ZGnpcetJ=J zkoCB?AUjhU0iM#+7D+>*MkaTUs6+G*8A>0pU_9gHQW|7Op7s z;OM9rLxTE;4{7{9le68oA#g8mEx#Y8o2Kk)M*nzUU0z}UK)0sma;+y)T2k_$xG)%D zwu%T>s++Y5(}G~-VJ07lWfv~243zbb?ZSnfjeS>}#E~(hD7rBUiH>sRU(M5WywGP9 z+A`e}ybd@h@IM2yOmNT#AJpR=P8tRA2@!$d3N50Pg-{ZcpeX4Eb+1K9;Wj30D2Q+w z$NqQIar+#Q3j;qgOmBYmv6K##+7*tb-=U$|t{NMDTC?2S0|=#s=iTh{8dM^u{#$2X zRD1L-LfxpFpgN|pp3Ndb@14xl#0msXMRtFucAvbpov*_Lo`*MPK&1;Za1&BJwR2Uq z=d@T2%9$M6yg5>eu1GElOCM{kwhjTs_jw?d8s``xCr%=>fxzwMak+zN<%VJUTfx(AFua}pI-h;_q}vpH`^tb3(MIPBf51*{uG79 zM!(w21x(Rvnc8Ds1H19ZI{=B-<~s(h?{3}#nAB#j#^|nw6i^{&x;z#dG)*{C^r?0_ zPvT~S7h$b;@6Ip<1?L7tz4L3uI-)$St?o)bwr>gxctNEc9JR%LO?RGA#Td75O5P2j zJ4Q{Wf3D8U{lF=jppu_sAy#V!LJ^~FyKFrJE}9Q_`~{2!)#MMH|86)Q^`Qnw}}jlcaYC3q=ZC( zeCW(+B69TZf&U)#H-dTw$On+Xe~=_QjU<=6aOFHWTTAM5O2nrbu&S?Qz5=bE0*m@j zT!1b~P{+#RK2PcYUYX*|lVoDqUA^v5Z1Xq$C+_UQ(j2K-YpDgx1Znp6PT*0b)m8=A z!tVRegKB<}g?y8sCazR~TKjwv&bEUa+q9BA#)}SrBN(=GzrauO{X%ysOtED-y1m8& z2{)wy#6=&m?32c~C|Rf_Yj`OYm)hPnv-D_avL!2xjjnjSQO z0Hqs&3onacMi}Xk$?B1%1+>H#4;4b|rA&N88LDU91hsBD5qkG8{lg8^<)s&wC%yo! 
z9%G65vyrcyetC7(@egqkaKE_Na#bBf&M3}pJaG;M(XEjFWxFiQVdGQV&R#>+jMP3j zG{_A|N6yT>{c29f$5t>F)0C zyYPG8@BZ!`_n*sPFy!FbXRozaJZnDlnX^VS6u39e^e&W_R5bSpBf)@*>+Rxf6&LP@ z@_kIUBEz2=6yrb*8hmywDLnioBK)O3|I05O&oS~LbFGo~p*^b#bCgG~o;`)<+xwU2 z_eg6I;o@-qv#z8$s9y#u|~BBu7=}r$M5js0m^J_hl;rTmMYu=*1B9Hj#t`G_^bKkWjId!#!`swk`r`zsVd4WyeGc*c}mdg9vQd%`A*tPryC9hL0^^hWA4XbT&8iY zQo|V>Hv2O7h7JNkf~d$y9A-5O1A{Lc=#L`+Fi*kHp|&4MKRj+9eFo`jG_4E`v9Sot z9HtZqLIOD02!K})sQXUjFdnzTk`g7eZJi)we_&#uxm~#s+>av2x6WArRm8hfWz;TY zxdluYX3bOM18$f>7K8R7Xi3JVX3qI7$?28kvG=AH#cp2R)D{6HPOe%qSMjI($?2Tt zY@OT@`LdIB_o;=ZOl$ZWY?~Ih<|5?dfBuwP4>lAOFnA#XG71k5H&vfx9Je_Yr=6%t z+fqHWik{=bz|}KFb-JH*xpwAC;%d-nNDXfu<8erR8S>NvGAPvYNl7WMJ@;>GfDiTW zpEiU0{H7`+4YP-ooG3Ml9v+@nx9WRa9n*2|OJ$f-q-WOF@QAj#mC}A}C~IDAtOh8t zX_v0moH8dVx@~E$dx84bD$T6$efg{Ykv2|{s&C(s2+O)ZIoz#qGu+moKc~7oJx#x$ z9w^CwQF{JarlpoF^P{Y;U5M6aIew}wQgvrz`}OYmN}j>YVpH7n@{d*PNzoS$!?Fy8 zt?TcnJQF4yl{V~>VGsWhpmR@glh$4Mh7`)_>81Ne`&UaOrN%b8j;{CS#x8h1XtCA| zhV)gKPYAeN_yx`0+b$oxswHjV{WMv6_J>QHUAfvCHc=O_@p@^bnToo-Tn5ycMo+a- z50cm7Z%ZBgRYeUG6WjvqiSParJiri|s{+;TZ@hLdp$CnG_}IB>m>iUJMrM6GJD(P= z2gj*Xy)g5UUd_S3R~S!wc|lu7t_-*rP;m?Ht200+2;!#72GM#k2$v zb_u4t!W%0MCR+ZnW=X-O|3ZcD@A`KXp=Y5-R*7#zsz!K19hvrM7NT#Sg2AL1HpvmtoA~r zBQm5q)C+{v#+Gj{Qp!5RZtw3p?5V@%BMO6Ob~Nmx%oLqsNN-9`7po|58t8a$wbpVM z2#ZMe83q+3-436Ln0$U@a{c*1V~Hcx3hd_BcjGq(lU4G^fON8H#qo}zozzeFA5Kqw zg^+B5P4;u5=?!WrFS2jP|9pT_?B`x7l4L2KX`D+L=s|QwxFJof%a9xn8Fux{Gw6UP z=`IF8rOp6+kMyaNGndlj7v|TjWt5|8vh=vUKferov*?FPn_8bu2|kJDu8cre^*@`k zbd`@)pki@M73I_eE+CRTi51q4N^6z4n&qk2FH_W4($dq@GwSuHjtv2uLZnp$WRHrq zpaDu>hHP5hvvUed>dn4A5LtrVaPy;hoQkIFTVLtI+KU#bdge0jrwX*h)^a;4!V>QD zdA>$fFDZfm=O!E}lh3?fCZ9az&VEdqHuyFNcuQw0!$Ce!cZ4idIy~L^HiOL61APJ! 
zT8mQ)9N=7@%8i!(;3;Kp6)D?>I~H56Ex*izczByG^>LTOZz!1pGYGgdlBnJaJyhM^ z-N_U24v&tu>dkGQG_JRE7-kwC9!w5m1{nYXgKLfJ-rgQvwAtvo9LVq|IL}N^J6X&` zM5KUS6V%q~>*<5M^XGTj7YpBHsd$}MC*#?S6A}~fQ3A4KH}b~vHRm)Xlu-gkxtBLS zdEW#3At1$1l0pKY8Y3;Oxk16Z*~y{JrKO{uS``LH#z1!lp!u&+WzpX~fN}9{Z+91< z=3Q%csJpQ&J@*720^S?v$)+l!ATJ1jkv~6jqVrV%F##hL4=9=PR_@Wz(1c4Ff)blx z9OeWT`v7hH)ga0>NybqzsqL;?(9vh5#_{ef;RCf}x<{(`di?S&Ul! z?VTN%rs3|A2Mut>(#z`DWO7}vF>d2u2E}uQ4;F>FxmU+0fSt{!B3PHuZv@=+%%iB} zwrW+lMRO0<)z)55q?6`^z_u4 z%agbCRgqosM#ITCZ}Y2WAeKTDB-$f6c)6Z$?dI-|%b@xC^NDbjp2kfNAV}7L%^$!z z01t0PME_J{w>A^9fEM?L*Zcl94K8qaiB zOkcPe>SGK}ibH>=mo$z136t_PKt|*iy7pwxtOLh-jMDM_x4dC$>VmbGa;#c z+`F>K-BRm;;?m|}yLmB)02KudVLxpNMKFdXtRw)D=WTikU`GLAIH1pehA9Oma5Q!_ zdQsc?OMRmG{u<>VMUuUTqw^vvo)BF?x^Z7hmNnjTQiX%4h559wcN~U-n6$%3y)aLH zj@c?7>Z)s>>OdZR${bp+;;x%l0L8ku+|hP+ZM1L65KXpfMM|5Ink7eoz>(&IrvT@p zkO}W&sZqtKT?&XC*muEqzpoIH{Y#IaAqivRqT-^WVs)3Z!Ms%m9=k`K4l>F=R6wG+ z{%D*~Q9s)(hN4f>QCZ0iRLK21vAFF z;m+vc7*y!Zo!(I?juM8k826`~wb(OT%+y+vQAFmvxIX<&X*_cAUh)MOuXm=jZT+sD z>E0RkQ)U!%eiR;e^~q)Xts7Pu1yH&rrbvni5q9njWlJPt$b}oo=mDx;rMo?uZ~P-(|C>|@vv>ZKv8b1Glv0d=di6P;dXqMB5)GXl*s;(GSR!n zq|smj_6T6n@(h;|CGiEPj=8iP-=BMKL> zd&g*&{0*2wU=1%h>W|UHWzy7PjRzH2E2J%!NCyh8*6-BJewtJG3RPdULe=}3rB-;Z z^j$i^VM<@|E-GRI?6Sp@^2$maTx`LMTl;A%pM*0oYJh9bpUaN<|LP^3YqR`U zFR9DQPT*=c>YgheSk7ZT6(eFYR^5PuIQ&N-NQpeS-R(G+x~!}W@wV$2F%|%S5Fb&p z{v_^`iBDe9k`z_7s13=mvt(ls>LLqXPfISy`|fhX(UgiRJSU0X_k*gRvZb{6JrqNq z_{(CSJM6zH>z?k@^*SX7l$=6k`SPi$H9g(dXgCIQm9j6Lf9MQV9JSq-D6=xL=Gx4< z21_kw=PYK&=qP<9xAVHK>%Hb#Y0@Y+l4RSsSj#GqiNg@oihi{YsAYL!bhc5Yc@1n?^;0jbtSlMt&6t{;1S7D=pmcav41`0O3a%NrC_b7@r29Y;0;CKei(9!1z1DV@ zs~Y@t7w1Rg8hW~>K)Gg)-X19sDQU`}qeSz~Sy0w_!iq}evA<;{Up6R#v{S>%G#665W-0P3t}OG5 zh%Z#nG-H;nNGyd%ArA1PC}1?H_Zko|64Rf?$jXc|96BA}+0ZB*oD^L=)m_0UG9Cm0 znYfpim$JCBve7sQ1}oN58G~aPyiNtfFoW7y8jp^}7zeE$yk zLd?H3ho~Y!h>@R{m*TpQ&-Y;c)}_(m&c*fZ{dPR_D9x$6KwU6_^b_o=7& z(;kxeN6UpwQ+_WztY_Zp~6>dQEe+Qnzj9RJV$@+Y>96xND?yVG7K9+mE^$F04PqcXyiM9!AT 
zgJ2YmumMxoQds63|0WmRtYgt8>HKw&G}qZTlR7)3jH7BWMPdGah4J45H;fZdPQN=< zA1F%3e>_P}uD$Wn`z+udh0x0ekJ^u@*QHIh6)t&c_zjS_Lp<<&x%tARgSIiwF2cBZ zw6a`m1$X|0ZiHzqMe@5>TgU8H3i`oFQRVvS3TMPK4mb+k$L*yy%C3yauc(M_z3@Fhc4kI>*{E;XCS;E&xfULl7g2&s-dX~;>V!JXR0Ge{ z!O4Llm0Orf(TG|=h>H~#LY0ULq4dAP7%utaOLg}85{PX!LaimbIYR4kr@qcnf0S}R z`-YD{MB&BfptQkdoIL)kdM*4vIT@quz9e^$A4rzl$Vim3N-syd2>5 z+2=>0f`hoZxdq$^qXn{2@h)zzR)cs4lgvr?7!;8dtVaW73&T0$GTHoZsXl4Pv+XB=`0Xoh1tT%;yXTe zXX1re69VU~v*Kdlqzw{IuA32o;0ZGlM@_x5tk2xnPw;&44{;o+%6IruJ zA+n3X(1mZ+g+;U{6u0hS)Yz4|FIe~D77TbsCnRK4PyCXZS1Wi3 z&^)?xm-;RX&~qSl1Hh|%-e-=$L>FajUiUC9_tHaP@UwH(L%qYdX-Lx-wkBV|ORgUw z0ahpR$%rWXQY9z5MzFrWJ+AE&)O_9C{2@)c2@pu4DX5%m8?lNRPCn3;W6_HWnM}XN z)ZY-=6Zzm71a=HzE>lM_Mz!ocy%^IDd5Xx~pUQwHK<1zI;|!Db&G+7F8YVib=TGl$ zLScjehZ~$H#K?xfD!vLZ_=ynaxI#b_fNg}x{dnwHc@ZK-pq;Tlp}dY}tGl^z>-~>~Tdfwvq>K~oX9s4ED`S3O_f`z+#+&a%o>xl$U~A`y=rbyT zp3Li7P?V*fslcMfAje!boRaWMfI3?`Gq11+7V<1VQD0|fa*8cZ_8GnTc}{AMO){L0 zPsEI9MK>=&5lIa442KMbRv{Obu0xjRsRCv{c=#iK7=Ku_v4WDLCBq?0+MmiY&}oNf zuU)RSRt@Ho)35dT^oRKa`i-D9&`fRFy$59@aU&qQX%qOXq@sfIITG;RaO;)%t9Erd zPPO@>yyEO?RKG;ketiIFtZr@X<6D)m7+;pCs%gLonI1tX5oO<1RF>sLl zY7@FCkV9C%(*xGmjORY0zrwvv%d>0JIxWj7oi}n}jIh}i>q$EdgFh~i3|c}f>sE`l z`BFaZ_D5i>jSxO`Qvhh-MuBSAq?^wWxBjEMh6szEVq6C?rOKM#t48*@u33^t&PR>s z2CsEP#(SFUM#>k+z`ZfCe}7@B#v2(yJmD)T-+0K&H+ZFi`h-TEd>x^m*h5oEQd46G zH%^!}s~5M|^Co`dhFH4bg1A@U!Vlz^l-v9+yj&1L_@O;K)Bo&iBmTykLO;YQfBd3_xgOTY`j4q%X#Cy4`CZRrKPj{of2;qI-V7t^l}*VzCWg$EGBso6HN$ zl6@IgPoJcZ^5%>wC7bhQF5Sz$vCP?8a-&+4v$GzTLrd{x0dw-5lWlL_P`DiI*VoRi znVFkM4N%J^a>^ASmSl9MxaZ}!iVR{HvI5P;-y zF_Y>YMYKwYYlZ-gXkTGp_lsTZVC#UXl>L+iud9%S=0@nfYF{;o&(ONn0^Kt`37&xP z^%FhM!3OKpA+ie1ntPNNri>^(mNt8OiG!6DH`!xFuBi{nns39#}?K|RSwHs-vZ{h1v$<+yOMJ9 zwNhcY2Et&|dp?q^HrcJMt*fif)U#tC=|_BXu^$!^JZ2u0VTupr`q-}J2r+lWpA(@> zwU(fk>67SpG82HZP4yYoPqi*vdOhu?quEPWf?i8nWIJ=7_lQ{-?jLrs+PwU17TlM# z#^<`3Wnz(Mdc!=jQFjdkInC|~0H*I``PRl*q3KICzJ1*-?6>106{R1+?fhFROdR7A z16t({NlfqPGR=}g+nBa}YlGb>lKbJZqX>FMLkuBJl_%xM@xtk7N0Z}|lwE)LzqhPD 
zN7&*kP*zF4Ql#5`eG*FX!K77+4}Z&XD)aTJNR8W1rZ!fv7SvNS3G82=?EqNF7N7=; z;W}gysvkbst`1=!N+8zTs_vNWsB37jjk3YKBqx768WQ|2`{w#;8^?X$e`CC%?tCp) zTf5riRahrj0|A5vO5pB##^}#D`MsO}>aw^yYJq%6RB)6molIj)O45(p%Lo3V_&6ig z_71ZCTa4;9Dm78D;epb;^@&q7Q+mj$r*7&8teuqJ|TnCI;)_4Y1x{e<7+L>Y&T0mZlD%Ni0BNxC^0roGywFv*A4n!mP0ctytsO`e z{G_083}V&qi{aqN12|1yS6i{?=T2N)RX`L9xLb&JicXA;Z$+~odO&=DyO{1={?2$^ zHK;pF_xBf5R2&7v*Fa$^N?y=);;x?ItCz*iR)`?!EzKl(M@ySBINDUId}5Yai=XTW z6!@u8nwud`R&VRBIrSbM5*;E&)Dglo`FY>TH^=j=KqXGx6Tx^1UxTo$k7<<%_2_#y z%{!-(vsai@Lu@ix(a%PE44bNG81#z`TQ{ZOa#c7b9Fcx+TA|YwZ#hfa0k%l=G&FH3 zujhJtr0Jq#Vxv=eTzdfFWXm`;z-un>?)J_L#8+TKGcz+I_@#ZMbEJOHc3V8(q{=nB z_520xogbNTOslKO_C}efa#n&!Rvi z;GK@M^17ry@e8VnHVzk6XpiHuIdk21U5M%0A3+EdjZvardvawkI8X8dV3=H8PWw z>;N<2a3A-}C)r()3GH122-s0m102vDc0b?MWsM7c7fR}R{9*FfZ;TgyN3Jg|7E7vY zpqBq{buAEpK`sYZ9)M+h_PeCo7^jeKgX`-nuk$ryH_r>m;#}3jm}kvWBiF_+Ua##p zr^K6PqZf%FongB<^h!9}r(N#}Jc38rQR4lIHoi$ey}7Kc+(0sSmGyxD;1y8cA3SA> zg}QH8)$1XophI-bjiII|S$i{;2jIcU47ERI7DSZ!%gxC8#@RKqh=LzvJZ{XErbcRa z@Y|(CVTZryV-VK8{6io5=b}VHVZC`ehINjTx@fj8=b?SEj1pd2rvok zxWp#_hgpRS+f?u7s8{2rc=d`h_4O7J%0bu9=k*ua7v;?B{FguGX8;wXb!9!w2=1?# z)pAMtffSkUL^ zJP|>XJC*5(E^GaE4wB*`n<*#<0l>p_@Ak=KO*21c5XJ6x&w%8!_!ntfU3&JArncSn zKY}mloi6|U4^4jEE;)}f6 zT8h=&$)kPW;|9|-%d3ED26CnfRQSK^0Ao^ae(AMijZwI_7D{j$BGN8RZNZQ8kr2kf zN8A%OqU$8J=Oh(q#LV?$4;AMV|5s@S2?nh)WPft%E#Mb-0LG}4F;q+>Q(<^|)`6dX zuDN?^p!tKbfiOLUhM9KX#C(h@fFg1}U|Jny$4pHNiWR%oYR!Qa0>I56L&M|hpENS? 
z>u0*#>|K{??8GuW(EJbZBnr@t=d+QWQCyAQ17M1T9~6 zR1bJgli8>@Vtf_i-G1LX)@qh1V*Hznxj1u&7G+>DG0M@|DaIHM!mP8)>l-|?U7L7) z>blNzt&Sky$%gYU2jo$xXJ0j#f;C7K4K*x>j(Mg^KUg|*nIQ2J(ckUn$tKPT`qQ-& zUVsq-V?HnGg=%UXj*_fL`NbFmPz3=j_sx+Ady=;mr z^VJPxlQN^_gsq%Lwy+>(&**o;yCal2EB!3!z1C-6p^J`8jo7UHRL_J@Zhm+QPX>A3 z{ZaAaJ=Vlx(RN+sXBp{S)q^s-lHd7RO_P*1s!9_Hi3Q5E@yY|T!6k{a9}8nHzKNn{ z?)m4|wo=EtN|ad69XW{o!)AG;*f>A;7ja#_;($DV!8!TojGe%9z3z$Ivs5?Jy~3U{ z?j+%PN81Y<+Y7w$%78!GHd0(b%SU{@a;S$x^$`H%k+XMD6~o)ggX7jn4CAnYcJXR+;N#6G9} zSihF1fszhiWy19*G727|iTH^imdguqOey(OR9yzAtAw#ymYu5m*Z-21DkVisUV=wj z%#8_oF7*TES(FJ%WuY+um{S|c$YYigu|94{LDip9l-5|EzJ&f!wvkfR$oT=znuUz8 z%hzEWg~6svn-vcA>wcyC0|2)7a;3@{0~egtX0|G}X0Fx4X7#h&2>QhJ$hCR=Sdjik z3c>Rgr5eU_d&cqB3O&bLpE96%g(&wgUj(AFjboP&QwQ^NspplKX|2dehB9sY-Wdli z@6AOvbgX2knlHG?1*{ZqfulBOzkgRgZ?yVvuYlB(xS#$(X4S7)@7}f$?a-i>RXY@` ztIUMsKCxTXP(k_P@8A;@eUNX z`>J7jS#DD3N^2G!*|M5YjexH3y@wND=T)ykHKG^`!OtB{YY8~7x&K}gaYo#PJ;ws8 z(-i$gX7tK%i>?L*!t<@DOVmR#Ov-^t9@WfLMZ;Jv=64Yi^43J6uy6Y`!6HUO_a5Lmb$0U-p<)xNnWO+JioB2zM-f_A@k#NsTXAa;)Zr|vm$tW z?lIB9zH?LM>P6Hrkh#gol|_C7-5FZ+@{La~#8aO#e@nX;zQ2o&RhP3MzZ%e zik(ri>OvJ;GVY=j%_{O95~MyNcnxo!hXY-+N21vhF}RgSrc?C$qpa#oR*z#G|s3G%qZ|UXi?C-F#eBg*n}SCcRGXW zeZP&SMQ$CX|Jncl-nj@83|N-H+lHzmB`Io|P1y;2g__I&y|nH*;QRgMwmnaXBOpu! 
zY%SGl6W@r4;l0@+=}b49E5NU8e7H&kB{g#{4B!&Du#fNghvP?MXkg%VJe+N#KlBhrdv& z(A~SAZj7vNyQF@7@@f=Y+R(`HK5Nu_V?3G<`{Aed5>v_Ma9sPi=Ck^>cYM4-n%Di^ zf<*n6xB&x`?t7Q$8d~Y=gA50(W~y%+UH0x107#AX2vMj|&ud&Epi2tun`eIxZjM2{gmvXaKn^osMwPA@5x6sx{P*>HVaXHJgQ6b^f|(&KMHgg^OAj00bS zh??=&P<#R`vT1RJJW6lfpk@v{rdpu_$2xU94}wjB<_vvVLNF8emjS4nOi1S57>9s=QOdBqk`yQr#*bhvkNxBN^*!{2LlgrU`WelL7V&# zms12O&HYun+`1^!(dIV#_LQd1h06|i0bde( z$x{{gGt1PeWwka6{(K;V&d<<1B1J?&vY>S6VMDz*P%;H$`GNrG@pExa1miLT6_#{p z9gtK2sy+#Vx&O7z^b zA1VsFdU^zY9|##50xxTO2Rrpz2V0OB)5d>${yyXXsciEar#0eH)WhNfjgRSE^IJ#gz#|w|N6Lyx;FrP8sS@=1+74&RfL-mk=fB>CT zATmC_Vk!S&WXqvG%zB}6MP1!(x#j9VGAKjMt!nX8pFB?lITQIy@1&&Oaob&P&brzI z78CR4izI&c{gc54kG-?A4~k?GzXM(WW}H6gyl+yJ7nCB4C}k~cm?>8yi|9)f#1&^D z3i5@Ez_Vj8fsO3Sgb2MtJ)8-XC4=+%m=-088T?ZojuKlcJ0mAKmm^-1Cn~k}CB>`E zHzA#zh9h{8Aqtb5-9z-W0k5zQhLrc6pm^QteyH)PEHy`=$C@NVwjlhJ9sAeUC;jkb zLArXD`)pbC-9!%3}cNA5iyxZBD<4ong%lcHdc%@)xSn}od3@U(V6JnpT zWi=ijPbc#_Z{4qRdZ1tI&5;se-k$SXn`w)8%6{;3J4mSq(nGIK?{eW=@k861d?iu1 zaoAzhY=C)CLFRNvo5DfiNR~7?U9uQ-YBeoi1s<}OK9DiBQD%m0U{^yMOt`{r`e9R+ zF203DkDuLeIOY#r_6ZYf&S|WX00P9aB~%{W`#^u^++fxZ{-tb=KJ{ok1#%&@K1P2l zU5mynKWh`EJ1zXYQe`cU7crja360^%!1d?p?-`*{IN>zx<_o!ulqz6dQwVu8E}-b& zzh;Sz50(SdII#pg6qJjEBvTq!$qm-v3Smk~RZYn4iEI1dO#u zGqkDVB2TrOQ6flRpRDm}elx-AS8V-f)d^&P1OQdyTmAc)3y?PL#uprc=u84m!jogJz;l#R8u%&N8);8(@Zuh(zAzqFqg zpvj~{7Y#hzB4C7=RAvM6h>ul=zMOg6($CLa^h4bny^*g3)w#u+a9206OFoFsFkXW31Z>IS=-Y*vIMoLB zosypM>C`AE_Gi(n=@vrSbD4+?$#=zpr;Hl4Hy|^ zdEZ}6oG|klTUiyUNoFeKe%#)h8at|fJ{$3Oo!LILa_MH7J+0PeMnFFZc!8Hx4lS$7 zZ=ZJl+EcSo^iNcSzKszK)6~@^d9n5=m!Vn>)i5xd-q;8ZD)BNmP*lW%gi9Ne_mIOt z7s&Ob7c^pZ_jIdr+;_(J0B4$qm)F{R4h@XcmQuEgYlE$PZ>`yjxh-#YeN#m4Ydp4^ zlyN5Qai&_z4fu}!)3CIN8;`Pcqw%A83|cW$psO12yXt22tzcYfb#b2;5ri}OzV}e2a`fgK$is9chY4zLwd3S~uZAcp`QtNy z$_HUlxDa?TIG@w`;C_bk)Dddi^>OigO$5Jcy|GylJjExb1Z~hE@xR~L9{adE9 znBpOBX=Ic~{8DP=jBA{5hrNA*^%ptd1~#BVExSna#BKyO zI#D3((=PnB93JREQ`8iG?Z6c7@#`thP_`RZ;%1_;)a&cJZ>nr;} zc(alqyF1u{`Y3HEet94(CDbh|KG{iEMIXO+mINZos06l>U0CBGZa&D%NE*N%cwt8t 
zpZ-H8h~k#plNyz)4$cRN)Pnq1-IYBQ3Ke|V?L~c<`)zvC0p0wkVzem8@H%dAHM zB0B~!)@awntnCN+%*Oj`4P0E~IjOIpeon~e;r@7dCmnP1d^Iv0Alk<(aP7Hx)CcqO!mTJ8 zdmeA$#!H~_ZLTsucB5UBGx^--M((T5n$NwXo*6>MI4Qqc@`GEAdR`nq5I-nQ&fLwz zF>k*c&2WvI1#%XXS(YLKL6WPp%NN(g!RUq)G~z^2zC0XpZ!RNq9K5uI&D98kWDOIh zxZfqnrw=J1123J==V&kxAEDnzA(l)HvuY}m8;1q;`v3GVsHvLFMX=V>^YfdFdri!oD3NTZ);+zz4!N z3(fI+czSx>oP`Ahh@+!BBlzWhq-&w_U*dK;;7)dCDdX|Jo2BL!hvOUsu5$hHo4 zKco!t@#8_3(&My5LXu(x{%KGEQSg$A?~*psA)Srjmy4yCj}J2zCSA>Bh?=^>Z>RQZ zv{6XrQE7Gd3C#8hcvb^|m2=po#L;80^QgODL*d|jv%W)!ZW=F}6;jxLjNQB5pE~n! zR9aZLH?d$bSz$g1>ZIX6Gf@R^9c)}IpaNbw_xY!d+p7}`3yV-9?&;Y-Z3d;FFkM-& zyeFKcMxynQ-(#tLqcSmpf|8Py&!(%n`F^;QWXYz+{bGNV{(K3M6jk*D#OCnu9_C^R zQ2b_VJSWETO~I)FIJW||w1fLOm(V$VGhTu!ErAyYSQxvjypkoxS;fbdj{Kio)>36| zV49{U^`=&9*zPid;K?Sw-kpEmxJJ(rCX3WgIZIuLbiRwJ>uv413YuI~o!R7k{o2K@ zI;ptfY9Y-p5SXErdoRFHMd=qArh@^k4Pu4b4=m_H3MndankyBaKfuL<5I)~e;Xa`|s&>k$RV(>h;*K6rgp zQ|4>F82#FOc#K*-);mm;Av&U=p|rHLH}Sw&8}UJBbCqRw&o;AL2_uM$nqCU}!$NH@&cMaNG_Ly@+1^V|nnm?Z09X@$<_iU2W5%3;Bcj4_~~aATzaa zcE5d7Q!RLROgJ42Oz?8k208_i28lq>{ZK8MWys5X^N36KiwEo?Z}_&BF*su#AH0m`eAFwIBA$#Cs@I^e+wU zx>hA2mnVVNwNNpL&2Ezwkm&D!1^6XsWiZm}2?Vo|EF^<*P zH!d)_9~G3e)wLB2j0S>}cq|8F7664Dvgl?seb&dW$VSqbAy?)|Z$4v)F8FGH>+0^N zm|cd%-3T?YwY~9Q?7Fr%JZ3O#tl{wR*_b;tt@h7Aw>~?eNe)v*seQ+{2GoR|{J=BA z8aJ0-!_t^6gm_sVZZk7cS-#}vYPTi6#*DeSUPMUGw`ZZGvuw1iMn=ne2!8Q)=kUSp zmA^~TQeK!E4bU^wCY9N2CHc$T!M;VLwXifFG_cR3m!_=O(?FySEs@gK3ijC!`R)oq>3rd7X&~Up%4SrEmS5t|>9yK{_rzg!&$bCUK*53-vS#(>Dj83@gd~bvP z*bdr_=)k4yagsU(sVdq}0gl=$ z)_O+osG>j@gU{@dK_=WWkCtA`{KfXJhy>%_AOwfMZh8S7#b3r0z2mT=Fi1qBOZ^Wo7$8+Wzb28-%EK4o=&P<2~#Phu|+<1Nvk^?d!j}HR(duaq$2RS0j-i zu)tTBn0)iX{A83^h34wimSE0g{T5Nx0xh^dD3yCKyik&&KKT}nAOy5QuHUr`6PJGV z_&zbJ*Ptq|sJXH2RpZ*F+6mS=6Va+Ujrj^1A2$rh+O=GmYS$ewB0-!MnKg^l-)s{KN?E>7jN2sS2$p*B;tSoxb^Ps_4Rs?>K_j4 z3M2S)u*)g?_v9-m`L7{g-Q6HQ;u&vKxeww&-_>Y}XO09wXNWBeaR*W{4|zYgkY%+| zCcP=rB2`u5&DB@+EbT$MX5cJ2qQ__aFP%FkDI+A3j z)zHEZ>OTbS=gBssMs*=RvUJ4F%9BJ^%aMpss2Jswnt0P(P-1o0h&vuyT3h~UIeeki 
zH;@RAHMgji^}!7%Nb>k8Qi~wgKbyy+)yqwcLfX(hG_L@FRMoCmadB}Ua?5KI=;lr( z%zD%we!fPVP-SbY*k0JzH(JN>d$HdrM#zguj!>3Y=O8_ca_M}`Donk=OydB5voJn8 zIYWPriURIHJ`pWeZ|;Bz;IDHfp7;@d0`CO zGT`vCdmu-(3)t`el%)bwy|u6i=I^tIGoS1p^Xp&2(HvY(hzs~4v6PC!3SVJO$OGE& zNcKU30q%PHsA>~TTpWM84MuKv)WBxg97X1Wqildosb}WlC}s*;q8ls36uV()EZL?f zr}x#W4dU}{M6X5@4dRDuWo-`Qb3VSVU82g{@?rKnck*rMk1}clR#WQ6mD3VGaTM{( zAB6TuW&$JABfm`7N9l3E>~(AB|NKq>LKEFH5b6XmG0Oq!2C|%pkOYG5Uzk!V)PnSP z$dK|X4mf~XSXo;R*X4)zo@t});4Lk9S&|5NxlNI=#uH&DzFc%p;&uI1rNR&`IZ~_* z?=!wml`G2Hr7US+*AX2YucFw_KgSwhUXeRIJ|7{SxxKd$KIWLuY};jx4`Jt+nx6)5 zWPcdC{a>w}1ymeex28J=F+zX<37X&p4H`Vbo!}0^Ex3E~;SeAM3p5fuxHs;>-KFWq zAy{LL+nnV4{+WNxy?18KT~n(Ut0=0wimKD6_Sxs%&#ON>T6oF$>CNtg``ZNfl9Lc; zW1d)Mab2@iK4a2ii=H}fwk5SY0G})LKsegldue7@Rcy4BP@ zTOupVU@Jt>m?z76GII1iy{}$zhDD_2c2kz{%pSoe5_`((lRmF@V7Ri(Y2!1~ck%hy zaYOFb-3GUTudbF|^85G+ti2#FrNH*cOBg%BsAD8Vt)jT zVB%nLs|*A%fjMTr>CgZlM_>28tNDB+7B|l*=!Y3Z5MhMA1Ka zc;{G;?$@=+jb=Kthm~cnEju?~_|sfaXE@) z=e6}3Z-CcjJlGBsL2Jy3nxd;C|9Bu$n^ol0hib3`f9UCUT~uOd5=sOW1%%?8!kdjf03@q zgewUCPJeXl9U%^0oi3`Cn1k znV>ix6qRpoZ@07j-I)WZ=Kpt3xf%|v>! 
zRg@^)1@2P-b}DG)gCIXoP$*Pc}Jq48G8`O!bqXD`3}5|69+`o2}7jC;q#LtbJl zGOS5Jpul9YyBM=Cp!P?{jIZc1y@t!H6j*Jk$8TGTouHtTR z25b0@?>SkuoNER3Ibo)T7NlH3JzS%gD(cS6lk||mqSC{?9CE8`T_vTm!_>lmga~og z+Q5Wzpo#@Nmq)MIabzmXIsd8Tv$gvG{qm*7|v(|Jlw*WD=(*vhC zv;E!?d_$aAYs-yi1J3AC#?C(^;g8<4L;n24rMN?G4<0@oyIM}>@}2<}H*;Q>{lFpV z1THs+8#3iboCZy@u0Ll1_EW$#@YN@8qL09I-R48FkNE@xsP9OmZXAf-XSj^WzA|ca zSXZl=r`NtbqMzZ$2Y3s=%Jg{~aqPIn=-{g?YXtPA+xG)eAgDcq zeBAt%qelPgw?)@uWCIwJK|mo@A&^#|RcegZud;An~H*`OF*kmwCKx9 zOt$#x;0UmHy3nDCpU_ockq*VyybF=##e`^xlRzp(X(o^s6}y|R$>)MolpfN}N%tH+ zi-!8>PY#4KvhUC#s0*jf+^_6uY`FIc<678N@4I_m-2s2dk~Q2A40%J9P#ri%WjpGV zng6n?VLn}C>m`5KV_{b{h9*6l_}jH}53O$Be9_FbxI9i;oA6y;QQOFKeB{KeLX!&e zUHO@Tp1qW2&~Mu)n#Q_TfB^QWtAoDOSrd>ju!DU3&|daX+Pvp=krY*~>Pw$JRd^TX zQ-TOcbLp{KHx=R9cR`&%hBvU_;od~By6B|j`fwRd`v40p}emBYJx+tk!l zQ8`_$NDq{39SSl>cSn%&xUBXitY({H_${oj)L0(AROePlumN7sRgz$GFy{J$ce<(# zpw9cwojYu~eE|MAz5y6FWxmHZtW*Em*w_Fz&nhY^T_z4@W@{_h%#oi%Ni#m0c(jB3kCyIZ!`a59&)|r7JBCXt-Ug3<`&KEjj8P9?qf>k}umXw6KQextcdB1K+!=E9&kj--dT8{DYjnzH zX1D7es7X!@nOJJow z=ZLC3G+9T3KuG%eSK;)VABo1kk2VfYntXMh3%U(yHvRTv{AGxWEREt$?XLWD+#4T% zIBwl$OL$W~OruMWr_8e|zRKMhnhw?YUC{=@KEL1PvEXs{Mf}~ncO$1%prsZT78kk4 z%IC0nypIaV67$x*Y`KLA<^!4#=tAVsFuC{cZ^Qe)#2(DfA^_v(U~Qn-+j*N%-)CWE zJL~+r0-t}7=|zP0wv?vDG$5gIzj~#4Ib=x|sjdsgVJz3&z|KuOR_Ol-#=&Y!HMZVn zuu70!1$`T!>Jt+aW0UiHt@Om&a{J`RA6QyiYMt+EVMLdDwL3VbxXU^BUwNO8j*f79 zm8bbKcYhJ!sE4HjT7Z3|hH?-fAnW^FWdS7BMv!{>^w#RC>Gl`8z~Z6iWY8D_>(=XJ z)Vcv}9Djqea*(uG=RSmL5z>S!Ma2fO(=?@`ps!mt^q>1&ADXWd_bj;w0%V zy**AmGq@|lRS(0exzN~!(9x- zj6P-MlE!+KOJ(y+zO?VjxD-Mn$3=5qeKwa(DLn*FA1Mxu$>Cz2gG`eyS+-<)Jm-oP z8fbM8qRVwivzBxM@@9f^=(BIoKW*e4Im`tHcwnHkM7UABWG3U~E7*$b)-q1t{-mbk}fv-0~BN;QbN zZH-GcAdmkSO(*YSxJ`oFyQzB&-62wY8e~q3mn6Vo9YzVDA*_{UM{w;>46M+j?K!`#vqIdE{L0_r`4t% ze8L)_C-h1k+CPq)&)>(Im1<)!tA;j)b>Wv;1+0ni=Zw&K+gtHsnfg|kynW^(!b}9p9k*kCbMH7)nJtM)5MIzpH*zXQAKN*jIr%7|0qML@llxI@~ zpfq0g{^aE^&8KPNcT#BW=a6}CWnMVe;uu+x<2K#uq+&yDxTMTj(mv>jhJ zBb{Y%F~=0cdyLX%<&&fkUrQ3(*%)beDw&Uw0CZf;7H5Q8r?h>3gE87D73PuMxQNPmm6 
z%a3IAwW~ozS^&o&$JIVi2;U#^tf;)4otDq6yNtRPfu+GX5v?F#VL6eL3;Toh(W>>z zwUGj~S-_=gZ6KMrg^WlKe(8hA#jGbJYPpX&*KMFOAQ^cF6+!RAOdnGwg*@9{s=l5> zdUtX;t;J`24cp@m;nID>>AO-`T?TGpQ0b)Il7Pis%lN>5(< z;@T;jccL*X?*~hSZ@&1nd@P~$@CMb#yAy-w<3fv$DM@>FX97HP2mFILGj%V$B9iCk zrZR0D1=R_&^aVi$tS9=5)F9$u=NekMpCCr=R`*cZ!jg;Zs&yJk$-8(1!_R<&2y;tO z-GnUuG=Q~l74roJhM$Gd2c6Stk;C?BV-6zi#C*pAm9>;oM`B@7;rJH6^wd4*_19yx z1a#vi!$2o5h8&XupNIR7ZCD=^BhH7BBBZD^fI-`3aha=>-KlorW; zD&>kf$J|tXeX_$yV>PQkN``P*(4G6<6L}ua@YPe!yNhyPFht?YzET-1yP&NnmM}W_ zhchp~zbn;O2n5pSt66obZf{>YZ0_V@KLJ}=TvSq2OdDO-vO0UTUl|YgJ>GT&s-eOF zE*;{7)Yb_G1}4h%7a~C#F;y%UoPBfq4O)*P9*JfuTGdvrx3&sX5V8Gq5CH)~q|lxl zw1fU77g4y6$i>W#aco(h_s#rXfr`dTCfQWMhPHjOg53_j>F|#A_0or4{Y+=0@evr0 zh&tDt?Q90`YR~+zI&r_=D7Yx!=&0qDbGB4#4HU70YNIoP#1l7mtGy%=h=Z&%Yzp9R zCl5TMmS#gDV_qIKAuf@$O+wf37Eb4lyiYD;;XUcuwbCKLPD6~X>9Obao=Gldd6h4? zC+b-E?u=aOi+nv^nb5p>!}ik*6dp=dkGBKA8$mm3G-TB~NGksG-|tvO{q)499FtMc zz^}tIL2K(x7GFR>oUOLMw|B3Qda_8DfsrnW-{O&B2a(Ng`|`m#3!zyb^yF^5qc_$0Ha=2ja0}1#Vf?woZl*l!s7O9z zhoI)1frT_9f~G6K!C`>Yi!1~2#?;@nb%+bxy>M|6QP_nL=9W1ThbVf{k;O%$rm+!; zyc)E*;3wMwvTgl?>BOL`(=WxvJHb(u0DgH({5ETXfIE65(~5#3SJu+K_vNP3nRscJ zgX#SUtRRmw$OQ&*Em93D@sU;HjA+J$hHUiM3Ypg23_+h+(DurG+xlpPbrKm#Z*#oQ zsi;vjuajG6^3pQy8m1Yeke6xiz)x~HcTN4t1>wdnxnEX$UUhHtkxnNve2vC$m)R(0 zV$ptM@;J}ntaXrCwZx)T~ zKYZ((_gC1;+M_xJ9m7PwHphJ-O)mcFp^~q7b&nm+*=LTG-FD&x5l&`N<&}4SS1{vs zDyXQd&-F~8CRS|sW4iN^I;xtAxpwa!D;f%?hlPb%oRDc)T<^_|VL36uI8;Z^cqjQ2aBwvt&Rj~uwI&F}%^Q0IJQ z=C8m0s&KS;M#zTLJdDtJ;d`lH42_ijt`{kK0>8dj@L`*)=|VIeTG#5sA9rKmy*ZEj z-tOHwR=&o}a^54uPHN>;>kmqkx%qcs>5e?z0r=_!3Kr~$K()z|^{#jwVz^jz>ihE7 zCgTAw<@&dRgJwy+R5vy#>ERCp2~6Q{jV~p&l_!2MI^0V}v_~4Hd|yVB5vR#oPLN8e z+T~u%8XVsXb63itiT(QYVH{Ru@RZJ{w{!kqJnL>JX+K8tjk-b`-!21s^Y5@zPY?Mu zONL)SKyLD<3?a+q?j`mmJ}zJV#hu7UagtOaAt8W7N0t8bMAS)QgCI7WrtZ1*UQABE zCq7P;C>{1*s@KM1)FV78_bg3Jd7E!fp9sv>`PKZeiQly`pTMhhUz`F z>awNj`ndnPwvJebm>EY^UA=e3+SIz9ULi0yA9Fq%D}ihm`SBymNqKg(z!Lx|+djrmZT zN@Wf%i;7x|(cVcd)v+hG3MV%H3cspxM)^qSTx`Zzo`aEG*4BtM8E>!?uy|+Om}cZvjji31KCip`fXTyJbAvn-CyHbt 
z)w>d%Si3*wXM+=_Wh?U<=0fr*^W}MF<6G5a#||}A#1DAn8FPL4H%5w)b!To9@m-(k ziZbcx8PQ4hyZHFY<2w#@6ICv*EWXOwvahnf;4b0?o?<(?xOCK53#1hW2j^|@e$%VX zEBf5Os3B?*n{C=VT#R@kINBEXPJ#tppWMplxA6FN4aK&3|B6aOwpvbJUj1YJc-OZ$ zc9#wxdqg&p^{Eypq*5@&bME|EXf-s!l6+Rc zq;BrQPDzu_E60g5o}2&eAx)|o_nU~?mz(}mMz7(pu*+OYK1<3NX>Y&&og^Jff|(vf z-;TCU+p48>u^!(^VX5ZMYfGY=wn7_qyEYr8-&~jrkNKwcp)U6JFVY{Z6KctT!il3T z1NmB?z78i%I<6xmW_@XMbr?zY~q4b8} zi&bBu!XkT_t;w6(b?(t*fNQWl*JcL~aE&@=o!oUl4A7i>ZWS%P&U2co6XIu) zE3d1Z?3pco0Oaiz3N3&5-cRMR$xIODC4!kRKl=!@r=^O#;cKb8Gwq4>dCaN zKMfSKz0OJXK8q#PcfU$lTkB6($_EEi*x@ZLX`prsP~*YJmUYH!j(Ke); z#rwo@nm6k>-U8ZP7#cum@DiP0XH{vmjFYOa$>!9iRkT6I2abr}~luO)T0ANEuC z)F0GzK2e(dF?^t%U-?uE*6FLPsxC-*(YP3zA4mIfX&(Nn9xtl7TVj5Bbb*36&FPl_ z1D_7)6-99&kl7kF-?Fp8R2<?m|!&iRi z+Ctbz#Pw$f<4so=o5f8Rd6S#oUfya2Iv`XBxQw^nv&rq_{fA-2jZ?elfrZ+ziSY^g zC!zE6^XFIg`seYh)z#GnmzLXsb6g*g6`HC!W$|!@CeO34=W*!wI@(oQ(BLD~H2loX zYkSQ~J1C{_+QxfO&25x;EgRT8nh<8Hj>k$w`Zg!_;B&T@=JMUd3@2VD7+R&}Q>bv7 zaa=v19!|weE?JfHKu1OsU%xIL9&a)8_=S6$uKJFRhnsEX#Q4@&@VsW>@p53jC?pr0tcD*&GK zJwXNS+Ajc{`0M;S85##Srz2uo7Se8d{?xDdoyM$L6hTrMtDnh;y4j$(e8n8)-}xuU z#w#wRQ}j**Tlq9XaLQ4I6TW>~``_9ZmG?YFC^J0EKY7`gLOYOw5c6yYo=xs>O8kKQL%UeL*4`22EFUfDQ4bJxVc03yS$04Esp$$+=p!Na`&Lx&0kY-Y!Q+A?o zGx^l+>M!=7+lY^E`tYjh1(0t2epStr+qcl#IQ9|L*w=I1{_m^5z9#oj&IWyII}v zee*f^`=90SsBQG$56{uTt1 zCh_|e{`YtNb7ueh=Kt=I|G7%s0WhYFVYj6-a2YkP_VY21u3vq@-hfU0zJ!op5+69g z|MkWdK=Y}Ok!E`PmyvVR@LrqU*nTKXr_n2`q{O_VuMTs|9znnd?j+>zvwY*4m6_Ri zjl#?xe25|P{4w~_`A0rT26i9uz1yR5>%Rm9tQST+9W-Z|iTeA(Mg`%RIoRx4xSXNfDy4uT1 zEhd~jw|Y5kuJ0x&4l7u_gI^X4qHZRAS$eOXecC>@lA4hQu8I<1eqSrRXsD&4;?9Yh zjnHzAjqdZF`Q}O0`Y7H0ZM#X$RLGzh2hE=(f{hIDgRhsQ;v&L&sp<`!$U3l~*rfdX zsPi4VC;Ng3REdFz961;;a2}e84JP3WM4!axPjo zX-63uOT7-lARl>_J|9L)sXm^Kun$6kaQA7Ei>$6|B zJB%f={3h{wx((Ox!JTXMNbIK4B4PLKBv{ec7UTV_f%|i>NVMy0RI5L2O_B34lCzy& z%aQ|O*-Hn7^-9a4cdHzXt${3D#cJw_3@&7!l`F>cs^%;&lcJ-`?b~RSe@OIx1N!jd zVy}$S(mm9(Q3L|<;na&_gmc}d|L`V8%pJUVXAK`NS}9F+Lq)f%p1q7*_~q;^Ka6l& zn_)8@zJ`S^!eddv1&ZSlZtiWKt8(E*T(%EAd!PL<<*jviS2V+l2^osFrLPti5*RRB 
zKj|YbQy=wuuU6CqKTSO3oTlZZo@SB@|IjG8%p<%-EGrrwdq?%zy|X~xqN%qy9I=!i z99JiS;l;tzi1M)C1EY^ksULADwS4!*#pSe0-d{nlwGMYc<8wOfl%>hOdAr)?pt~WS zVPC!9cc8Ncp~8l=MbjxTroL}z&V`QH;LKjRVI#cuX z)N@dM?F+yE$ntX5(7g2S;rp-9Xk{&}vjvHrbL?XYtJ#J6q`(T6u?-@bn90^#*-*j_ zDD>;V4)Ho3mvbM5a<^S^Db%njR0i&RJ+{VRB|{A5z@aqd?Vg3AXTV?>-Ou~z+=GKT zpLr?@eL0n;cr(TP4av!8S|;hS_UXL(2Lu85x#w7CG(oS*U&GRAR83SplG19V*j{Jy zo1_}=YNWx368n~7EhKa(P}Jp&N^(b1RA28#Ti=2-Cj;+=z4rK&CWq4=@XQ+`f)pmA zYo}yrSMMlT^|-}=d>-1BMk8Y~%Gdm#u=KZn5z@reF%+j%HVP98ga&(=$!YDVh%NdIGb@DL^FMNyC2E`qR#mZ32*{DOx3&f`XN>9goT z&{u)@2nDZdDyUGQt9Wpa)N{^TM$AwVdv#fl)fPunfY#L{%b|Cb$xL}LPvy{|hTmLe zev$fEnhJMg{;d;gwO^Z^#bm>Y7gT6XJDMsa@Fwu48ZSp(d&cl4=w)j2fs%--4@i)L zh3RFLRmDd}KCSwd62MOu$@`2vDmAsq2p*BIUc7$DtH4qWOhf$x0~Vib6N+(~Xn8At zF~bxf6!*MS5Qg|}JM5^DWHY`sE>I$WF5$6Q@STpiGq88{agE9W-))i0(Vb(;d875x(FX;FPiMS5y-coQT$QDE?*VWY= zb?;t?z|~pc9CwPKv+2>Sd4O;fFoyzmh~>F!g69{?e0+5_g+VRvD+bU8iK|N(u`<%q z>P2pH&4hjWtojSz=@^*cb*^*k$~i)smmy+WYpnsFUDX3v~$yq58L2~)4EmB{%)NA&{E-Ae#MQu)0l!TxzgYP~U4kzbz0$Ew3sVzy5 zB-c|c*YIy|L~Yp2xT2k3FHI%Q=ibxIdE&x$BVIu?t1^}xb>&gv+U$qtai}phHR5$? zvv8I$$WigDuwY16^>oc7g;GnsYvEo3qrA6RO4o2sBSj=b49qlYRvckX`DOlM>s5fC zp!l)doc9Nk|8{PUazjE(pc+79oY#Lpq*)XtEi(>=6RelHddZ5be`F! 
zVHOU&An6#`89a^{=iWAkV`1&t-(%4C-fqA~lCu3Q-f68n>@-}tUk9HViX34k=YAUg z){lbmIv5z}K4K|mlwoc}#&2s`#0f998aQsZ%QMn!UaTy=Xrs~Z2 z8^89^aLnpV@K(r=#$6@n)mLlw4|rUj%7pzHSsCc)=;peW>2iwy6}%kxJCr-PQc=s`>mzU7@N=*vM_PzplHquy?&9x@+wwN>zq`Wpq)mAG!V7=|zZbnR}7&`mFAe7D0{w+s}HJ$d!8O+vt!TM zHs^l6FG~tiU>p=wj7u;!VsJQ0{qeF+$DO)e;Fk?^nPdaOGtTuUeIH%G>LjubrlWtQ zX--QbVCsb3dS2_ixxM8NO46h@AH2z+kq|gXJ0`c+GV3;YFo859L$7|sH`YeyfStwz zWL&>C(rZ)Sn-EzjM6jtoRX7oNm2NwcwMumxi>Px&BU{jVYPRb;kNuiUnI?&T`~p+p z>H~EJUFHFe*L$xh799hk|62W-3XgR;eHz~e#lKRZ$QxDZ{8{Yi`jo@EPhhOhd^U%O z;Q(4`nRZ@s6!zq;GQ$8_pU?@DUpE_(mhlzN*SAK&k~sXj31r&tsK*k|6>lpPv-A}- z#%z%^Hg^n=nc@5%0oiqes!!3esi~=Q(UI1G2eMVqXi`#AK&bsS`fD9jG96Xl%5&*; zR@OBGQbb7NKypJVVK2E9UWxdQ7qM!uS$Ve=nM19|lIRL5i{CIL3NJu>k%n5;AMM=UX( zcUQ>7{XVrA=k*k+ET`D-J}B%G$g+Aj9f}_z(yB>vp)f46)DhrYj)E_B%71f(OUVlo z;NY~Fme&y@t*opDvr~XQ2B8)^i>R?JOPyy2`sdHg6zZ>u6ytRFlF<4B!{dltCh@7ySf>PwUgxiyTJf@ zUfzJ?Xcg{b-AVK~Ds}FCrB)Q%f9691*{povLc3TE6vlY?=s@{@BX=_vaMh**DD0o7^}YO11Lc z*-iVqd)F+9ReH`g{0MR9;HO#O)_O1hOc~$0;T`6)Q!_<(Z5LHoB9|A?dmK~ez{6FE zm51$eIVrYrGK&r>QhQQgwwK{s;oBRcfdW7|aN<5n; zNyz5AO$LqJQkYIoE>7K9WRat$T>78i;NDv5rx47ydtQqxtl6wX{gh)X>`|w&OMKB{m2ao&Q!|B zSkB)ZZ@PpnjCCsmy-`)QR2Exr2(29DbB@HoqvX$9GXg?uQ&aQHTM{;GImd(zXyp+Y z3-+ZU6(!Cc_cNK>i!TNQMUuTe)XFgz*%Wds6MudG@hCozXq+`Dpr{y)^gQ@|5DX9U6!lSnH z(AZw;kwklT((jque*=*sF6;*73-4sEn9XC7MOsILYlSJ78NlV#+GrH`~ z3n=Yaj2`TuTCgQGn0m#C6?!wJ z#_OXXF4woJs-nxQC{sNR&D{DH7M~3|zX{y|=7EP8l(ds&CJv5{+uJ7J_|IV(6PAsF z`{=Bzl%pxW(&qmGp6*oHY5$VSAn0>U%Go;+`;Yhn| zm?d!LDkvy0YfuKEOuv}~1(<<$J4U$TKY3!wkw_MF%V))(U4uU_RMy(I}3^o0zqs9DaN1Y$I@= zztoo1c^`IXdiNg4z&THjBoeEfoo#IN8Ev%6lo>36UwhCRIYTx4x$Kpu$tUSEvoWh3 zXSED#AGQ&8N`JECuKpnF%V~ABw6tWs7vaug%R)>%066-LC^}p`TzIiD9JalBV7a!o z*4ardTG20y&{4`YUhcvHo{1mKs|9f?#WPO#f>QJ012GF*Te6~r?98w@4i1hjkR&PR zgx0>!N>$idA(N*1+Wftl6h5QrcEa-`;F**cWt#;@zh4wDnA$}+m0 zF&B|ASRvV$%(DE!E(6SPtHXy8`tN6q{H3{FIQq@mg^Z zseah`$uE+{v#h#$s-BgdJ*;yn!E6Bpu61JwqtYu60~4z@rZ`QzA~ju7whz)~_{J0x z61mzAXN@4g0^QAesn9QlJ|;mJnj;e~L^9MG%tw}Bvzx;j<_t6xsv=l8xg4Cb7}0}x 
zkG^CSbqi2B5x1heeEo|=NILq)VwMRyF13>CHP!W3a7(AeGg-e5(;G+RS}9o>yO0Q! zr!8t3Y_SBICSL|iCaL(RSwkSZRWbGpfBngcA0BQ1SFq}-*C8rpi+#&>4;8i3vojU7m&ZGBA)DFiZ>-my`AZ&kpQs2k^G^u8|$i~LG(s^(1UJXx8KGHqkjE~E4^)s-sXoM|> zCzP9l`PGd?Cwlr^sSqh_K{O<+eZ$_ZO1vtfJ#ka4N{jf{gnXLY*~swQqPCSqV&})u zXD-)q)z_D9^K+jXf2-UGL}V;feLBXK9ZC;=Hvjr7v=_(>+oMwnc4zzx1b#nyDFVLOM{3Uz4tpFI)R>h z^{3*=-o1U35KyED=RlsuG>Ed0%x#z4u=Lk%8l{6*Sshcd# z93M%9U*Xo>Fya!ODR{7@Ol&+-U44?w=i=^GEp#$;i=y;3jj6Vo9Qv$D4_;w#&>fv~ zw8QuP_nfUu-w~6*YXT6A4ho(-9b_}T(l%1b^GIm;Fr%xyQ{#%$QHjvIpC|1j)X2~y z^dV+)7pi*DO@Ca>SKES4E9cEAPGQNV-zoWPY62C`qfnXpF{ItNf%WpbF${P^U6=~d z?nuxe!i30Yn}O~Xn03Stl{`+<&2d+-YOTx9-%K1-w?)=F@1%HFRyyBIdoTK?nKobO zXn%ieIwIs}I>`1oO%5QWLb~Bq+mG*_pvcH<3(wSdf6eO!{^|c~>-C)SD;Lyyn52t~ zimUl5!hEuo_}cwko4$U2-@FH(|OCWkdK*1z5t|oZYrz18xeYRi@%-_pFrRM?O5DU z%ErNQd!fNsn;2qKylW#zVLq2_d)M*r`PSySjseoTWrx!#`9Q-PTEvjMBJkg{kdTk4 zlUxHOWhEB9dZ*>C;Wd4?i0+@!4i#Mg%zJ`eNX>0*u0TGM8h6hRTi`J#4=&bU6Bv*` z0pNVxGj`L8c9JOwg!PO4>m+MF7)3qyj|q&AP26a)|2e<=XWu`^gZv5S|6YCmXEN~5 z`o}-wyuUa9Uw4MoHjCqdd2Knfwv{v-FbRzW*rq=d0{`I$yq$cReY!Mp<_6@UzvpMDL-+5uoM;ES z5}Ai&PGqp@Izu4yG=UR08a;zAAQ0bj84OUO*G%du(cB?`4de*>K#l;mfTlQu@y`|f z|Di*GaCe}}I&x1M%z^mf@8uUo)Dxm-;aos8{P*&=vir~a|5J1N-qfNEKe9f<0I2wX n7Jo934*h%iFXR%JsOua3U78Fb8|-nvca##77cCNg|JVNjX(rL5 diff --git a/docs/HERMES.md b/docs/HERMES.md index fbad5d939..3a48eb392 100644 --- a/docs/HERMES.md +++ b/docs/HERMES.md @@ -1,38 +1,65 @@ ## Hermes Agent Overlay -Hermes Agent supports parallel execution via worktree isolation (`hermes -w`). -Use separate worktree sessions for parallel review and triage work. +Hermes has built-in parallel subagent support via `delegate_task` (up to 3 +concurrent children). Use `delegate_task(tasks=[...])` for subjective review +batches and per-stage triage support; avoid the older worktree-based guidance +here. ### Review workflow -1. Run `desloppify review --prepare` to generate `query.json` and `.desloppify/review_packet_blind.json`. -2. 
Split dimensions into 3-4 batches by theme (e.g., naming + clarity, - abstraction + error consistency, testing + coverage). -3. Launch parallel Hermes sessions with worktree isolation, one per batch: +1. Prepare review prompts and the blind packet: + ```bash + desloppify review --run-batches --dry-run ``` - hermes -w -q "Score these dimensions: . Read .desloppify/review_packet_blind.json for the blind packet. Score from code evidence only." + This generates one prompt file per batch in + `.desloppify/subagents/runs//prompts/` and prints the run directory. + +2. Launch Hermes subagents in batches of 3 with `delegate_task(tasks=[...])`. + Each subagent should: + - read its prompt file at + `.desloppify/subagents/runs//prompts/batch-N.md` + - read `.desloppify/review_packet_blind.json` + - inspect the repository from the prompt's dimension + - write ONLY valid JSON to + `.desloppify/subagents/runs//results/batch-N.raw.txt` + + Example task payload: + ```json + { + "goal": "Review batch N. Read the prompt at .desloppify/subagents/runs//prompts/batch-N.md, follow it exactly, inspect the repository, and write ONLY valid JSON to .desloppify/subagents/runs//results/batch-N.raw.txt.", + "context": "Repository root: . Blind packet: .desloppify/review_packet_blind.json. The prompt file defines the required output schema. Do not edit repository source files. Only write the review result file.", + "toolsets": ["terminal", "file"] + } ``` -4. Each session writes output to a separate file. Merge assessments - (average overlapping dimension scores) and concatenate findings. -5. Import: `desloppify review --import merged.json --manual-override --attest "Hermes agents ran blind reviews against review_packet_blind.json" --scan-after-import`. -Each session must consume `.desloppify/review_packet_blind.json` (not full -`query.json`) to avoid score anchoring. + Repeat for batches 1-3, 4-6, 7-9, etc. Wait for each group of 3 to finish + before launching the next group. 
-### Triage workflow +3. After all prompt files for that run have matching results, import them: + ```bash + desloppify review --import-run .desloppify/subagents/runs/ --scan-after-import + ``` -Orchestrate triage with per-stage Hermes sessions: +### Key constraints + +- `delegate_task` supports at most 3 concurrent children at a time. +- Subagents do not inherit parent context; the prompt file and blind packet must + provide everything needed. +- Subagents cannot call `delegate_task`, `clarify`, `memory`, or `send_message`. +- The importer expects `results/batch-N.raw.txt` files, not `.json` filenames. +- The blind packet intentionally omits score history to prevent anchoring bias. + +### Triage workflow -1. For each stage (observe → reflect → organize → enrich → sense-check): - - Get prompt: `desloppify plan triage --stage-prompt ` - - Launch a Hermes session with that prompt: `hermes -w -q ""` - - Verify: `desloppify plan triage` (check dashboard) - - Confirm: `desloppify plan triage --confirm --attestation "..."` -2. Complete: `desloppify plan triage --complete --strategy "..." --attestation "..."` +Run triage stages sequentially. For each stage: -Run stages sequentially. Within observe and sense-check, use parallel -worktree sessions (`hermes -w`) for per-dimension-group and per-cluster -batches respectively. +1. Get the stage prompt or use the command suggested by `desloppify next`. +2. If the stage benefits from parallel review work, use `delegate_task(tasks=[...])` + in groups of 3; otherwise run the stage directly in the parent session. +3. Record the stage output with `desloppify plan triage --stage --report "..."` + or the corresponding `--run-stages --runner ...` command when available. +4. Confirm with `desloppify plan triage --confirm --attestation "..."`. +5. Finish with `desloppify plan triage --complete --strategy "..." --attestation "..."`. 
diff --git a/docs/SKILL.md b/docs/SKILL.md index 704fe88f1..6fc23ef9c 100644 --- a/docs/SKILL.md +++ b/docs/SKILL.md @@ -10,7 +10,7 @@ allowed-tools: Bash(desloppify *) --- - + # Desloppify @@ -203,6 +203,20 @@ desloppify config set commit_tracking_enabled false # disable guidance After resolving findings as `fixed`, the tool shows uncommitted work, committed history, and a suggested commit message. After committing externally, run `record` to move findings from uncommitted to committed and auto-update the linked PR description. +### Agent directives + +Directives are messages shown to agents at lifecycle phase transitions — use them to switch models, set constraints, or give context-specific instructions. + +```bash +desloppify directives # show all configured directives +desloppify directives set execute "Switch to claude-sonnet-4-6. Focus on speed." +desloppify directives set triage "Switch to claude-opus-4-6. Read carefully." +desloppify directives set review "Use blind packet. Do not anchor on previous scores." +desloppify directives unset execute # remove a directive +``` + +Available phases: `execute`, `review`, `triage`, `workflow`, `scan` (and fine-grained variants like `review_initial`, `triage_postflight`, etc.). + ### Quick reference ```bash diff --git a/website/index.html b/website/index.html index 3be291215..4cca5c1e3 100644 --- a/website/index.html +++ b/website/index.html @@ -114,6 +114,8 @@

Experimental Token

Our first initiative: a $1,000 bounty to find something poorly engineered in our 91k-line AI-built codebase. 262 comments. Winner:
@agustif. + Now live: $1,000 + if Desloppify does something stupid when refactoring your codebase.

Read the full token page → diff --git a/website/token.css b/website/token.css index d959b2f16..5b3c926a7 100644 --- a/website/token.css +++ b/website/token.css @@ -141,11 +141,16 @@ body #details { color: #fff; } -.status-soon { +.status-active { background: var(--amber); color: #fff; } +.status-soon { + background: #999; + color: #fff; +} + .initiative h3 { font-family: var(--font-hand); font-size: 1.6rem; diff --git a/website/token.html b/website/token.html index 55b6726e8..f1a72b4df 100644 --- a/website/token.html +++ b/website/token.html @@ -114,17 +114,36 @@

$1,000 Bounty: Find Something Poorly Engineered in This ~91k LOC Codebase -
+
Initiative #2 - Coming Soon + Active
-

More details coming soon

+

$1,000 if Desloppify Does Something Stupid When Refactoring Your Codebase

+

+ Desloppify is an agent harness that refactors and improves code quality. We're + reasonably confident it generally improves codebases — but we want to surface + the cases where it genuinely makes something worse. +

- The next challenge will change the format — less time-based, more focused on - who gets the objectively best response. Follow along on - Discord or - @peterom on X. + If Desloppify refactors your codebase and makes a demonstrably stupid decision — + an abstraction that degrades readability, a change that introduces fragility, a + refactor that makes the code harder to maintain — share your evidence and you + could claim $1,000. +

+
+

Requirements

+

+ Codebase must be 10k+ lines. Run with Claude 4.5/Sonnet 4.6 or GPT-5.3/5.4. + Share your logs and code evidence. One submission per person. + Deadline: March 21st 23:59:59 UTC. +

+

$1,000 in SOL to the winner

+
+
From 0f41dca3164f63ca5ba6f16f6dea544ec28d1893 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:22:00 +0100 Subject: [PATCH 11/43] fix(r): correct shell quote escaping in lintr command (PR #424) Co-Authored-By: Maximilian Scholz --- desloppify/languages/r/__init__.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/desloppify/languages/r/__init__.py b/desloppify/languages/r/__init__.py index 39677c7cb..c89716eff 100644 --- a/desloppify/languages/r/__init__.py +++ b/desloppify/languages/r/__init__.py @@ -10,9 +10,7 @@ { "label": "lintr", "cmd": ( - 'Rscript -e \'cat(paste(capture.output(' - 'lintr::lint_dir(".", show_notifications=FALSE)' - '), collapse="\\n"))\'' + "Rscript -e \"lintr::lint_dir('.')\"" ), "fmt": "gnu", "id": "lintr_lint", From 55f9aaf0b8f9797fe74eb783fd912f787e8576ce Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:22:03 +0100 Subject: [PATCH 12/43] feat(r): add Jarl as fast R linter with autofix (PR #425) Co-Authored-By: Maximilian Scholz --- desloppify/languages/r/__init__.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/desloppify/languages/r/__init__.py b/desloppify/languages/r/__init__.py index c89716eff..3a28cf1ed 100644 --- a/desloppify/languages/r/__init__.py +++ b/desloppify/languages/r/__init__.py @@ -1,4 +1,4 @@ -"""R language plugin — lintr + tree-sitter.""" +"""R language plugin — Jarl, lintr + tree-sitter.""" from desloppify.languages._framework.generic_support.core import generic_lang from desloppify.languages._framework.treesitter import R_SPEC @@ -7,6 +7,14 @@ name="r", extensions=[".R", ".r"], tools=[ + { + "label": "jarl", + "cmd": "jarl check .", + "fmt": "gnu", + "id": "jarl_lint", + "tier": 2, + "fix_cmd": "jarl check . 
--fix --allow-dirty", + }, { "label": "lintr", "cmd": ( @@ -14,7 +22,7 @@ ), "fmt": "gnu", "id": "lintr_lint", - "tier": 2, + "tier": 3, "fix_cmd": None, }, ], From 6dd58a10789ba4df1f821e0c5d476f34c314123b Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:22:10 +0100 Subject: [PATCH 13/43] fix: phpstan stderr/JSON parser fixes (PR #420) Co-Authored-By: Nick Perkins --- .../_framework/generic_parts/parsers.py | 21 ++++ .../_framework/generic_parts/tool_runner.py | 15 ++- desloppify/languages/php/__init__.py | 2 +- .../tests/lang/common/test_generic_plugin.py | 105 +++++++++++++++++- 4 files changed, 137 insertions(+), 6 deletions(-) diff --git a/desloppify/languages/_framework/generic_parts/parsers.py b/desloppify/languages/_framework/generic_parts/parsers.py index 271f764a2..1b4db2a5e 100644 --- a/desloppify/languages/_framework/generic_parts/parsers.py +++ b/desloppify/languages/_framework/generic_parts/parsers.py @@ -181,11 +181,31 @@ def parse_eslint(output: str, scan_path: Path) -> list[dict]: return entries +def parse_phpstan(output: str, scan_path: Path) -> list[dict]: + """Parse PHPStan JSON: ``{"files": {"": {"messages": [{"message": "...", "line": 42}]}}}``.""" + del scan_path + entries: list[dict] = [] + data = _load_json_output(output, parser_name="phpstan") + files = data.get("files") if isinstance(data, dict) else {} + for filepath, fdata in (files or {}).items(): + if not isinstance(fdata, dict): + continue + for msg in fdata.get("messages") or []: + if not isinstance(msg, dict): + continue + line = _coerce_line(msg.get("line", 0)) + message = msg.get("message", "") + if filepath and message and line is not None: + entries.append({"file": str(filepath), "line": line, "message": str(message)}) + return entries + + PARSERS: dict[str, Callable[[str, Path], list[dict]]] = { "gnu": parse_gnu, "golangci": parse_golangci, "json": parse_json, "credo": parse_credo, + "phpstan": parse_phpstan, "rubocop": parse_rubocop, "cargo": parse_cargo, "eslint": 
parse_eslint, @@ -201,5 +221,6 @@ def parse_eslint(output: str, scan_path: Path) -> list[dict]: "parse_gnu", "parse_golangci", "parse_json", + "parse_phpstan", "parse_rubocop", ] diff --git a/desloppify/languages/_framework/generic_parts/tool_runner.py b/desloppify/languages/_framework/generic_parts/tool_runner.py index 90508e3db..e8c1d1936 100644 --- a/desloppify/languages/_framework/generic_parts/tool_runner.py +++ b/desloppify/languages/_framework/generic_parts/tool_runner.py @@ -91,8 +91,15 @@ def run_tool_result( error_kind="tool_timeout", message=str(exc), ) - output = (result.stdout or "") + (result.stderr or "") - if not output.strip(): + stdout = result.stdout or "" + stderr = result.stderr or "" + # Parse stdout when it has content (structured JSON tools always write + # there). Fall back to combined stdout+stderr only when stdout is empty, + # so that tools which emit diagnostics to stderr don't corrupt the JSON + # parse input while still being treated as "no output" when truly silent. 
+ parse_input = stdout if stdout.strip() else (stdout + stderr) + combined = stdout + stderr + if not combined.strip(): if result.returncode not in (0, None): return ToolRunResult( entries=[], @@ -107,7 +114,7 @@ def run_tool_result( returncode=result.returncode, ) try: - parsed = parser(output, path) + parsed = parser(parse_input, path) except ToolParserError as exc: logger.debug("Parser decode error for tool output: %s", exc) return ToolRunResult( @@ -136,7 +143,7 @@ def run_tool_result( ) if not parsed: if result.returncode not in (0, None): - preview = _output_preview(output) + preview = _output_preview(combined) return ToolRunResult( entries=[], status="error", diff --git a/desloppify/languages/php/__init__.py b/desloppify/languages/php/__init__.py index 08031ac17..7b6edda24 100644 --- a/desloppify/languages/php/__init__.py +++ b/desloppify/languages/php/__init__.py @@ -73,7 +73,7 @@ { "label": "phpstan", "cmd": "phpstan analyse --error-format=json --no-progress", - "fmt": "json", + "fmt": "phpstan", "id": "phpstan_error", "tier": 2, "fix_cmd": None, diff --git a/desloppify/tests/lang/common/test_generic_plugin.py b/desloppify/tests/lang/common/test_generic_plugin.py index 8958d65ee..f4eeb667c 100644 --- a/desloppify/tests/lang/common/test_generic_plugin.py +++ b/desloppify/tests/lang/common/test_generic_plugin.py @@ -22,7 +22,7 @@ parse_json, parse_rubocop, ) -from desloppify.languages._framework.generic_parts.parsers import ToolParserError +from desloppify.languages._framework.generic_parts.parsers import ToolParserError, parse_phpstan from desloppify.languages._framework.generic_parts.tool_runner import ( resolve_command_argv, run_tool_result, @@ -233,6 +233,71 @@ def test_invalid_json(self): parse_eslint("not json", Path(".")) +class TestParsePhpstan: + def test_extracts_messages(self): + data = { + "totals": {"errors": 2}, + "files": { + "/app/src/Foo.php": { + "messages": [ + {"message": "Call to undefined method Bar::baz().", "line": 10}, + {"message": 
"Variable $x might not be defined.", "line": 25}, + ] + }, + "/app/src/Bar.php": { + "messages": [ + {"message": "Parameter #1 expects int, string given.", "line": 5}, + ] + }, + }, + } + entries = parse_phpstan(json.dumps(data), Path(".")) + assert len(entries) == 3 + assert entries[0] == { + "file": "/app/src/Foo.php", + "line": 10, + "message": "Call to undefined method Bar::baz().", + } + assert entries[2] == { + "file": "/app/src/Bar.php", + "line": 5, + "message": "Parameter #1 expects int, string given.", + } + + def test_empty_files(self): + assert parse_phpstan(json.dumps({"files": {}}), Path(".")) == [] + + def test_empty_messages_list(self): + data = {"files": {"/app/src/Foo.php": {"messages": []}}} + assert parse_phpstan(json.dumps(data), Path(".")) == [] + + def test_skips_non_dict_messages(self): + data = { + "files": { + "/app/src/Foo.php": { + "messages": [ + "unexpected string", + {"message": "Real error", "line": 7}, + ] + } + } + } + entries = parse_phpstan(json.dumps(data), Path(".")) + assert len(entries) == 1 + assert entries[0]["message"] == "Real error" + + def test_skips_non_dict_file_values(self): + data = {"files": {"/app/src/Foo.php": "not a dict"}} + assert parse_phpstan(json.dumps(data), Path(".")) == [] + + def test_invalid_json(self): + with pytest.raises(ToolParserError): + parse_phpstan("not json", Path(".")) + + def test_non_dict_root(self): + assert parse_phpstan(json.dumps([1, 2, 3]), Path(".")) == [] + + # ── make_tool_phase tests ───────────────────────────────── @@ -360,6 +425,44 @@ def test_run_tool_result_parser_decode_error_is_error(self, tmp_path): assert failed_result.status == "error" assert failed_result.error_kind == "parser_error" + def test_run_tool_result_parses_stdout_ignoring_stderr_preamble(self, tmp_path): + """stdout JSON should parse successfully even when stderr has non-JSON diagnostics.""" + valid_json = json.dumps([{"file": "a.php", "line": 1, "message": "err"}]) + result_with_stderr_noise = 
subprocess.CompletedProcess( + args="fake", + returncode=1, + stdout=valid_json, + stderr="Note: Using configuration file /app/phpstan.neon.dist.\n", + ) + result = run_tool_result( + "fake", + tmp_path, + parse_json, + run_subprocess=lambda *_a, **_k: result_with_stderr_noise, + ) + assert result.status == "ok" + assert len(result.entries) == 1 + + def test_run_tool_result_error_preview_uses_combined_output(self, tmp_path): + """Error preview in tool_failed_unparsed_output message includes both stdout and stderr.""" + result_bad = subprocess.CompletedProcess( + args="fake", + returncode=2, + stdout='{"not": "an array"}', + stderr="Note: some diagnostic from stderr\n", + ) + result = run_tool_result( + "fake", + tmp_path, + parse_json, + run_subprocess=lambda *_a, **_k: result_bad, + ) + assert result.status == "error" + assert result.error_kind == "tool_failed_unparsed_output" + assert result.message is not None + assert "not" in result.message # from stdout + assert "diagnostic from stderr" in result.message # from stderr + def test_resolve_command_argv_plain_command_does_not_shell_fallback(self): argv = resolve_command_argv("nonexistent_tool_xyz_123 --version") assert argv == ["nonexistent_tool_xyz_123", "--version"] From 0f00b4313984b367739d159014cb7025beb94d87 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:22:39 +0100 Subject: [PATCH 14/43] fix(engine): prevent workflow::create-plan re-injection after resolution (PR #435) Co-Authored-By: Charles Dunda --- .../app/commands/review/importing/plan_sync.py | 6 +++++- desloppify/app/commands/scan/plan_reconcile.py | 9 ++++++++- desloppify/engine/_plan/sync/workflow.py | 18 ++++++++++++++++++ 3 files changed, 31 insertions(+), 2 deletions(-) diff --git a/desloppify/app/commands/review/importing/plan_sync.py b/desloppify/app/commands/review/importing/plan_sync.py index 8b8e68dd7..bf71e3fb6 100644 --- a/desloppify/app/commands/review/importing/plan_sync.py +++ 
b/desloppify/app/commands/review/importing/plan_sync.py @@ -28,7 +28,10 @@ reconcile_plan, ) from desloppify.engine._plan.sync.workflow_gates import sync_import_scores_needed -from desloppify.engine._plan.sync.workflow import clear_score_communicated_sentinel +from desloppify.engine._plan.sync.workflow import ( + clear_create_plan_sentinel, + clear_score_communicated_sentinel, +) from desloppify.engine._plan.refresh_lifecycle import mark_subjective_review_completed from desloppify.engine.plan_triage import ( TRIAGE_CMD_RUN_STAGES_CLAUDE, @@ -247,6 +250,7 @@ def _apply_import_plan_transitions( ) if trusted: clear_score_communicated_sentinel(plan) + clear_create_plan_sentinel(plan) if sync_inputs.covered_ids: mark_subjective_review_completed( plan, diff --git a/desloppify/app/commands/scan/plan_reconcile.py b/desloppify/app/commands/scan/plan_reconcile.py index 4955ce629..17be193a1 100644 --- a/desloppify/app/commands/scan/plan_reconcile.py +++ b/desloppify/app/commands/scan/plan_reconcile.py @@ -28,7 +28,10 @@ ) from desloppify.engine._plan.sync.dimensions import current_unscored_ids from desloppify.engine._plan.sync.context import is_mid_cycle -from desloppify.engine._plan.sync.workflow import clear_score_communicated_sentinel +from desloppify.engine._plan.sync.workflow import ( + clear_create_plan_sentinel, + clear_score_communicated_sentinel, +) from desloppify.engine.work_queue import build_deferred_disposition_item logger = logging.getLogger(__name__) @@ -48,6 +51,8 @@ def _reset_cycle_for_force_rescan(plan: dict[str, object]) -> bool: for item in synthetic: order.remove(item) clear_score_communicated_sentinel(plan) + clear_create_plan_sentinel(plan) + plan.pop("scan_count_at_plan_start", None) meta = plan.get("epic_triage_meta", {}) if isinstance(meta, dict): meta.pop("triage_recommended", None) @@ -103,6 +108,7 @@ def _seed_plan_start_scores(plan: dict[str, object], state: state_mod.StateModel "verified": scores.verified, } 
clear_score_communicated_sentinel(plan) + clear_create_plan_sentinel(plan) plan["scan_count_at_plan_start"] = int(state.get("scan_count", 0) or 0) return True @@ -149,6 +155,7 @@ def _clear_plan_start_scores_if_queue_empty( state["_plan_start_scores_for_reveal"] = dict(plan["plan_start_scores"]) plan["plan_start_scores"] = {} clear_score_communicated_sentinel(plan) + clear_create_plan_sentinel(plan) return True diff --git a/desloppify/engine/_plan/sync/workflow.py b/desloppify/engine/_plan/sync/workflow.py index d3894f299..480a5553a 100644 --- a/desloppify/engine/_plan/sync/workflow.py +++ b/desloppify/engine/_plan/sync/workflow.py @@ -303,6 +303,16 @@ def clear_score_communicated_sentinel(plan: PlanModel) -> None: plan.pop("previous_plan_start_scores", None) +def clear_create_plan_sentinel(plan: PlanModel) -> None: + """Clear the ``create_plan_resolved_this_cycle`` sentinel. + + Call this at the same cycle-boundary points as + ``clear_score_communicated_sentinel`` so that ``sync_create_plan_needed`` + can re-inject ``workflow::create-plan`` in the next cycle. + """ + plan.pop("create_plan_resolved_this_cycle", None) + + _EMPTY = QueueSyncResult @@ -343,6 +353,7 @@ def sync_create_plan_needed( - At least one objective issue exists - ``workflow::create-plan`` is not already in the queue - No triage stages are pending + - ``workflow::create-plan`` has not already been resolved this cycle Front-loads it into the workflow prefix so it stays ahead of triage. """ @@ -351,6 +362,11 @@ def sync_create_plan_needed( if WORKFLOW_CREATE_PLAN_ID in order: return _EMPTY() + # Already resolved this cycle — sentinel is set when injected and + # cleared at cycle boundaries (force-rescan, score seeding, queue + # drain, trusted import). 
+ if plan.get("create_plan_resolved_this_cycle"): + return _EMPTY() if any(sid in order for sid in TRIAGE_IDS): return _EMPTY() if not _subjective_review_current_for_cycle(plan, state, policy=policy): @@ -359,6 +375,7 @@ def sync_create_plan_needed( if not has_objective_backlog(state, policy): return _EMPTY() + plan["create_plan_resolved_this_cycle"] = True return _inject(plan, WORKFLOW_CREATE_PLAN_ID) @@ -503,6 +520,7 @@ def _rebaseline_plan_start_scores( __all__ = [ "PendingImportScoresMeta", "ScoreSnapshot", + "clear_create_plan_sentinel", "clear_score_communicated_sentinel", "import_scores_meta_matches", "pending_import_scores_meta", From 0104241737ec67562b32ea7e760e91d4d02ce740 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:22:45 +0100 Subject: [PATCH 15/43] feat: add SCSS language plugin (PR #428) Co-Authored-By: Klaus Agnoletti --- desloppify/languages/scss/__init__.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100644 desloppify/languages/scss/__init__.py diff --git a/desloppify/languages/scss/__init__.py b/desloppify/languages/scss/__init__.py new file mode 100644 index 000000000..ce0e377ac --- /dev/null +++ b/desloppify/languages/scss/__init__.py @@ -0,0 +1,25 @@ +"""SCSS language plugin -- stylelint.""" + +from desloppify.languages._framework.generic_support.core import generic_lang + +generic_lang( + name="scss", + extensions=[".scss", ".sass"], + tools=[ + { + "label": "stylelint", + "cmd": "stylelint {file_path} --formatter json --max-warnings 1000", + "fmt": "json", + "id": "stylelint_issue", + "tier": 2, + "fix_cmd": "stylelint --fix {file_path}", + }, + ], + exclude=["node_modules", "_output", ".quarto", "vendor"], + detect_markers=["_scss", ".stylelintrc"], + treesitter_spec=None, +) + +__all__ = [ + "generic_lang", +] From ba9f1907f72e35ab29686324d5f9af62404d426b Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:22:49 +0100 Subject: [PATCH 16/43] fix: Rust dep graph hangs from 
string-literal fake imports (PR #429) Co-Authored-By: Riccardo Spagni --- desloppify/languages/rust/detectors/deps.py | 4 + desloppify/languages/rust/support.py | 279 +++++++++++++++--- .../languages/rust/tests/test_support.py | 30 ++ 3 files changed, 277 insertions(+), 36 deletions(-) diff --git a/desloppify/languages/rust/detectors/deps.py b/desloppify/languages/rust/detectors/deps.py index 55bbbf004..1f64df3cc 100644 --- a/desloppify/languages/rust/detectors/deps.py +++ b/desloppify/languages/rust/detectors/deps.py @@ -7,6 +7,7 @@ from desloppify.engine.detectors.graph import finalize_graph from desloppify.languages.rust.support import ( + build_production_file_index, build_workspace_package_index, find_rust_files, iter_mod_targets, @@ -31,6 +32,7 @@ def build_dep_graph( return {} file_set = set(graph.keys()) + production_index = build_production_file_index(file_set) package_index = build_workspace_package_index() for filepath in files: content = read_text_or_none(filepath) @@ -44,6 +46,7 @@ def build_dep_graph( filepath, file_set, declared_path=declared_path, + production_index=production_index, ) if resolved and resolved != filepath: graph[filepath]["imports"].add(resolved) @@ -56,6 +59,7 @@ def build_dep_graph( file_set, package_index, allow_crate_root_fallback=False, + production_index=production_index, ) if resolved and resolved != filepath: graph[filepath]["imports"].add(resolved) diff --git a/desloppify/languages/rust/support.py b/desloppify/languages/rust/support.py index 71bca2afe..fb5afc20f 100644 --- a/desloppify/languages/rust/support.py +++ b/desloppify/languages/rust/support.py @@ -2,6 +2,7 @@ from __future__ import annotations +import functools import re import tomllib from dataclasses import dataclass @@ -34,6 +35,15 @@ class RustFileContext: module_segments: tuple[str, ...] 
+@dataclass(frozen=True) +class RustProductionFileIndex: + """Precomputed lookup tables for production-file resolution.""" + + project_root: Path + by_absolute: dict[str, str] + by_relative: dict[str, str] + + def normalize_crate_name(name: str | None) -> str | None: """Normalize Cargo package names to Rust crate names.""" if not name: @@ -53,6 +63,35 @@ def find_rust_files(path: Path | str) -> list[str]: ) +def build_production_file_index( + production_files: set[str], + *, + project_root: Path | None = None, +) -> RustProductionFileIndex: + """Build O(1) absolute/relative lookup maps for production files.""" + root = (project_root or get_project_root()).resolve() + by_absolute: dict[str, str] = {} + by_relative: dict[str, str] = {} + for production_file in production_files: + prod_path = Path(production_file) + resolved = ( + prod_path.resolve() if prod_path.is_absolute() else (root / prod_path).resolve() + ) + resolved_str = str(resolved) + by_absolute.setdefault(resolved_str, production_file) + try: + rel_path = rel(resolved, project_root=root) + except (TypeError, ValueError, OSError): + rel_path = None + if rel_path is not None: + by_relative.setdefault(rel_path, production_file) + return RustProductionFileIndex( + project_root=root, + by_absolute=by_absolute, + by_relative=by_relative, + ) + + def read_text_or_none(path: Path | str, *, errors: str = "replace") -> str | None: """Read a file as text, returning ``None`` when the file is unavailable.""" try: @@ -181,22 +220,119 @@ def iter_mod_targets(content: str) -> list[tuple[str, str | None]]: def iter_use_specs(content: str) -> list[str]: """Return normalized Rust `use` / `pub use` specs from a file.""" - stripped = strip_rust_comments(content) - specs: list[str] = [] - for match in USE_STATEMENT_RE.finditer(stripped): - specs.extend(_expand_use_tree(match.group(1))) - return specs + return _iter_use_specs_with_pattern(content, USE_STATEMENT_RE) def iter_pub_use_specs(content: str) -> list[str]: """Return 
normalized `pub use` specs from a file.""" - stripped = strip_rust_comments(content) + return _iter_use_specs_with_pattern(content, PUB_USE_STATEMENT_RE) + + +def _iter_use_specs_with_pattern(content: str, pattern: re.Pattern[str]) -> list[str]: + """Extract `use` specs while ignoring string-literal contents. + + Rust files can contain natural-language strings (for example JSON tool + descriptions) with lines that begin with "use ...". We mask string literal + content before regex matching so import extraction only sees real code. + """ + stripped = strip_rust_comments(content, preserve_lines=True) + masked = _mask_rust_string_literals_preserve_lines(stripped) specs: list[str] = [] - for match in PUB_USE_STATEMENT_RE.finditer(stripped): - specs.extend(_expand_use_tree(match.group(1))) + for match in pattern.finditer(masked): + start, end = match.span(1) + specs.extend(_expand_use_tree(stripped[start:end])) return specs +def _mask_rust_string_literals_preserve_lines(content: str) -> str: + """Replace string literal contents with spaces while preserving newlines.""" + chars = list(content) + result = chars[:] + length = len(chars) + i = 0 + in_normal_string = False + raw_hash_count: int | None = None + while i < length: + ch = chars[i] + + if raw_hash_count is not None: + if ch == '"': + if raw_hash_count == 0: + result[i] = " " + raw_hash_count = None + i += 1 + continue + hash_count = raw_hash_count + hashes = "#" * hash_count + if content.startswith(hashes, i + 1): + result[i] = " " + for j in range(i + 1, i + 1 + hash_count): + result[j] = " " + raw_hash_count = None + i += 1 + hash_count + continue + result[i] = "\n" if ch == "\n" else " " + i += 1 + continue + + if in_normal_string: + if ch == "\\" and i + 1 < length: + result[i] = " " + result[i + 1] = "\n" if chars[i + 1] == "\n" else " " + i += 2 + continue + result[i] = "\n" if ch == "\n" else " " + if ch == '"': + in_normal_string = False + i += 1 + continue + + raw_prefix = 
_raw_string_prefix_length(chars, i) + if raw_prefix is not None: + prefix_len, hashes = raw_prefix + for j in range(i, i + prefix_len): + result[j] = " " + raw_hash_count = hashes + i += prefix_len + continue + + if ch == '"': + result[i] = " " + in_normal_string = True + i += 1 + continue + + if ch == "b" and i + 1 < length and chars[i + 1] == '"': + result[i] = " " + result[i + 1] = " " + in_normal_string = True + i += 2 + continue + + i += 1 + return "".join(result) + + +def _raw_string_prefix_length(chars: list[str], index: int) -> tuple[int, int] | None: + """Return (prefix_length, hash_count) for raw string prefixes at index.""" + length = len(chars) + j = index + if chars[j] == "b": + j += 1 + if j >= length: + return None + if chars[j] != "r": + return None + j += 1 + hash_count = 0 + while j < length and chars[j] == "#": + hash_count += 1 + j += 1 + if j >= length or chars[j] != '"': + return None + return (j - index + 1, hash_count) + + def find_manifest_dir(path: Path | str) -> Path | None: """Walk up from path to the nearest Cargo.toml root.""" candidate = Path(resolve_path(str(path))) @@ -240,9 +376,16 @@ def _read_manifest_data(manifest_dir: Path) -> dict[str, Any]: def build_workspace_package_index(scan_root: Path | None = None) -> dict[str, Path]: """Return local crate-name -> Cargo manifest dir for the active project root.""" root = find_workspace_root(scan_root) if scan_root is not None else get_project_root() + return _build_workspace_package_index_cached(root) + + +@functools.lru_cache(maxsize=8) +def _build_workspace_package_index_cached(root: Path) -> dict[str, Path]: + """Cached inner implementation of workspace package index building.""" + _exclusions = set(RUST_FILE_EXCLUSIONS) packages: dict[str, Path] = {} for manifest in root.rglob("Cargo.toml"): - if any(part in RUST_FILE_EXCLUSIONS for part in manifest.parts): + if any(part in _exclusions for part in manifest.relative_to(root).parts[:-1]): continue manifest_dir = 
manifest.parent.resolve() for name in { @@ -254,14 +397,21 @@ def build_workspace_package_index(scan_root: Path | None = None) -> dict[str, Pa return packages -def build_local_dependency_alias_index( - manifest_dir: Path, - package_index: dict[str, Path] | None = None, +@functools.lru_cache(maxsize=64) +def _build_local_dependency_alias_index_cached( + normalized_manifest_dir: Path, + workspace_root: Path, + package_index_items: tuple[tuple[str, str], ...], ) -> dict[str, Path]: - """Map local dependency aliases usable from one manifest to their crate roots.""" - normalized_manifest_dir = manifest_dir.resolve() - workspace_root = find_workspace_root(normalized_manifest_dir) - package_index = package_index or build_workspace_package_index(workspace_root) + """Cached implementation of local dependency alias extraction. + + Keying on primitive tuples keeps the cache hashable while preserving exact + package_index state for correctness. + """ + package_index: dict[str, Path] = { + name: Path(manifest_dir) + for name, manifest_dir in package_index_items + } workspace_aliases = _workspace_dependency_alias_index(workspace_root, package_index) aliases: dict[str, Path] = {} data = _read_manifest_data(normalized_manifest_dir) @@ -281,6 +431,24 @@ def build_local_dependency_alias_index( return aliases +def build_local_dependency_alias_index( + manifest_dir: Path, + package_index: dict[str, Path] | None = None, +) -> dict[str, Path]: + """Map local dependency aliases usable from one manifest to their crate roots.""" + normalized_manifest_dir = manifest_dir.resolve() + workspace_root = find_workspace_root(normalized_manifest_dir) + package_index = package_index or build_workspace_package_index(workspace_root) + package_index_items = tuple( + sorted((name, str(path.resolve())) for name, path in package_index.items()) + ) + return _build_local_dependency_alias_index_cached( + normalized_manifest_dir, + workspace_root, + package_index_items, + ) + + def 
_workspace_dependency_alias_index( workspace_root: Path, package_index: dict[str, Path], @@ -471,6 +639,7 @@ def resolve_mod_declaration( production_files: set[str], *, declared_path: str | None = None, + production_index: RustProductionFileIndex | None = None, ) -> str | None: """Resolve `mod foo;` to `foo.rs` or `foo/mod.rs` relative to the file's module dir.""" source = Path(resolve_path(str(source_file))).resolve() @@ -484,7 +653,11 @@ def resolve_mod_declaration( candidates.append(source.parent / declared_path) candidates.extend((base_dir / f"{module_name}.rs", base_dir / module_name / "mod.rs")) for candidate in candidates: - matched = _candidate_matches(candidate, production_files) + matched = _candidate_matches( + candidate, + production_files, + production_index=production_index, + ) if matched: return matched return None @@ -497,6 +670,7 @@ def resolve_use_spec( package_index: dict[str, Path] | None = None, *, allow_crate_root_fallback: bool = True, + production_index: RustProductionFileIndex | None = None, ) -> str | None: """Resolve a Rust `use` spec to a local module file when possible.""" cleaned = _normalize_use_spec(spec) @@ -521,6 +695,7 @@ def resolve_use_spec( context.root_files, segments[1:], production_files, + production_index=production_index, allow_root_fallback=allow_crate_root_fallback, ) ) @@ -532,6 +707,7 @@ def resolve_use_spec( context.root_files, resolved_segments, production_files, + production_index=production_index, allow_root_fallback=allow_crate_root_fallback, ) ) @@ -546,6 +722,7 @@ def resolve_use_spec( (manifest_dir / "src" / "lib.rs", manifest_dir / "src" / "main.rs"), segments[1:], production_files, + production_index=production_index, allow_root_fallback=allow_crate_root_fallback, ) ) @@ -555,6 +732,7 @@ def resolve_use_spec( context.root_files, list(context.module_segments) + segments, production_files, + production_index=production_index, allow_root_fallback=False, ) ) @@ -564,6 +742,7 @@ def resolve_use_spec( 
context.root_files, segments, production_files, + production_index=production_index, allow_root_fallback=allow_crate_root_fallback, ) ) @@ -578,6 +757,7 @@ def resolve_barrel_targets( filepath: str | Path, production_files: set[str], package_index: dict[str, Path] | None = None, + production_index: RustProductionFileIndex | None = None, ) -> set[str]: """Resolve `pub use` / `pub mod` targets from a Rust facade file.""" try: @@ -594,6 +774,7 @@ def resolve_barrel_targets( production_files, package_index, allow_crate_root_fallback=False, + production_index=production_index, ) if resolved: targets.add(resolved) @@ -603,6 +784,7 @@ def resolve_barrel_targets( filepath, production_files, declared_path=declared_path, + production_index=production_index, ) if resolved: targets.add(resolved) @@ -643,10 +825,15 @@ def _resolve_from_source_root( segments: list[str], production_files: set[str], *, + production_index: RustProductionFileIndex | None, allow_root_fallback: bool, ) -> str | None: if not segments: - return _match_root_files(root_files, production_files) + return _match_root_files( + root_files, + production_files, + production_index=production_index, + ) for width in range(len(segments), 0, -1): module_parts = segments[:width] @@ -655,42 +842,60 @@ def _resolve_from_source_root( file_candidate = source_root.joinpath(*module_parts).with_suffix(".rs") mod_candidate = source_root.joinpath(*module_parts, "mod.rs") for candidate in (file_candidate, mod_candidate): - matched = _candidate_matches(candidate, production_files) + matched = _candidate_matches( + candidate, + production_files, + production_index=production_index, + ) if matched: return matched if allow_root_fallback: - return _match_root_files(root_files, production_files) + return _match_root_files( + root_files, + production_files, + production_index=production_index, + ) return None -def _match_root_files(root_files: tuple[Path, ...], production_files: set[str]) -> str | None: +def _match_root_files( + 
root_files: tuple[Path, ...], + production_files: set[str], + *, + production_index: RustProductionFileIndex | None, +) -> str | None: for root_file in root_files: - matched = _candidate_matches(root_file, production_files) + matched = _candidate_matches( + root_file, + production_files, + production_index=production_index, + ) if matched: return matched return None -def _candidate_matches(candidate: Path, production_files: set[str]) -> str | None: +def _candidate_matches( + candidate: Path, + production_files: set[str], + *, + production_index: RustProductionFileIndex | None = None, +) -> str | None: + index = production_index or build_production_file_index(production_files) resolved_candidate = candidate.resolve() - project_root = get_project_root() candidate_abs = str(resolved_candidate) + absolute_match = index.by_absolute.get(candidate_abs) + if absolute_match is not None: + return absolute_match try: - candidate_rel = rel(resolved_candidate, project_root=project_root) + candidate_rel = rel(resolved_candidate, project_root=index.project_root) except (TypeError, ValueError, OSError): candidate_rel = None - - for production_file in production_files: - prod_path = Path(production_file) - if prod_path.is_absolute(): - normalized = str(prod_path.resolve()) - else: - normalized = str((project_root / prod_path).resolve()) - if normalized == candidate_abs: - return production_file - if candidate_rel is not None and production_file == candidate_rel: - return production_file + if candidate_rel is not None: + relative_match = index.by_relative.get(candidate_rel) + if relative_match is not None: + return relative_match return None @@ -804,7 +1009,9 @@ def _load_toml_dict(path: Path) -> dict[str, Any] | None: "RUST_FILE_EXCLUSIONS", "PUB_USE_STATEMENT_RE", "RustFileContext", + "RustProductionFileIndex", "USE_STATEMENT_RE", + "build_production_file_index", "build_workspace_package_index", "build_local_dependency_alias_index", "describe_rust_file", diff --git 
a/desloppify/languages/rust/tests/test_support.py b/desloppify/languages/rust/tests/test_support.py index f490fa303..5ca88e58c 100644 --- a/desloppify/languages/rust/tests/test_support.py +++ b/desloppify/languages/rust/tests/test_support.py @@ -5,7 +5,10 @@ from pathlib import Path from desloppify.languages.rust.support import ( + build_production_file_index, find_workspace_root, + iter_use_specs, + match_production_candidate, read_text_or_none, strip_rust_comments, ) @@ -80,3 +83,30 @@ def test_find_workspace_root_skips_invalid_nested_manifest(tmp_path): source = _write(tmp_path, "app/src/lib.rs", "pub fn run() {}\n") assert find_workspace_root(source) == tmp_path.resolve() + + +def test_iter_use_specs_ignores_use_text_inside_strings(): + content = r''' +fn registry() { + let description = r#" + use this wording in docs only; still not an import. + "#; +} +use crate::real::Thing; +''' + + specs = iter_use_specs(content) + + assert specs == ["crate::real::Thing"] + + +def test_match_production_candidate_uses_relative_index_key(tmp_path): + prod = _write(tmp_path, "src/lib.rs", "pub fn run() {}\n") + production_files = {"src/lib.rs"} + + from desloppify.base.runtime_state import RuntimeContext, runtime_scope + + with runtime_scope(RuntimeContext(project_root=tmp_path)): + index = build_production_file_index(production_files) + assert match_production_candidate(prod, production_files) == "src/lib.rs" + assert index.by_relative["src/lib.rs"] == "src/lib.rs" From a0b0fdec21ee2e0c830dff6a25515a652e43ee17 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:22:53 +0100 Subject: [PATCH 17/43] fix: binding-aware unused import detection for JS/TS (PR #433) Co-Authored-By: Tom --- .../treesitter/analysis/unused_imports.py | 294 +++++++++++++++ .../languages/_framework/treesitter/phases.py | 4 +- ...t_treesitter_complexity_and_integration.py | 335 ++++++++++++++++++ 3 files changed, 632 insertions(+), 1 deletion(-) diff --git 
a/desloppify/languages/_framework/treesitter/analysis/unused_imports.py b/desloppify/languages/_framework/treesitter/analysis/unused_imports.py index f06899918..0064064ea 100644 --- a/desloppify/languages/_framework/treesitter/analysis/unused_imports.py +++ b/desloppify/languages/_framework/treesitter/analysis/unused_imports.py @@ -19,6 +19,34 @@ logger = logging.getLogger(__name__) +_ECMASCRIPT_IMPORT_NODE_TYPE = "import_statement" + +# Identifier-ish nodes that represent a reference to a binding in JavaScript/TypeScript. +# JSX tag names are typically represented as `identifier` in tree-sitter-javascript/tsx, +# but we include `jsx_identifier` as well for compatibility with grammar variants. +_ECMASCRIPT_REFERENCE_NODE_TYPES = frozenset({ + "identifier", + "jsx_identifier", + "type_identifier", + "shorthand_property_identifier", +}) + +_ECMASCRIPT_ASSIGNMENT_PATTERN_NODE_TYPES = frozenset({ + "assignment_pattern", + "object_assignment_pattern", + "array_assignment_pattern", +}) + +_ECMASCRIPT_DECLARATION_NAME_NODE_TYPES = frozenset({ + # JS + "function_declaration", + "class_declaration", + # TS/TSX + "type_alias_declaration", + "interface_declaration", + "enum_declaration", +}) + def detect_unused_imports( file_list: list[str], @@ -37,6 +65,11 @@ def detect_unused_imports( logger.debug("tree-sitter init failed: %s", exc) return [] + # JavaScript/JSX: extract imported *local bindings* and check whether each + # binding is referenced in the file body. This avoids module-path heuristics. + if spec.grammar in ("javascript", "tsx"): + return _detect_unused_imports_ecmascript(file_list, spec, parser, language) + query = _make_query(language, spec.import_query) entries: list[dict] = [] @@ -89,6 +122,267 @@ def detect_unused_imports( return entries +def _detect_unused_imports_ecmascript( + file_list: list[str], + spec: TreeSitterLangSpec, + parser, + language, +) -> list[dict]: + """Binding-aware unused import detection for JavaScript/TypeScript (JSX/TSX). 
+ + Emits one entry per unused imported local binding: + {file, line, name, symbol} + + Side-effect-only imports (e.g. `import "x"`) are ignored. + """ + query = _make_query(language, f"({_ECMASCRIPT_IMPORT_NODE_TYPE}) @import") + entries: list[dict] = [] + + for filepath in file_list: + cached = get_or_parse_tree(filepath, parser, spec.grammar) + if cached is None: + continue + source, tree = cached + + # Some real-world repos contain stray NUL bytes (e.g. broken fixtures). + # Tree-sitter can treat these as parse-stopping errors, leading to false + # positives due to missing references. Replace NUL with space (same length) + # and re-parse for analysis. + if b"\x00" in source: + source = source.replace(b"\x00", b" ") + tree = parser.parse(source) + + # If the parse is still errorful, be conservative and skip this file to + # avoid false positives from incomplete trees. + if getattr(tree.root_node, "has_error", False): + continue + + matches = _run_query(query, tree.root_node) + if not matches: + continue + + referenced = _collect_ecmascript_references(tree.root_node) + + for _pattern_idx, captures in matches: + import_node = _unwrap_node(captures.get("import")) + if not import_node: + continue + + bindings = _extract_ecmascript_import_bindings(import_node) + if not bindings: + # Side-effect import (`import "x"`) or empty named import (`import {} from "x"`). 
+ continue + + line = import_node.start_point[0] + 1 + for symbol in bindings: + if symbol not in referenced: + entries.append({ + "file": filepath, + "line": line, + "name": symbol, + "symbol": symbol, + }) + + return entries + + +def _extract_ecmascript_import_bindings(import_node) -> list[str]: + """Extract local binding names from an ECMAScript import_statement node.""" + import_clause = None + for child in import_node.named_children: + if child.type == "import_clause": + import_clause = child + break + if import_clause is None: + return [] + + bindings: list[str] = [] + seen: set[str] = set() + + def add(name: str | None) -> None: + if not name or name in seen: + return + seen.add(name) + bindings.append(name) + + for child in import_clause.named_children: + # Default import: `import Foo from "x"` + if child.type == "identifier": + add(_node_text(child)) + continue + + # Namespace import: `import * as ns from "x"` + if child.type == "namespace_import": + for grand in child.named_children: + if grand.type == "identifier": + add(_node_text(grand)) + break + continue + + # Named imports: `import { a, b as c } from "x"` + if child.type == "named_imports": + for spec in child.named_children: + if spec.type != "import_specifier": + continue + alias = spec.child_by_field_name("alias") + name = spec.child_by_field_name("name") + add(_node_text(alias) if alias is not None else _node_text(name)) + continue + + return bindings + + +def _collect_ecmascript_references(root_node) -> set[str]: + """Collect identifier-like references outside ECMAScript import statements.""" + referenced: set[str] = set() + stack = [root_node] + + while stack: + node = stack.pop() + if node.type in _ECMASCRIPT_REFERENCE_NODE_TYPES and not _has_ancestor_type( + node, {_ECMASCRIPT_IMPORT_NODE_TYPE} + ): + if not _is_ecmascript_declaration_occurrence(node): + text = _node_text(node) + if text: + referenced.add(text) + + for child in reversed(node.named_children): + stack.append(child) + + return 
referenced + + +def _has_ancestor_type(node, ancestor_types: set[str]) -> bool: + parent = node.parent + while parent is not None: + if parent.type in ancestor_types: + return True + parent = parent.parent + return False + + +def _is_ecmascript_declaration_occurrence(node) -> bool: + """Return True when `node` appears in a declaration/binding position. + + This prevents counting declarations as references (e.g. destructuring patterns, + parameter names, catch parameters, type names). + + Not a full scope resolver; it is a conservative structural filter. + """ + # If we're on the right side of an assignment pattern, treat as an expression reference. + if _is_within_assignment_pattern_right(node): + return False + + cur = node + while cur is not None: + # Variable declarators: `const foo = ...`, `const {a: b} = ...` + if cur.type == "variable_declarator": + name = cur.child_by_field_name("name") + if name is not None and _is_descendant(name, node): + return True + + # TS/TSX params: `required_parameter` / `optional_parameter` pattern field. + if cur.type in ("required_parameter", "optional_parameter"): + pattern = cur.child_by_field_name("pattern") + if pattern is not None and _is_descendant(pattern, node): + return True + + # JS params: patterns live directly under `formal_parameters`. + if cur.type == "formal_parameters": + param_root = _direct_child_under(cur, node) + if param_root is not None: + # TS/TSX wraps params in required/optional_parameter; handled above. + if param_root.type not in ("required_parameter", "optional_parameter"): + if _is_param_binding_occurrence(param_root, node): + return True + + # Catch binding: `catch (e) { ... 
}` + if cur.type == "catch_clause": + param = cur.child_by_field_name("parameter") + if param is not None and _is_descendant(param, node): + return True + + # Declaration names (function/class/type/interface/enum) + if cur.type in _ECMASCRIPT_DECLARATION_NAME_NODE_TYPES: + name = cur.child_by_field_name("name") + if name is not None and _is_descendant(name, node): + return True + + # `for (const x of xs)` / `for (let x in xs)` binding. + if cur.type == "for_in_statement": + left = cur.child_by_field_name("left") + if left is not None and _is_descendant(left, node): + # Only treat as a declaration if preceded by a declaration keyword. + prev = left.prev_sibling + if prev is not None and prev.type in ("const", "let", "var"): + return True + + cur = cur.parent + + return False + + +def _is_within_assignment_pattern_right(node) -> bool: + """Return True if node appears within the `right` field of an assignment pattern.""" + cur = node + while cur is not None: + parent = cur.parent + if parent is None: + return False + if parent.type in _ECMASCRIPT_ASSIGNMENT_PATTERN_NODE_TYPES: + right = parent.child_by_field_name("right") + if right is not None and _is_descendant(right, node): + return True + cur = parent + return False + + +def _is_descendant(ancestor, node) -> bool: + cur = node + while cur is not None: + if cur == ancestor: + return True + cur = cur.parent + return False + + +def _direct_child_under(ancestor, node): + """Return the direct child of `ancestor` that contains `node`, if any.""" + cur = node + while cur is not None and cur.parent is not None and cur.parent != ancestor: + cur = cur.parent + if cur is not None and cur.parent == ancestor: + return cur + return None + + +def _is_param_binding_occurrence(param_root, node) -> bool: + """Return True if `node` is part of the parameter binding pattern. + + `param_root` is the direct child of `formal_parameters` that contains `node`. 
+ """ + if _is_within_assignment_pattern_right(node): + return False + + # `x` in `(x)` or `...rest` in `(...rest)` are bindings. + if param_root.type in ("identifier", "rest_pattern"): + return True + + # `x=Default` binds `x` on the left; right side is an expression. + if param_root.type == "assignment_pattern": + left = param_root.child_by_field_name("left") + if left is not None and _is_descendant(left, node): + return True + return False + + # Destructuring patterns (object/array) bind identifiers inside them. + if param_root.type in ("object_pattern", "array_pattern", "pair_pattern"): + return True + + return False + + def _extract_alias(import_node) -> str | None: """Extract alias name from import nodes. diff --git a/desloppify/languages/_framework/treesitter/phases.py b/desloppify/languages/_framework/treesitter/phases.py index 7438a750a..e05273a59 100644 --- a/desloppify/languages/_framework/treesitter/phases.py +++ b/desloppify/languages/_framework/treesitter/phases.py @@ -126,8 +126,10 @@ def run(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], dict[str, i entries = detect_unused_imports(file_list, spec) for e in entries: + symbol = e.get("symbol") + issue_name = f"unused_import::{e['line']}" + (f"::{symbol}" if symbol else "") issues.append(make_issue( - "unused", e["file"], f"unused_import::{e['line']}", + "unused", e["file"], issue_name, tier=3, confidence="medium", summary=f"Unused import: {e['name']}", )) diff --git a/desloppify/tests/lang/common/test_treesitter_complexity_and_integration.py b/desloppify/tests/lang/common/test_treesitter_complexity_and_integration.py index c3cbe82a1..28990a885 100644 --- a/desloppify/tests/lang/common/test_treesitter_complexity_and_integration.py +++ b/desloppify/tests/lang/common/test_treesitter_complexity_and_integration.py @@ -737,6 +737,341 @@ def test_no_import_query_returns_empty(self, tmp_path): entries = detect_unused_imports([], spec) assert entries == [] + def 
test_js_named_imports_all_used_no_issue(self, tmp_path): + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import { rateLimit, RateLimitConfig } from "@/lib/rate-limit"; + +console.log(rateLimit, RateLimitConfig); +""" + f = tmp_path / "main.js" + f.write_text(code) + + entries = detect_unused_imports([str(f)], JS_SPEC) + assert entries == [] + + def test_js_default_import_used_in_jsx_no_issue(self, tmp_path): + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import ReactMarkdown from "react-markdown"; + +export function App() { + return ; +} +""" + f = tmp_path / "main.jsx" + f.write_text(code) + + entries = detect_unused_imports([str(f)], JS_SPEC) + assert entries == [] + + def test_js_aliased_named_import_used_no_issue(self, tmp_path): + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import { foo as bar } from "x"; + +console.log(bar); +""" + f = tmp_path / "main.js" + f.write_text(code) + + entries = detect_unused_imports([str(f)], JS_SPEC) + assert entries == [] + + def test_js_namespace_import_used_no_issue(self, tmp_path): + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import * as ns from "x"; + +console.log(ns.foo); +""" + f = tmp_path / "main.js" + f.write_text(code) + + entries = detect_unused_imports([str(f)], JS_SPEC) + assert entries == [] + + def test_js_partially_unused_import_line_flags_only_unused_symbol(self, tmp_path): + from desloppify.languages._framework.treesitter import 
JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import { + used, + unused, +} from "x"; + +console.log(used); +""" + f = tmp_path / "main.js" + f.write_text(code) + + entries = detect_unused_imports([str(f)], JS_SPEC) + names = [e["name"] for e in entries] + assert "unused" in names + assert "used" not in names + assert len(entries) == 1 + + def test_js_side_effect_import_only_not_flagged(self, tmp_path): + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import "x"; + +console.log("hi"); +""" + f = tmp_path / "main.js" + f.write_text(code) + + entries = detect_unused_imports([str(f)], JS_SPEC) + assert entries == [] + + def test_js_unused_import_issue_id_includes_line_and_symbol(self, tmp_path): + from unittest.mock import MagicMock + + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.phases import ( + make_unused_imports_phase, + ) + + code = """\ +import { used, unused } from "x"; + +console.log(used); +""" + f = tmp_path / "main.js" + f.write_text(code) + + phase = make_unused_imports_phase(JS_SPEC) + mock_lang = MagicMock() + mock_lang.file_finder.return_value = [str(f)] + + issues, potentials = phase.run(tmp_path, mock_lang) + + assert potentials["unused_imports"] == 1 + assert len(issues) == 1 + assert issues[0]["id"].endswith("::unused_import::1::unused") + assert issues[0]["summary"] == "Unused import: unused" + + def test_ts_named_imports_all_used_no_issue(self, tmp_path): + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import { rateLimit, RateLimitConfig } from "@/lib/rate-limit"; + +export const x: 
RateLimitConfig = rateLimit(); +""" + f = tmp_path / "main.ts" + f.write_text(code) + + entries = detect_unused_imports([str(f)], TYPESCRIPT_SPEC) + assert entries == [] + + def test_ts_default_import_used_in_tsx_no_issue(self, tmp_path): + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import ReactMarkdown from "react-markdown"; + +export function App() { + return ; +} +""" + f = tmp_path / "main.tsx" + f.write_text(code) + + entries = detect_unused_imports([str(f)], TYPESCRIPT_SPEC) + assert entries == [] + + def test_ts_type_only_named_import_used_in_type_position_no_issue(self, tmp_path): + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import type { Foo } from "x"; + +export type X = Foo; +""" + f = tmp_path / "main.ts" + f.write_text(code) + + entries = detect_unused_imports([str(f)], TYPESCRIPT_SPEC) + assert entries == [] + + def test_ts_partially_unused_import_line_flags_only_unused_symbol(self, tmp_path): + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import { + used, + unused, +} from "x"; + +export const y = used; +""" + f = tmp_path / "main.ts" + f.write_text(code) + + entries = detect_unused_imports([str(f)], TYPESCRIPT_SPEC) + names = [e["name"] for e in entries] + assert "unused" in names + assert "used" not in names + assert len(entries) == 1 + + def test_ts_side_effect_import_only_not_flagged(self, tmp_path): + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + 
code = """\ +import "x"; + +export const x = 1; +""" + f = tmp_path / "main.ts" + f.write_text(code) + + entries = detect_unused_imports([str(f)], TYPESCRIPT_SPEC) + assert entries == [] + + def test_ts_unused_import_issue_id_includes_line_and_symbol(self, tmp_path): + from unittest.mock import MagicMock + + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.phases import ( + make_unused_imports_phase, + ) + + code = """\ +import { used, unused } from "x"; + +export const z = used; +""" + f = tmp_path / "main.ts" + f.write_text(code) + + phase = make_unused_imports_phase(TYPESCRIPT_SPEC) + mock_lang = MagicMock() + mock_lang.file_finder.return_value = [str(f)] + + issues, potentials = phase.run(tmp_path, mock_lang) + + assert potentials["unused_imports"] == 1 + assert len(issues) == 1 + assert issues[0]["id"].endswith("::unused_import::1::unused") + assert issues[0]["summary"] == "Unused import: unused" + + def test_js_destructuring_default_value_counts_as_usage(self, tmp_path): + """Default values inside destructuring patterns should count as usage.""" + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import { imported } from "x"; + +const { x = imported } = obj; +console.log(x); +""" + f = tmp_path / "main.js" + f.write_text(code) + + entries = detect_unused_imports([str(f)], JS_SPEC) + assert entries == [] + + def test_js_param_default_value_counts_as_usage(self, tmp_path): + """Parameter default values should count as usage (JS grammar).""" + from desloppify.languages._framework.treesitter import JS_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import { Bar } from "x"; + +function f(x = Bar) { + return x; +} +""" + f = tmp_path / "main.js" + f.write_text(code) + + 
entries = detect_unused_imports([str(f)], JS_SPEC) + assert entries == [] + + def test_ts_param_type_annotation_counts_as_usage(self, tmp_path): + """Parameter type annotations should count as usage (TSX grammar).""" + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = """\ +import type { Foo } from "x"; + +export function f(x: Foo) { + return x; +} +""" + f = tmp_path / "main.ts" + f.write_text(code) + + entries = detect_unused_imports([str(f)], TYPESCRIPT_SPEC) + assert entries == [] + + def test_ts_file_with_nul_byte_does_not_false_positive(self, tmp_path): + """Stray NUL bytes should not cause parse-truncation false positives.""" + from desloppify.languages._framework.treesitter import TYPESCRIPT_SPEC + from desloppify.languages._framework.treesitter.analysis.unused_imports import ( + detect_unused_imports, + ) + + code = ( + b'import { jest } from "@jest/globals";\n' + b'jest.spyOn(console, "log");\n' + b'\x00\n' + b'jest.spyOn(console, "warn");\n' + ) + f = tmp_path / "main.ts" + f.write_bytes(code) + + entries = detect_unused_imports([str(f)], TYPESCRIPT_SPEC) + assert entries == [] + # ── Signature variance tests ───────────────────────────────── From 0cbd123eeb0bf7fffdbf3df7588f68cf8941338c Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:23:53 +0100 Subject: [PATCH 18/43] fix: project root detection, force-rescan plan wipe, and manual cluster visibility (PR #439) --- desloppify/app/commands/scan/plan_reconcile.py | 1 - 1 file changed, 1 deletion(-) diff --git a/desloppify/app/commands/scan/plan_reconcile.py b/desloppify/app/commands/scan/plan_reconcile.py index 17be193a1..630194ce2 100644 --- a/desloppify/app/commands/scan/plan_reconcile.py +++ b/desloppify/app/commands/scan/plan_reconcile.py @@ -52,7 +52,6 @@ def _reset_cycle_for_force_rescan(plan: dict[str, object]) -> bool: order.remove(item) 
clear_score_communicated_sentinel(plan) clear_create_plan_sentinel(plan) - plan.pop("scan_count_at_plan_start", None) meta = plan.get("epic_triage_meta", {}) if isinstance(meta, dict): meta.pop("triage_recommended", None) From 8f1f6db140932a25cb74efd2b2b69515fbca63a2 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:24:36 +0100 Subject: [PATCH 19/43] perf(scan): detector prefetch + cache for faster scans (PR #432) Co-Authored-By: Tom --- desloppify/app/commands/scan/helpers.py | 36 +- desloppify/app/commands/scan/workflow.py | 78 +++- desloppify/engine/detectors/dupes.py | 198 +++++++- desloppify/engine/planning/scan.py | 8 +- .../review/context_holistic/budget/scan.py | 11 +- .../review/prepare_holistic_orchestration.py | 46 +- .../_framework/base/shared_phases_review.py | 435 +++++++++++++++++- .../_framework/runtime_support/runtime.py | 4 +- desloppify/languages/framework.py | 20 + .../tests/commands/scan/test_cmd_scan.py | 18 +- .../scan/test_scan_orchestrator_direct.py | 15 +- desloppify/tests/detectors/test_dupes.py | 66 +++ ...test_review_import_prepare_split_direct.py | 72 +++ ...ared_phases_and_structural_split_direct.py | 247 +++++++++- .../common/test_lang_runtime_isolation.py | 10 + .../tests/plan/test_plan_modules_direct.py | 56 +++ 16 files changed, 1272 insertions(+), 48 deletions(-) diff --git a/desloppify/app/commands/scan/helpers.py b/desloppify/app/commands/scan/helpers.py index 5469bce51..efc588aa2 100644 --- a/desloppify/app/commands/scan/helpers.py +++ b/desloppify/app/commands/scan/helpers.py @@ -77,16 +77,29 @@ def audit_excluded_dirs( return stale_issues -def collect_codebase_metrics(lang, path: Path) -> dict | None: +def collect_codebase_metrics( + lang, + path: Path, + *, + files: list[str] | None = None, +) -> dict | None: """Collect LOC/file/directory counts for the configured language.""" - if not lang or not lang.file_finder: + if not lang: return None - files = lang.file_finder(path) + if files is None and not 
lang.file_finder: + return None + scan_root = Path(path) + files = _resolve_scan_files(lang, scan_root, files=files) total_loc = 0 dirs = set() for filepath in files: try: - total_loc += count_lines(Path(filepath)) + abs_path = _resolve_scan_file_path(filepath, project_root=scan_root) + content = read_file_text(abs_path) + if content is not None: + total_loc += len(content.splitlines()) + else: + total_loc += count_lines(Path(abs_path)) dirs.add(str(Path(filepath).parent)) except (OSError, UnicodeDecodeError) as exc: logger.debug( @@ -101,6 +114,21 @@ def collect_codebase_metrics(lang, path: Path) -> dict | None: } +def _resolve_scan_files(lang, path: Path, *, files: list[str] | None = None) -> list[str]: + """Return discovered source files, preferring an explicit precomputed list.""" + if files is not None: + return files + return lang.file_finder(path) + + +def _resolve_scan_file_path(filepath: str, *, project_root: Path) -> str: + """Resolve relative scan filepaths against the active scan path.""" + file_path = Path(filepath) + if file_path.is_absolute(): + return str(file_path) + return str((project_root / file_path).resolve()) + + def resolve_scan_profile(profile: str | None, lang) -> str: """Resolve effective scan profile from CLI and language defaults.""" if profile in {"objective", "full", "ci"}: diff --git a/desloppify/app/commands/scan/workflow.py b/desloppify/app/commands/scan/workflow.py index ceb37a6eb..e04ca411a 100644 --- a/desloppify/app/commands/scan/workflow.py +++ b/desloppify/app/commands/scan/workflow.py @@ -119,6 +119,20 @@ def _ensure_state_lang_capabilities( ) +def _state_review_cache(state: StateModel) -> dict[str, object]: + """Return language review cache payload, creating storage when missing.""" + review_cache = state.get("review_cache") + if review_cache is None: + normalized: dict[str, object] = {} + state["review_cache"] = normalized + return normalized + if isinstance(review_cache, dict): + return review_cache + raise 
ScanStateContractError( + "state.review_cache must be an object when present" + ) + + def _state_issues(state: StateModel) -> dict[str, dict[str, Any]]: """Return normalized issue map from state.""" issues = state.get("work_items") @@ -191,7 +205,7 @@ def _configure_lang_runtime( runtime_lang = make_lang_run( lang, overrides=LangRunOverrides( - review_cache=state.get("review_cache", {}), + review_cache=_state_review_cache(state), review_max_age_days=config.get("review_max_age_days", 30), subjective_assessments=_state_subjective_assessments(state), runtime_settings=lang_settings, @@ -308,13 +322,16 @@ def prepare_scan_runtime(args: argparse.Namespace) -> ScanRuntime: def _augment_with_stale_exclusion_issues( issues: list[dict[str, Any]], runtime: ScanRuntime, + *, + scanned_files: list[str] | None = None, ) -> list[dict[str, Any]]: """Append stale exclude issues when excluded dirs are unreferenced.""" extra_exclusions = get_exclusions() if not (extra_exclusions and runtime.lang and runtime.lang.file_finder): return issues - scanned_files = runtime.lang.file_finder(runtime.path) + if scanned_files is None: + scanned_files = runtime.lang.file_finder(runtime.path) stale = audit_excluded_dirs( extra_exclusions, scanned_files, get_project_root() ) @@ -328,6 +345,21 @@ def _augment_with_stale_exclusion_issues( return augmented +def _resolve_scanned_files(runtime: ScanRuntime) -> list[str]: + """Resolve scan file list once for post-generation lifecycle steps.""" + if not runtime.lang: + return [] + zone_map = getattr(runtime.lang, "zone_map", None) + if zone_map is not None and hasattr(zone_map, "all_files"): + files = zone_map.all_files() + if isinstance(files, list): + return files + file_finder = getattr(runtime.lang, "file_finder", None) + if not file_finder: + return [] + return file_finder(runtime.path) + + def _augment_with_stale_wontfix_issues( issues: list[dict[str, Any]], runtime: ScanRuntime, @@ -360,27 +392,35 @@ def run_scan_generation( 
profile=runtime.profile, ), ) + scanned_files = _resolve_scanned_files(runtime) + codebase_metrics = collect_codebase_metrics( + runtime.lang, + runtime.path, + files=scanned_files, + ) + warn_explicit_lang_with_no_files( + runtime.args, runtime.lang, runtime.path, codebase_metrics + ) + issues = _augment_with_stale_exclusion_issues( + issues, + runtime, + scanned_files=scanned_files, + ) + decay_scans = _coerce_int( + runtime.config.get("wontfix_decay_scans"), + default=_WONTFIX_DECAY_SCANS_DEFAULT, + ) + issues, monitored_wontfix = _augment_with_stale_wontfix_issues( + issues, + runtime, + decay_scans=max(decay_scans, 0), + ) + potentials["stale_wontfix"] = monitored_wontfix + return issues, potentials, codebase_metrics finally: disable_parse_cache() disable_file_cache() - codebase_metrics = collect_codebase_metrics(runtime.lang, runtime.path) - warn_explicit_lang_with_no_files( - runtime.args, runtime.lang, runtime.path, codebase_metrics - ) - issues = _augment_with_stale_exclusion_issues(issues, runtime) - decay_scans = _coerce_int( - runtime.config.get("wontfix_decay_scans"), - default=_WONTFIX_DECAY_SCANS_DEFAULT, - ) - issues, monitored_wontfix = _augment_with_stale_wontfix_issues( - issues, - runtime, - decay_scans=max(decay_scans, 0), - ) - potentials["stale_wontfix"] = monitored_wontfix - return issues, potentials, codebase_metrics - def merge_scan_results( runtime: ScanRuntime, diff --git a/desloppify/engine/detectors/dupes.py b/desloppify/engine/detectors/dupes.py index 58edfc7b7..688fe3d76 100644 --- a/desloppify/engine/detectors/dupes.py +++ b/desloppify/engine/detectors/dupes.py @@ -17,6 +17,10 @@ PairKey: TypeAlias = tuple[str, str] MatchedPair: TypeAlias = tuple[int, int, float, str] +_DUPES_CACHE_VERSION = 1 +_DUPES_CACHE_MAX_NEAR_PAIRS = 20_000 +_DUPES_AUTOJUNK_MIN_LINES = 80 + class DuplicateMember(TypedDict): file: str @@ -34,6 +38,17 @@ class DuplicateEntry(TypedDict): cluster: list[DuplicateMember] +class _CachedFunctionMeta(TypedDict): + 
body_hash: str + loc: int + + +class _CachedNearPair(TypedDict): + a: str + b: str + similarity: float + + def _build_clusters( pairs: list[MatchedPair], n: int ) -> list[list[int]]: @@ -80,13 +95,146 @@ def _dupes_debug_settings() -> tuple[bool, int]: def _pair_key(fn_a: FunctionInfo, fn_b: FunctionInfo) -> PairKey: """Build a stable pair key for duplicate tracking.""" - def _identity(fn: FunctionInfo) -> str: - end_line = getattr(fn, "end_line", None) - if not isinstance(end_line, int): - end_line = int(getattr(fn, "line", 0)) + int(getattr(fn, "loc", 0)) - return f"{fn.file}:{fn.name}:{fn.line}:{end_line}" + return (_function_identity(fn_a), _function_identity(fn_b)) + + +def _function_identity(fn: FunctionInfo) -> str: + """Build a stable identity token for one function.""" + end_line = getattr(fn, "end_line", None) + if not isinstance(end_line, int): + end_line = int(getattr(fn, "line", 0)) + int(getattr(fn, "loc", 0)) + return f"{fn.file}:{fn.name}:{fn.line}:{end_line}" + + +def _build_function_cache_map( + functions: list[FunctionInfo], +) -> tuple[dict[str, _CachedFunctionMeta], dict[str, int]]: + """Build cache metadata and index map for function identities.""" + meta_by_id: dict[str, _CachedFunctionMeta] = {} + index_by_id: dict[str, int] = {} + for idx, fn in enumerate(functions): + func_id = _function_identity(fn) + meta_by_id[func_id] = { + "body_hash": fn.body_hash, + "loc": int(fn.loc), + } + index_by_id[func_id] = idx + return meta_by_id, index_by_id - return (_identity(fn_a), _identity(fn_b)) + +def _load_cached_near_pairs( + *, + cache: dict[str, object], + threshold: float, + functions: list[FunctionInfo], + function_meta: dict[str, _CachedFunctionMeta], + index_by_id: dict[str, int], + seen_pairs: set[PairKey], +) -> tuple[list[MatchedPair], set[str] | None]: + """Return reusable near-duplicate pairs and changed function identities. 
+ + Returns ``([], None)`` when cache is missing/incompatible, signaling that + near-duplicate pass should run in full mode. + """ + if cache.get("version") != _DUPES_CACHE_VERSION: + return [], None + + cached_threshold = cache.get("threshold") + if not isinstance(cached_threshold, int | float): + return [], None + if float(cached_threshold) != float(threshold): + return [], None + + cached_functions = cache.get("functions") + cached_near_pairs = cache.get("near_pairs") + if not isinstance(cached_functions, dict) or not isinstance(cached_near_pairs, list): + return [], None + + changed_ids: set[str] = set() + for func_id, meta in function_meta.items(): + previous = cached_functions.get(func_id) + if not isinstance(previous, dict): + changed_ids.add(func_id) + continue + if previous.get("body_hash") != meta["body_hash"]: + changed_ids.add(func_id) + continue + prev_loc = previous.get("loc") + if not isinstance(prev_loc, int): + changed_ids.add(func_id) + continue + if prev_loc != meta["loc"]: + changed_ids.add(func_id) + + reusable_pairs: list[MatchedPair] = [] + for raw_pair in cached_near_pairs: + if not isinstance(raw_pair, dict): + continue + left_id = raw_pair.get("a") + right_id = raw_pair.get("b") + similarity = raw_pair.get("similarity") + if ( + not isinstance(left_id, str) + or not isinstance(right_id, str) + or not isinstance(similarity, int | float) + ): + continue + if left_id in changed_ids or right_id in changed_ids: + continue + left_idx = index_by_id.get(left_id) + right_idx = index_by_id.get(right_id) + if left_idx is None or right_idx is None or left_idx == right_idx: + continue + + left_fn = functions[left_idx] + right_fn = functions[right_idx] + if left_fn.body_hash == right_fn.body_hash: + continue + pair_key = _pair_key(left_fn, right_fn) + if pair_key in seen_pairs: + continue + seen_pairs.add(pair_key) + reusable_pairs.append((left_idx, right_idx, float(similarity), "near-duplicate")) + + return reusable_pairs, changed_ids + + +def 
_store_dupes_cache( + *, + cache: dict[str, object], + threshold: float, + functions: list[FunctionInfo], + function_meta: dict[str, _CachedFunctionMeta], + pairs: list[MatchedPair], +) -> None: + """Persist near-duplicate cache payload for reuse on next scan.""" + near_pairs: list[_CachedNearPair] = [] + for left_idx, right_idx, similarity, kind in pairs: + if kind != "near-duplicate": + continue + left_id = _function_identity(functions[left_idx]) + right_id = _function_identity(functions[right_idx]) + near_pairs.append( + { + "a": left_id, + "b": right_id, + "similarity": round(float(similarity), 6), + } + ) + + near_pairs.sort(key=lambda pair: (-pair["similarity"], pair["a"], pair["b"])) + if len(near_pairs) > _DUPES_CACHE_MAX_NEAR_PAIRS: + near_pairs = near_pairs[:_DUPES_CACHE_MAX_NEAR_PAIRS] + + cache.clear() + cache.update( + { + "version": _DUPES_CACHE_VERSION, + "threshold": float(threshold), + "functions": function_meta, + "near_pairs": near_pairs, + } + ) def _collect_exact_duplicate_pairs( @@ -119,10 +267,13 @@ def _collect_near_duplicate_pairs( threshold: float, *, seen_pairs: set[PairKey], + active_indices: set[int] | None, debug: bool, debug_every: int, ) -> list[MatchedPair]: """Collect near-duplicate pairs using SequenceMatcher with pruning.""" + if active_indices is not None and not active_indices: + return [] large_idx = [(idx, fn) for idx, fn in enumerate(functions) if fn.loc >= 15] large_idx.sort(key=lambda item: item[1].loc) normalized_lines = [fn.normalized.splitlines() for fn in functions] @@ -152,6 +303,9 @@ def _collect_near_duplicate_pairs( pair_key = _pair_key(fn_a, fn_b) if pair_key in seen_pairs or fn_a.body_hash == fn_b.body_hash: continue + if active_indices is not None: + if idx_a not in active_indices and idx_b not in active_indices: + continue # ratio = 2*M/(len_a+len_b), with M <= min(len_a, len_b) len_a = normalized_line_counts[idx_a] @@ -168,7 +322,7 @@ def _collect_near_duplicate_pairs( None, normalized_lines[idx_a], 
normalized_lines[idx_b], - autojunk=False, + autojunk=len_a >= _DUPES_AUTOJUNK_MIN_LINES and len_b >= _DUPES_AUTOJUNK_MIN_LINES, ) if matcher.real_quick_ratio() < threshold: continue @@ -261,6 +415,8 @@ def _build_duplicate_entries( def detect_duplicates( functions: list[FunctionInfo], threshold: float = 0.9, + *, + cache: dict[str, object] | None = None, ) -> tuple[list[DuplicateEntry], int]: """Find duplicate or near-duplicate functions clustered by similarity.""" if not functions: @@ -269,11 +425,31 @@ def detect_duplicates( seen_pairs: set[PairKey] = set() pairs = _collect_exact_duplicate_pairs(functions, seen_pairs) + function_meta, index_by_id = _build_function_cache_map(functions) + active_indices: set[int] | None = None + if isinstance(cache, dict): + cached_pairs, changed_ids = _load_cached_near_pairs( + cache=cache, + threshold=threshold, + functions=functions, + function_meta=function_meta, + index_by_id=index_by_id, + seen_pairs=seen_pairs, + ) + pairs.extend(cached_pairs) + if changed_ids is not None: + active_indices = { + index_by_id[func_id] + for func_id in changed_ids + if func_id in index_by_id + } + pairs.extend( _collect_near_duplicate_pairs( functions, threshold, seen_pairs=seen_pairs, + active_indices=active_indices, debug=debug, debug_every=debug_every, ) @@ -281,6 +457,14 @@ def detect_duplicates( clusters = _build_clusters(pairs, len(functions)) entries = _build_duplicate_entries(functions, pairs, clusters) + if isinstance(cache, dict): + _store_dupes_cache( + cache=cache, + threshold=threshold, + functions=functions, + function_meta=function_meta, + pairs=pairs, + ) return sorted(entries, key=lambda e: (-e["similarity"], -e["cluster_size"])), len( functions ) diff --git a/desloppify/engine/planning/scan.py b/desloppify/engine/planning/scan.py index e00fbaae7..656a0cf22 100644 --- a/desloppify/engine/planning/scan.py +++ b/desloppify/engine/planning/scan.py @@ -12,6 +12,7 @@ from desloppify.engine.planning.helpers import 
is_subjective_phase from desloppify.engine.policy.zones import ZONE_POLICIES, FileZoneMap from desloppify.languages.framework import ( + clear_review_phase_prefetch, DetectorPhase, LangConfig, LangRun, @@ -20,6 +21,7 @@ capability_report, get_lang, make_lang_run, + prewarm_review_phase_detectors, ) from desloppify.state_io import Issue @@ -130,7 +132,11 @@ def _generate_issues_from_lang( """Run detector phases from a LangRun.""" _build_zone_map(path, lang, zone_overrides) phases = _select_phases(lang, include_slow=include_slow, profile=profile) - issues, all_potentials = _run_phases(path, lang, phases) + prewarm_review_phase_detectors(path, lang, phases) + try: + issues, all_potentials = _run_phases(path, lang, phases) + finally: + clear_review_phase_prefetch(lang) _stamp_issue_context(issues, lang) _stderr(f"\n Total: {len(issues)} issues") return issues, all_potentials diff --git a/desloppify/intelligence/review/context_holistic/budget/scan.py b/desloppify/intelligence/review/context_holistic/budget/scan.py index f0f55956c..33a1dafbd 100644 --- a/desloppify/intelligence/review/context_holistic/budget/scan.py +++ b/desloppify/intelligence/review/context_holistic/budget/scan.py @@ -9,7 +9,6 @@ from pathlib import Path from desloppify.base.discovery.file_paths import rel -from desloppify.intelligence.review.context import file_excerpt from .analysis import _count_signature_params, _extract_type_names from .axes import _assemble_context, _compute_sub_axes @@ -55,6 +54,14 @@ ) +def _excerpt_from_content(content: str, *, max_lines: int = 30) -> str: + """Return a short leading excerpt directly from in-memory file content.""" + lines = content.splitlines(keepends=True) + if len(lines) <= max_lines: + return content + return "".join(lines[:max_lines]) + f"\n... 
({len(lines) - max_lines} more lines)" + + @dataclasses.dataclass class _AbstractionsCollector: """Accumulated state for the abstractions scan pass.""" @@ -90,7 +97,7 @@ def _scan_file( basename = Path(rpath).stem.lower() if basename in {"utils", "helpers", "util", "helper", "common", "misc"}: col.util_files.append( - {"file": rpath, "loc": loc, "excerpt": file_excerpt(filepath) or ""} + {"file": rpath, "loc": loc, "excerpt": _excerpt_from_content(content)} ) signatures = _DEF_SIGNATURE_RE.findall(content) diff --git a/desloppify/intelligence/review/prepare_holistic_orchestration.py b/desloppify/intelligence/review/prepare_holistic_orchestration.py index 35c8ffb44..18e7120ba 100644 --- a/desloppify/intelligence/review/prepare_holistic_orchestration.py +++ b/desloppify/intelligence/review/prepare_holistic_orchestration.py @@ -217,12 +217,52 @@ def prepare_holistic_review_payload( continue batch_dims = batch_item.get("dimensions", []) if isinstance(batch_dims, list): - batch_item["dimension_contexts"] = { - d: dim_contexts[d] for d in batch_dims if d in dim_contexts - } + compact_contexts = _compact_batch_dimension_contexts( + dimensions=batch_dims, + all_contexts=dim_contexts, + ) + if compact_contexts: + batch_item["dimension_contexts"] = compact_contexts payload["investigation_batches"] = batches return payload +def _compact_batch_dimension_contexts( + *, + dimensions: list[str], + all_contexts: dict[str, Any], +) -> dict[str, dict[str, list[dict[str, object]]]]: + """Attach a prompt-facing slice of dimension contexts for each batch. + + Batch prompts only need insight headers and settled/positive flags. Keeping + this payload compact avoids duplicating full insight descriptions across all + batches while preserving packet-level full context in payload["dimension_contexts"]. 
+ """ + compact: dict[str, dict[str, list[dict[str, object]]]] = {} + for dimension in dimensions: + raw_context = all_contexts.get(dimension) + if not isinstance(raw_context, dict): + continue + raw_insights = raw_context.get("insights") + if not isinstance(raw_insights, list): + continue + insights: list[dict[str, object]] = [] + for item in raw_insights: + if not isinstance(item, dict): + continue + header = str(item.get("header", "")).strip() + if not header: + continue + insight: dict[str, object] = {"header": header} + if bool(item.get("settled", False)): + insight["settled"] = True + if bool(item.get("positive", False)): + insight["positive"] = True + insights.append(insight) + if insights: + compact[dimension] = {"insights": insights} + return compact + + __all__ = ["HolisticPrepareDependencies", "prepare_holistic_review_payload"] diff --git a/desloppify/languages/_framework/base/shared_phases_review.py b/desloppify/languages/_framework/base/shared_phases_review.py index a77419d35..1af09e747 100644 --- a/desloppify/languages/_framework/base/shared_phases_review.py +++ b/desloppify/languages/_framework/base/shared_phases_review.py @@ -2,8 +2,15 @@ from __future__ import annotations +import concurrent.futures +import hashlib +import logging +import os from collections.abc import Callable from pathlib import Path +from typing import Any + +logger = logging.getLogger(__name__) from desloppify.base.discovery.file_paths import rel from desloppify.base.output.terminal import log @@ -15,11 +22,17 @@ from desloppify.engine.detectors.test_coverage.detector import detect_test_coverage from desloppify.engine._state.filtering import make_issue from desloppify.engine.policy.zones import EXCLUDED_ZONES, filter_entries -from desloppify.languages._framework.base.types import DetectorEntry, LangRuntimeContract +from desloppify.languages._framework.base.types import ( + DetectorCoverageStatus, + DetectorEntry, + LangRuntimeContract, + LangSecurityResult, +) from 
desloppify.languages._framework.issue_factories import make_dupe_issues from desloppify.state_io import Issue from .shared_phases_helpers import ( + _coverage_to_dict, _entries_to_issues, _filter_boilerplate_entries_by_zone, _find_external_test_files, @@ -31,10 +44,367 @@ # security detector symbol from this module. detect_security_issues = _detect_security_issues_default +_DETECTOR_CACHE_VERSION = 1 +_PREFETCH_ATTR = "_shared_review_prefetch_futures" +_FUNCTION_CACHE_ATTR = "_shared_review_function_cache" +_PREFETCH_BOILERPLATE_KEY = "boilerplate" +_PREFETCH_SECURITY_KEY = "security_lang" +_PREFETCH_EXECUTOR = concurrent.futures.ThreadPoolExecutor(max_workers=2) + + +def _detector_cache(review_cache: object, detector: str) -> dict[str, object] | None: + """Return mutable detector cache payload from review cache.""" + if not isinstance(review_cache, dict): + return None + detectors = review_cache.get("detectors") + if not isinstance(detectors, dict): + detectors = {} + review_cache["detectors"] = detectors + payload = detectors.get(detector) + if not isinstance(payload, dict): + payload = {} + detectors[detector] = payload + return payload + + +def _dupes_cache(review_cache: object) -> dict[str, object] | None: + return _detector_cache(review_cache, "dupes") + + +def _boilerplate_cache(review_cache: object) -> dict[str, object] | None: + return _detector_cache(review_cache, "boilerplate") + + +def _security_cache(review_cache: object) -> dict[str, object] | None: + return _detector_cache(review_cache, "security") + + +def _get_prefetch_futures( + lang: object, + *, + create: bool, +) -> dict[str, concurrent.futures.Future[Any]]: + """Read/write in-memory review prefetch futures attached to LangRun.""" + payload = getattr(lang, _PREFETCH_ATTR, None) + if isinstance(payload, dict): + # Filter to only valid str->Future entries; rebuild only when needed. 
+ bad_keys = [ + k for k, v in payload.items() + if not isinstance(k, str) or not isinstance(v, concurrent.futures.Future) + ] + if bad_keys: + for k in bad_keys: + payload.pop(k, None) + return payload + if not create: + return {} + initialized: dict[str, concurrent.futures.Future[Any]] = {} + setattr(lang, _PREFETCH_ATTR, initialized) + return initialized + + +def _pop_prefetch_future( + lang: object, + key: str, +) -> concurrent.futures.Future[Any] | None: + """Detach and return one prefetch future.""" + futures = _get_prefetch_futures(lang, create=False) + future = futures.pop(key, None) + if not futures: + try: + delattr(lang, _PREFETCH_ATTR) + except AttributeError: + pass + if isinstance(future, concurrent.futures.Future): + return future + return None + + +def _consume_prefetch_result( + lang: object, + key: str, +) -> object | None: + """Return completed prefetch result, swallowing async failures.""" + future = _pop_prefetch_future(lang, key) + if future is None: + return None + try: + return future.result() + except Exception: + logger.debug("prefetch %s failed, falling back to synchronous run", key, exc_info=True) + return None + + +def _has_phase( + phases: list[object], + *, + labels: set[str], + run_names: set[str], +) -> bool: + for phase in phases: + label = str(getattr(phase, "label", "")).strip().lower() + run = getattr(phase, "run", None) + run_name = str(getattr(run, "__name__", "")).strip().lower() + run_func = getattr(run, "func", None) + run_func_name = str(getattr(run_func, "__name__", "")).strip().lower() + if label in labels or run_name in run_names or run_func_name in run_names: + return True + return False + + +def _resolve_review_functions(path: Path, lang: LangRuntimeContract): + """Resolve language function extraction once per scan path.""" + cache = getattr(lang, _FUNCTION_CACHE_ATTR, None) + if not isinstance(cache, dict): + cache = {} + setattr(lang, _FUNCTION_CACHE_ATTR, cache) + cache_key = str(path.resolve()) + cached = 
cache.get(cache_key) + if isinstance(cached, list): + return cached + extracted = lang.extract_functions(path) + cache[cache_key] = extracted + return extracted + + +def _resolve_detector_files(path: Path, lang: LangRuntimeContract) -> list[str]: + """Resolve a detector file list for cache fingerprinting.""" + zone_map = getattr(lang, "zone_map", None) + if zone_map is not None and hasattr(zone_map, "all_files"): + zone_files = zone_map.all_files() + if isinstance(zone_files, list): + return zone_files + file_finder = getattr(lang, "file_finder", None) + if file_finder: + return file_finder(path) + return [] + + +def _resolve_detector_file_path(scan_root: Path, filepath: str) -> Path: + """Resolve a detector file path against the active scan root.""" + file_path = Path(filepath) + if file_path.is_absolute(): + return file_path + return (scan_root / file_path).resolve() + + +def _file_fingerprint( + *, + scan_root: Path, + files: list[str], + zone_map=None, + include_zone: bool = False, + salt: str = "", +) -> str: + """Build a stable file-signature hash from path + mtime + size + zone.""" + hasher = hashlib.blake2b(digest_size=20) + hasher.update(str(scan_root.resolve()).encode("utf-8", errors="replace")) + hasher.update(b"\0") + hasher.update(salt.encode("utf-8", errors="replace")) + hasher.update(b"\0") + for filepath in sorted({str(item) for item in files}): + resolved = _resolve_detector_file_path(scan_root, filepath) + normalized = filepath.replace("\\", "/") + hasher.update(normalized.encode("utf-8", errors="replace")) + hasher.update(b"\0") + try: + stats = os.stat(resolved) + hasher.update(str(stats.st_size).encode("ascii", errors="ignore")) + hasher.update(b"\0") + hasher.update(str(stats.st_mtime_ns).encode("ascii", errors="ignore")) + hasher.update(b"\0") + except OSError: + hasher.update(b"-1\0-1\0") + if include_zone and zone_map is not None: + zone = zone_map.get(filepath) + zone_value = getattr(zone, "value", zone) + hasher.update(str(zone_value or 
"").encode("utf-8", errors="replace")) + hasher.update(b"\0") + return hasher.hexdigest() + + +def _load_cached_boilerplate_entries( + cache: dict[str, object], + *, + fingerprint: str, +) -> list[dict] | None: + """Load cached boilerplate entries when fingerprint is unchanged.""" + if cache.get("version") != _DETECTOR_CACHE_VERSION: + return None + if cache.get("fingerprint") != fingerprint: + return None + entries = cache.get("entries") + if not isinstance(entries, list): + return None + return [entry for entry in entries if isinstance(entry, dict)] + + +def _store_cached_boilerplate_entries( + cache: dict[str, object], + *, + fingerprint: str, + entries: list[dict], +) -> None: + """Persist boilerplate detector entries for unchanged scans.""" + cache.clear() + cache.update( + { + "version": _DETECTOR_CACHE_VERSION, + "fingerprint": fingerprint, + "entries": [entry for entry in entries if isinstance(entry, dict)], + } + ) + + +def _coverage_from_record(payload: object) -> DetectorCoverageStatus | None: + """Rebuild coverage dataclass from serialized cache payload.""" + if not isinstance(payload, dict): + return None + detector = str(payload.get("detector", "")).strip() + status = str(payload.get("status", "")).strip() + if not detector or status not in {"full", "reduced"}: + return None + confidence_raw = payload.get("confidence", 1.0) + try: + confidence = float(confidence_raw) + except (TypeError, ValueError): + confidence = 1.0 + return DetectorCoverageStatus( + detector=detector, + status=status, + confidence=confidence, + summary=str(payload.get("summary", "") or ""), + impact=str(payload.get("impact", "") or ""), + remediation=str(payload.get("remediation", "") or ""), + tool=str(payload.get("tool", "") or ""), + reason=str(payload.get("reason", "") or ""), + ) + + +def _load_cached_security_result( + cache: dict[str, object], + *, + fingerprint: str, +) -> LangSecurityResult | None: + """Load cached language-specific security result when unchanged.""" + if 
cache.get("version") != _DETECTOR_CACHE_VERSION: + return None + if cache.get("fingerprint") != fingerprint: + return None + entries = cache.get("entries") + files_scanned = cache.get("files_scanned") + if not isinstance(entries, list) or not isinstance(files_scanned, int): + return None + normalized_entries = [entry for entry in entries if isinstance(entry, dict)] + return LangSecurityResult( + entries=normalized_entries, + files_scanned=max(0, files_scanned), + coverage=_coverage_from_record(cache.get("coverage")), + ) + + +def _store_cached_security_result( + cache: dict[str, object], + *, + fingerprint: str, + result: LangSecurityResult, +) -> None: + """Persist language-specific security results for unchanged scans.""" + cache.clear() + cache.update( + { + "version": _DETECTOR_CACHE_VERSION, + "fingerprint": fingerprint, + "entries": [entry for entry in result.entries if isinstance(entry, dict)], + "files_scanned": max(0, int(result.files_scanned)), + "coverage": ( + _coverage_to_dict(result.coverage) if result.coverage is not None else None + ), + } + ) + + +def prewarm_review_phase_detectors( + path: Path, + lang: LangRuntimeContract, + phases: list[object], +) -> None: + """Start expensive shared review detectors in background for overlap.""" + futures = _get_prefetch_futures(lang, create=True) + + if _has_phase( + phases, + labels={"boilerplate duplication"}, + run_names={"phase_boilerplate_duplication"}, + ): + boilerplate_cache = _boilerplate_cache(getattr(lang, "review_cache", None)) + detector_files = _resolve_detector_files(path, lang) + fingerprint = _file_fingerprint( + scan_root=path, + files=detector_files, + salt=f"boilerplate:{getattr(lang, 'name', '')}", + ) + cached_entries = ( + _load_cached_boilerplate_entries(boilerplate_cache, fingerprint=fingerprint) + if isinstance(boilerplate_cache, dict) + else None + ) + if cached_entries is None and _PREFETCH_BOILERPLATE_KEY not in futures: + futures[_PREFETCH_BOILERPLATE_KEY] = 
_PREFETCH_EXECUTOR.submit( + detect_with_jscpd, + path, + ) + + if _has_phase( + phases, + labels={"security"}, + run_names={"phase_security"}, + ): + file_finder = getattr(lang, "file_finder", None) + files = file_finder(path) if file_finder else [] + zone_map = getattr(lang, "zone_map", None) + security_cache = _security_cache(getattr(lang, "review_cache", None)) + fingerprint = _file_fingerprint( + scan_root=path, + files=files, + zone_map=zone_map, + include_zone=True, + salt=f"security:{getattr(lang, 'name', '')}", + ) + cached_result = ( + _load_cached_security_result(security_cache, fingerprint=fingerprint) + if isinstance(security_cache, dict) + else None + ) + if cached_result is None and _PREFETCH_SECURITY_KEY not in futures: + futures[_PREFETCH_SECURITY_KEY] = _PREFETCH_EXECUTOR.submit( + lang.detect_lang_security_detailed, + files, + zone_map, + ) + + +def clear_review_phase_prefetch(lang: object) -> None: + """Drop in-memory prefetch futures and function caches after scan run.""" + futures = _get_prefetch_futures(lang, create=False) + for future in futures.values(): + if isinstance(future, concurrent.futures.Future) and not future.done(): + future.cancel() + if hasattr(lang, _PREFETCH_ATTR): + try: + delattr(lang, _PREFETCH_ATTR) + except AttributeError: + pass + if hasattr(lang, _FUNCTION_CACHE_ATTR): + try: + delattr(lang, _FUNCTION_CACHE_ATTR) + except AttributeError: + pass + def phase_dupes(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], dict[str, int]]: """Shared phase runner: detect duplicate functions via lang.extract_functions.""" - functions = lang.extract_functions(path) + functions = _resolve_review_functions(path, lang) if lang.zone_map is not None: before = len(functions) @@ -47,7 +417,10 @@ def phase_dupes(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], dic if excluded: log(f" zones: {excluded} functions excluded (non-production)") - entries, total_functions = detect_duplicates(functions) + entries, 
total_functions = detect_duplicates( + functions, + cache=_dupes_cache(getattr(lang, "review_cache", None)), + ) issues = make_dupe_issues(entries, log) return issues, {"dupes": total_functions} @@ -57,7 +430,29 @@ def phase_boilerplate_duplication( lang: LangRuntimeContract, ) -> tuple[list[Issue], dict[str, int]]: """Shared phase runner: detect repeated boilerplate code via jscpd.""" - entries = detect_with_jscpd(path) + cache = _boilerplate_cache(getattr(lang, "review_cache", None)) + detector_files = _resolve_detector_files(path, lang) + fingerprint = _file_fingerprint( + scan_root=path, + files=detector_files, + salt=f"boilerplate:{getattr(lang, 'name', '')}", + ) + entries = ( + _load_cached_boilerplate_entries(cache, fingerprint=fingerprint) + if isinstance(cache, dict) + else None + ) + if entries is None: + prefetched = _consume_prefetch_result(lang, _PREFETCH_BOILERPLATE_KEY) + entries = prefetched if isinstance(prefetched, list) else None + if entries is None: + entries = detect_with_jscpd(path) + if isinstance(cache, dict) and entries is not None: + _store_cached_boilerplate_entries( + cache, + fingerprint=fingerprint, + entries=entries, + ) if entries is None: return [], {} entries = _filter_boilerplate_entries_by_zone(entries, lang.zone_map) @@ -116,7 +511,33 @@ def phase_security( ) lang_scanned = 0 - lang_result = lang.detect_lang_security_detailed(files, zone_map) + security_cache = _security_cache(getattr(lang, "review_cache", None)) + security_fingerprint = _file_fingerprint( + scan_root=path, + files=files, + zone_map=zone_map, + include_zone=True, + salt=f"security:{getattr(lang, 'name', '')}", + ) + lang_result = ( + _load_cached_security_result( + security_cache, + fingerprint=security_fingerprint, + ) + if isinstance(security_cache, dict) + else None + ) + if lang_result is None: + prefetched = _consume_prefetch_result(lang, _PREFETCH_SECURITY_KEY) + lang_result = prefetched if isinstance(prefetched, LangSecurityResult) else None + if 
lang_result is None: + lang_result = lang.detect_lang_security_detailed(files, zone_map) + if isinstance(security_cache, dict): + _store_cached_security_result( + security_cache, + fingerprint=security_fingerprint, + result=lang_result, + ) lang_entries = lang_result.entries lang_scanned = max(0, int(lang_result.files_scanned)) _record_detector_coverage(lang, lang_result.coverage) @@ -255,7 +676,7 @@ def phase_signature(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], """Shared phase runner: detect signature variance via lang.extract_functions.""" from desloppify.engine.detectors.signature import detect_signature_variance - functions = lang.extract_functions(path) + functions = _resolve_review_functions(path, lang) issues: list[Issue] = [] potentials: dict[str, int] = {} @@ -286,9 +707,11 @@ def phase_signature(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], __all__ = [ + "clear_review_phase_prefetch", "phase_boilerplate_duplication", "phase_dupes", "phase_private_imports", + "prewarm_review_phase_detectors", "phase_security", "phase_signature", "phase_subjective_review", diff --git a/desloppify/languages/_framework/runtime_support/runtime.py b/desloppify/languages/_framework/runtime_support/runtime.py index 27965b1a8..b0a5c387f 100644 --- a/desloppify/languages/_framework/runtime_support/runtime.py +++ b/desloppify/languages/_framework/runtime_support/runtime.py @@ -150,9 +150,9 @@ def __dir__(self): def _coerce_lang_override(field_name: str, value: object) -> object: """Normalize override values to LangRuntimeState-compatible payloads.""" if field_name in _LANG_OVERRIDE_DICT_FIELDS: - return value or {} + return value if isinstance(value, dict) else {} if field_name in _LANG_OVERRIDE_LIST_FIELDS: - return value or [] + return value if isinstance(value, list) else [] if field_name in _LANG_OVERRIDE_INT_FIELDS: return int(value or 0) if field_name == "review_max_age_days": diff --git a/desloppify/languages/framework.py 
b/desloppify/languages/framework.py index 3430d5118..508738c80 100644 --- a/desloppify/languages/framework.py +++ b/desloppify/languages/framework.py @@ -75,6 +75,24 @@ def reset_script_import_caches(scan_path: str | None = None) -> None: _reset_script_import_caches(scan_path) +def prewarm_review_phase_detectors(path, lang, phases) -> None: + """Prime expensive shared review detectors for overlap during scan.""" + from desloppify.languages._framework.base.shared_phases_review import ( + prewarm_review_phase_detectors as _prewarm_review_phase_detectors, + ) + + _prewarm_review_phase_detectors(path, lang, phases) + + +def clear_review_phase_prefetch(lang) -> None: + """Clear in-memory shared review detector prefetch state.""" + from desloppify.languages._framework.base.shared_phases_review import ( + clear_review_phase_prefetch as _clear_review_phase_prefetch, + ) + + _clear_review_phase_prefetch(lang) + + __all__ = [ "BoundaryRule", "LangConfig", @@ -90,12 +108,14 @@ def reset_script_import_caches(scan_path: str | None = None) -> None: "auto_detect_lang", "available_langs", "capability_report", + "clear_review_phase_prefetch", "disable_parse_cache", "enable_parse_cache", "get_lang", "load_all", "make_lang_run", "make_lang_config", + "prewarm_review_phase_detectors", "reset_script_import_caches", "registry_state", "shared_phase_labels", diff --git a/desloppify/tests/commands/scan/test_cmd_scan.py b/desloppify/tests/commands/scan/test_cmd_scan.py index da2499273..03960276b 100644 --- a/desloppify/tests/commands/scan/test_cmd_scan.py +++ b/desloppify/tests/commands/scan/test_cmd_scan.py @@ -527,6 +527,23 @@ def file_finder(self, path): assert result["total_loc"] == 6 # 2 + 1 + 3 assert result["total_directories"] == 2 # tmp_path and sub + def test_uses_precomputed_file_list_when_provided(self, tmp_path): + file_path = tmp_path / "a.py" + file_path.write_text("line1\nline2\n") + + class FakeLang: + def file_finder(self, _path): + raise AssertionError("file_finder should 
not run when files are provided") + + result = collect_codebase_metrics( + FakeLang(), + tmp_path, + files=[str(file_path)], + ) + assert result is not None + assert result["total_files"] == 1 + assert result["total_loc"] == 2 + # --------------------------------------------------------------------------- # warn_explicit_lang_with_no_files @@ -581,4 +598,3 @@ class FakeLang: # show_post_scan_analysis # --------------------------------------------------------------------------- - diff --git a/desloppify/tests/commands/scan/test_scan_orchestrator_direct.py b/desloppify/tests/commands/scan/test_scan_orchestrator_direct.py index 3a4e3d094..267a3f2fa 100644 --- a/desloppify/tests/commands/scan/test_scan_orchestrator_direct.py +++ b/desloppify/tests/commands/scan/test_scan_orchestrator_direct.py @@ -83,14 +83,24 @@ def test_run_scan_generation_uses_planning_scan_surface(monkeypatch) -> None: ([{"id": "open-1"}], {"smells": 1}), )[1], ) - monkeypatch.setattr(scan_workflow_mod, "collect_codebase_metrics", lambda _lang, _path: {"loc": 10}) + monkeypatch.setattr( + scan_workflow_mod, + "collect_codebase_metrics", + lambda _lang, _path, **_kwargs: ( + calls.setdefault("metrics_kwargs", _kwargs), + {"loc": 10}, + )[1], + ) monkeypatch.setattr(scan_workflow_mod, "warn_explicit_lang_with_no_files", lambda *_a, **_k: None) monkeypatch.setattr(scan_workflow_mod, "get_exclusions", lambda: []) monkeypatch.setattr(scan_workflow_mod, "_augment_stale_wontfix_impl", lambda issues, **_k: (issues, 0)) runtime = SimpleNamespace( path=".", - lang=SimpleNamespace(file_finder=None), + lang=SimpleNamespace( + file_finder=None, + zone_map=SimpleNamespace(all_files=lambda: ["src/a.py"]), + ), effective_include_slow=True, zone_overrides={"src": "prod"}, profile="full", @@ -110,6 +120,7 @@ def test_run_scan_generation_uses_planning_scan_surface(monkeypatch) -> None: assert calls["generate"][2].include_slow is True assert calls["generate"][2].zone_overrides == {"src": "prod"} assert 
calls["generate"][2].profile == "full" + assert calls["metrics_kwargs"] == {"files": ["src/a.py"]} assert calls["file_cache_on"] is True assert calls["file_cache_off"] is True assert calls["parse_cache_on"] is True diff --git a/desloppify/tests/detectors/test_dupes.py b/desloppify/tests/detectors/test_dupes.py index ff91c2645..6c4f06be6 100644 --- a/desloppify/tests/detectors/test_dupes.py +++ b/desloppify/tests/detectors/test_dupes.py @@ -2,6 +2,7 @@ import hashlib +import desloppify.engine.detectors.dupes as dupes_mod from desloppify.engine.detectors.base import FunctionInfo from desloppify.engine.detectors.dupes import detect_duplicates @@ -231,3 +232,68 @@ def test_same_file_same_name_pairs_do_not_collapse(self): assert total == 4 assert len(entries) == 2 assert all(entry["kind"] == "exact" for entry in entries) + + def test_near_duplicate_cache_reuses_pairs_without_matcher(self, monkeypatch): + base_lines = [f" result = compute_value_{i}(x, y, z)" for i in range(20)] + body_a = "\n".join(base_lines) + changed = base_lines.copy() + changed[-1] = " result = compute_value_19(x, y, w)" + body_b = "\n".join(changed) + fns = [ + _make_fn("foo", "a.py", body_a, loc=20), + _make_fn("bar", "b.py", body_b, loc=20), + ] + cache: dict[str, object] = {} + first_entries, total = detect_duplicates(fns, threshold=0.8, cache=cache) + assert total == 2 + assert len(first_entries) == 1 + assert isinstance(cache.get("near_pairs"), list) + assert cache.get("near_pairs") + + class _NoMatcher: + def __init__(self, *_args, **_kwargs): + raise AssertionError("near matcher should not run for unchanged cached pairs") + + monkeypatch.setattr( + "desloppify.engine.detectors.dupes.difflib.SequenceMatcher", + _NoMatcher, + ) + second_entries, second_total = detect_duplicates( + fns, + threshold=0.8, + cache=cache, + ) + assert second_total == 2 + assert len(second_entries) == 1 + assert second_entries[0]["kind"] == "near-duplicate" + + def 
test_cache_threshold_mismatch_falls_back_to_full_near_pass(self, monkeypatch): + base_lines = [f" result = compute_value_{i}(x, y, z)" for i in range(20)] + body_a = "\n".join(base_lines) + changed = base_lines.copy() + changed[-1] = " result = compute_value_19(x, y, w)" + body_b = "\n".join(changed) + fns = [ + _make_fn("foo", "a.py", body_a, loc=20), + _make_fn("bar", "b.py", body_b, loc=20), + ] + cache: dict[str, object] = {} + detect_duplicates(fns, threshold=0.8, cache=cache) + + real_matcher = dupes_mod.difflib.SequenceMatcher + calls = {"count": 0} + + class _CountingMatcher(real_matcher): + def __init__(self, *args, **kwargs): + calls["count"] += 1 + super().__init__(*args, **kwargs) + + monkeypatch.setattr( + "desloppify.engine.detectors.dupes.difflib.SequenceMatcher", + _CountingMatcher, + ) + entries, total = detect_duplicates(fns, threshold=0.95, cache=cache) + assert total == 2 + assert calls["count"] > 0 + assert len(entries) == 1 + assert cache["threshold"] == 0.95 diff --git a/desloppify/tests/intelligence/test_review_import_prepare_split_direct.py b/desloppify/tests/intelligence/test_review_import_prepare_split_direct.py index f32ff6a67..088f6b52e 100644 --- a/desloppify/tests/intelligence/test_review_import_prepare_split_direct.py +++ b/desloppify/tests/intelligence/test_review_import_prepare_split_direct.py @@ -296,6 +296,78 @@ def test_authorization_collector_includes_with_auth_siblings_same_directory() -> ] +def test_prepare_holistic_payload_compacts_batch_dimension_contexts() -> None: + deps = orchestration_mod.HolisticPrepareDependencies( + is_file_cache_enabled_fn=lambda: False, + enable_file_cache_fn=lambda: None, + disable_file_cache_fn=lambda: None, + build_holistic_context_fn=lambda *_args, **_kwargs: {"codebase_stats": {"total_files": 1}}, + build_review_context_fn=lambda *_args, **_kwargs: SimpleNamespace(), + load_dimensions_for_lang_fn=lambda _name: ( + ["naming_quality"], + {"naming_quality": {"prompt": "Assess naming"}}, + "sys", 
+ ), + resolve_dimensions_fn=lambda cli_dimensions, default_dimensions: cli_dimensions or default_dimensions, + get_lang_guidance_fn=lambda _name: "guide", + assemble_holistic_batches_fn=lambda *_args, **_kwargs: [ + { + "name": "Naming", + "dimensions": ["naming_quality"], + "files_to_read": ["src/a.py"], + "why": "seed", + } + ], + holistic_batch_deps=holistic_batches_mod.HolisticBatchAssemblyDependencies( + build_investigation_batches_fn=lambda *_args, **_kwargs: [], + batch_concerns_fn=lambda *_args, **_kwargs: None, + filter_batches_to_dimensions_fn=lambda batches, _dims, **_kwargs: batches, + append_full_sweep_batch_fn=lambda **_kwargs: None, + log_best_effort_failure_fn=lambda *_args, **_kwargs: None, + logger=object(), + ), + serialize_context_fn=lambda _ctx: {}, + ) + payload = orchestration_mod.prepare_holistic_review_payload( + Path("."), + SimpleNamespace(name="python", file_finder=lambda _path: ["src/a.py"], zone_map=None), + state={ + "dimension_contexts": { + "naming_quality": { + "insights": [ + { + "header": "Names map to command intent", + "description": "Full rationale should remain packet-level only.", + "settled": True, + "positive": True, + } + ] + } + } + }, + options=SimpleNamespace( + files=["src/a.py"], + dimensions=["naming_quality"], + include_full_sweep=False, + max_files_per_batch=10, + include_issue_history=False, + issue_history_max_issues=10, + issue_history_max_batch_items=5, + ), + deps=deps, + ) + + assert payload["dimension_contexts"]["naming_quality"]["insights"][0]["description"] + batch_ctx = payload["investigation_batches"][0]["dimension_contexts"]["naming_quality"] + assert batch_ctx["insights"] == [ + { + "header": "Names map to command intent", + "settled": True, + "positive": True, + } + ] + + def test_holistic_batch_assembly_skips_concerns_for_inactive_dimension() -> None: deps = holistic_batches_mod.HolisticBatchAssemblyDependencies( build_investigation_batches_fn=lambda *_args, **_kwargs: [ diff --git 
a/desloppify/tests/lang/common/test_framework_shared_phases_and_structural_split_direct.py b/desloppify/tests/lang/common/test_framework_shared_phases_and_structural_split_direct.py index 336448758..5b2bc4cf5 100644 --- a/desloppify/tests/lang/common/test_framework_shared_phases_and_structural_split_direct.py +++ b/desloppify/tests/lang/common/test_framework_shared_phases_and_structural_split_direct.py @@ -2,6 +2,7 @@ from __future__ import annotations +import concurrent.futures from pathlib import Path from types import SimpleNamespace @@ -9,6 +10,7 @@ import desloppify.languages._framework.base.shared_phases_structural as structural_mod import desloppify.languages._framework.generic_support.structural as generic_structural_mod from desloppify.engine.policy.zones import Zone +from desloppify.languages._framework.base.types import LangSecurityResult def test_phase_dupes_filters_non_production_functions(monkeypatch) -> None: @@ -21,12 +23,16 @@ def test_phase_dupes_filters_non_production_functions(monkeypatch) -> None: zone_map=SimpleNamespace( get=lambda file_path: Zone.TEST if "tests/" in str(file_path) else Zone.PRODUCTION ), + review_cache={}, ) captured: dict[str, int] = {} - def _fake_detect(filtered): + def _fake_detect(filtered, **kwargs): captured["count"] = len(filtered) + cache_payload = kwargs.get("cache") + assert isinstance(cache_payload, dict) + captured["cache_size"] = len(cache_payload) return [{"id": "pair"}], len(filtered) monkeypatch.setattr(review_mod, "detect_duplicates", _fake_detect) @@ -36,6 +42,8 @@ def _fake_detect(filtered): assert len(issues) == 1 assert captured["count"] == 1 + assert "detectors" in lang.review_cache + assert "dupes" in lang.review_cache["detectors"] assert potentials == {"dupes": 1} @@ -68,6 +76,96 @@ def test_phase_boilerplate_duplication_handles_none_and_entries(monkeypatch) -> assert potentials == {"boilerplate_duplication": 2} +def test_phase_boilerplate_duplication_reuses_cached_entries(monkeypatch, tmp_path) -> 
None: + (tmp_path / "src").mkdir() + (tmp_path / "src" / "a.py").write_text("print('a')\n") + + calls = {"count": 0} + entries = [ + { + "id": "cluster-1", + "distinct_files": 2, + "window_size": 6, + "sample": ["x = 1"], + "locations": [ + {"file": "src/a.py", "line": 1}, + {"file": "src/b.py", "line": 2}, + ], + } + ] + lang = SimpleNamespace( + zone_map=None, + name="python", + review_cache={}, + file_finder=lambda _path: ["src/a.py"], + ) + + def _fake_detect(_path): + calls["count"] += 1 + return entries + + monkeypatch.setattr(review_mod, "detect_with_jscpd", _fake_detect) + monkeypatch.setattr(review_mod, "_filter_boilerplate_entries_by_zone", lambda items, _zone: items) + + first_issues, first_potentials = review_mod.phase_boilerplate_duplication(tmp_path, lang) + second_issues, second_potentials = review_mod.phase_boilerplate_duplication(tmp_path, lang) + + assert calls["count"] == 1 + assert len(first_issues) == 1 + assert len(second_issues) == 1 + assert first_potentials == {"boilerplate_duplication": 2} + assert second_potentials == {"boilerplate_duplication": 2} + + +def test_phase_boilerplate_duplication_uses_prefetched_result(monkeypatch, tmp_path) -> None: + class _ImmediateExecutor: + def submit(self, fn, *args, **kwargs): + future: concurrent.futures.Future = concurrent.futures.Future() + future.set_result(fn(*args, **kwargs)) + return future + + (tmp_path / "src").mkdir() + (tmp_path / "src" / "a.py").write_text("print('a')\n") + calls = {"count": 0} + entries = [ + { + "id": "cluster-1", + "distinct_files": 2, + "window_size": 6, + "sample": ["x = 1"], + "locations": [ + {"file": "src/a.py", "line": 1}, + {"file": "src/b.py", "line": 2}, + ], + } + ] + lang = SimpleNamespace( + zone_map=None, + name="python", + review_cache={}, + file_finder=lambda _path: ["src/a.py"], + ) + + monkeypatch.setattr(review_mod, "_PREFETCH_EXECUTOR", _ImmediateExecutor()) + monkeypatch.setattr( + review_mod, + "detect_with_jscpd", + lambda _path: 
(calls.__setitem__("count", calls["count"] + 1), entries)[1], + ) + monkeypatch.setattr(review_mod, "_filter_boilerplate_entries_by_zone", lambda items, _zone: items) + + review_mod.prewarm_review_phase_detectors( + tmp_path, + lang, + [SimpleNamespace(label="Boilerplate duplication", run=review_mod.phase_boilerplate_duplication)], + ) + issues, potentials = review_mod.phase_boilerplate_duplication(tmp_path, lang) + + assert calls["count"] == 1 + assert len(issues) == 1 + assert potentials == {"boilerplate_duplication": 2} + + def test_phase_security_records_default_coverage_when_missing(monkeypatch) -> None: lang = SimpleNamespace( zone_map=None, @@ -119,6 +217,153 @@ def test_phase_security_records_default_coverage_when_missing(monkeypatch) -> No assert lang.detector_coverage["security"]["status"] == "full" +def test_phase_security_reuses_lang_security_cache(monkeypatch, tmp_path) -> None: + (tmp_path / "src").mkdir() + (tmp_path / "src" / "a.py").write_text("print('a')\n") + + calls = {"count": 0} + lang = SimpleNamespace( + zone_map=None, + file_finder=lambda _path: ["src/a.py"], + name="python", + review_cache={}, + detector_coverage={}, + detect_lang_security_detailed=lambda _files, _zones: ( + calls.__setitem__("count", calls["count"] + 1), + LangSecurityResult( + entries=[ + { + "file": "src/lang.py", + "tier": 2, + "confidence": "medium", + "summary": "lang issue", + "name": "lang", + } + ], + files_scanned=3, + ), + )[1], + ) + + monkeypatch.setattr(review_mod, "filter_entries", lambda _zones, entries, _detector: entries) + monkeypatch.setattr( + review_mod, + "_entries_to_issues", + lambda detector, entries, **_kwargs: [{"detector": detector, "file": e["file"]} for e in entries], + ) + monkeypatch.setattr(review_mod, "_log_phase_summary", lambda *_args, **_kwargs: None) + + first_issues, first_potentials = review_mod.phase_security( + tmp_path, + lang, + detect_security_issues=lambda _files, _zones, _name, scan_root: ( + [ + { + "file": str(scan_root / 
"src" / "cross.py"), + "tier": 2, + "confidence": "high", + "summary": "cross issue", + "name": "cross", + } + ], + 1, + ), + ) + second_issues, second_potentials = review_mod.phase_security( + tmp_path, + lang, + detect_security_issues=lambda _files, _zones, _name, scan_root: ( + [ + { + "file": str(scan_root / "src" / "cross.py"), + "tier": 2, + "confidence": "high", + "summary": "cross issue", + "name": "cross", + } + ], + 1, + ), + ) + + assert calls["count"] == 1 + assert len(first_issues) == 2 + assert len(second_issues) == 2 + assert first_potentials == {"security": 3} + assert second_potentials == {"security": 3} + + +def test_phase_security_uses_prefetched_lang_result(monkeypatch, tmp_path) -> None: + class _ImmediateExecutor: + def submit(self, fn, *args, **kwargs): + future: concurrent.futures.Future = concurrent.futures.Future() + future.set_result(fn(*args, **kwargs)) + return future + + (tmp_path / "src").mkdir() + (tmp_path / "src" / "a.py").write_text("print('a')\n") + calls = {"count": 0} + lang = SimpleNamespace( + zone_map=None, + file_finder=lambda _path: ["src/a.py"], + name="python", + review_cache={}, + detector_coverage={}, + detect_lang_security_detailed=lambda _files, _zones: ( + calls.__setitem__("count", calls["count"] + 1), + LangSecurityResult( + entries=[], + files_scanned=4, + ), + )[1], + ) + + monkeypatch.setattr(review_mod, "_PREFETCH_EXECUTOR", _ImmediateExecutor()) + monkeypatch.setattr(review_mod, "filter_entries", lambda _zones, entries, _detector: entries) + monkeypatch.setattr(review_mod, "_entries_to_issues", lambda *_a, **_k: []) + monkeypatch.setattr(review_mod, "_log_phase_summary", lambda *_args, **_kwargs: None) + + review_mod.prewarm_review_phase_detectors( + tmp_path, + lang, + [SimpleNamespace(label="Security", run=review_mod.phase_security)], + ) + issues, potentials = review_mod.phase_security( + tmp_path, + lang, + detect_security_issues=lambda _files, _zones, _name, **_kwargs: ([], 1), + ) + + assert 
calls["count"] == 1 + assert issues == [] + assert potentials == {"security": 4} + + +def test_review_function_extraction_cached_across_signature_and_dupes(monkeypatch, tmp_path) -> None: + calls = {"count": 0} + functions = [SimpleNamespace(file="src/a.py", name="foo")] + lang = SimpleNamespace( + extract_functions=lambda _path: ( + calls.__setitem__("count", calls["count"] + 1), + functions, + )[1], + zone_map=None, + review_cache={}, + ) + + monkeypatch.setattr( + "desloppify.engine.detectors.signature.detect_signature_variance", + lambda _functions, **_kwargs: ([], 0), + ) + monkeypatch.setattr(review_mod, "detect_duplicates", lambda _functions, **_kwargs: ([], 1)) + monkeypatch.setattr(review_mod, "make_dupe_issues", lambda *_args, **_kwargs: []) + + review_mod.phase_signature(tmp_path, lang) + review_mod.phase_dupes(tmp_path, lang) + + assert calls["count"] == 1 + + def test_phase_test_coverage_and_private_imports_paths(monkeypatch) -> None: lang_without_zones = SimpleNamespace(zone_map=None) assert review_mod.phase_test_coverage(Path("."), lang_without_zones) == ([], {}) diff --git a/desloppify/tests/lang/common/test_lang_runtime_isolation.py b/desloppify/tests/lang/common/test_lang_runtime_isolation.py index 2d3d6a5d4..91b6ed98b 100644 --- a/desloppify/tests/lang/common/test_lang_runtime_isolation.py +++ b/desloppify/tests/lang/common/test_lang_runtime_isolation.py @@ -97,6 +97,16 @@ def test_lang_run_does_not_auto_forward_unknown_config_attrs() -> None: _ = run.future_runtime_attr +def test_make_lang_run_preserves_empty_review_cache_reference() -> None: + config = PythonConfig() + review_cache: dict[str, object] = {} + run = make_lang_run( + config, + overrides=LangRunOverrides(review_cache=review_cache), + ) + assert run.review_cache is review_cache + + def test_lang_run_props_threshold_defaults_to_lang_config() -> None: config = PythonConfig() config.props_threshold = 23 diff --git a/desloppify/tests/plan/test_plan_modules_direct.py 
b/desloppify/tests/plan/test_plan_modules_direct.py index 2513640c1..90db1bf33 100644 --- a/desloppify/tests/plan/test_plan_modules_direct.py +++ b/desloppify/tests/plan/test_plan_modules_direct.py @@ -5,6 +5,8 @@ from pathlib import Path from types import SimpleNamespace +import pytest + import desloppify.engine._state.filtering as filtering_mod from desloppify.engine._work_queue.core import QueueBuildOptions import desloppify.engine.planning.helpers as plan_common_mod @@ -63,6 +65,60 @@ def test_select_phases_and_run_phases_behavior(): assert potentials == {"fast": 1, "slow": 2, "review": 3} +def test_generate_issues_from_lang_primes_and_clears_review_prefetch(monkeypatch): + calls: list[str] = [] + lang = SimpleNamespace(phases=[], zone_map=None, name="python") + + monkeypatch.setattr(plan_scan_mod, "_build_zone_map", lambda *_a, **_k: None) + monkeypatch.setattr(plan_scan_mod, "_select_phases", lambda *_a, **_k: []) + monkeypatch.setattr(plan_scan_mod, "_run_phases", lambda *_a, **_k: ([], {})) + monkeypatch.setattr(plan_scan_mod, "_stamp_issue_context", lambda *_a, **_k: None) + monkeypatch.setattr( + plan_scan_mod, + "prewarm_review_phase_detectors", + lambda *_a, **_k: calls.append("prime"), + ) + monkeypatch.setattr( + plan_scan_mod, + "clear_review_phase_prefetch", + lambda *_a, **_k: calls.append("clear"), + ) + + issues, potentials = plan_scan_mod._generate_issues_from_lang(Path("."), lang) + + assert issues == [] + assert potentials == {} + assert calls == ["prime", "clear"] + + +def test_generate_issues_from_lang_clears_prefetch_on_phase_error(monkeypatch): + calls: list[str] = [] + lang = SimpleNamespace(phases=[], zone_map=None, name="python") + + monkeypatch.setattr(plan_scan_mod, "_build_zone_map", lambda *_a, **_k: None) + monkeypatch.setattr(plan_scan_mod, "_select_phases", lambda *_a, **_k: []) + monkeypatch.setattr( + plan_scan_mod, + "_run_phases", + lambda *_a, **_k: (_ for _ in ()).throw(RuntimeError("boom")), + ) + monkeypatch.setattr( + 
plan_scan_mod, + "prewarm_review_phase_detectors", + lambda *_a, **_k: calls.append("prime"), + ) + monkeypatch.setattr( + plan_scan_mod, + "clear_review_phase_prefetch", + lambda *_a, **_k: calls.append("clear"), + ) + + with pytest.raises(RuntimeError, match="boom"): + plan_scan_mod._generate_issues_from_lang(Path("."), lang) + + assert calls == ["prime", "clear"] + + def test_resolve_lang_prefers_explicit_and_fallbacks(monkeypatch): explicit = object() assert plan_scan_mod._resolve_lang(explicit, Path(".")) is explicit From dd73df580c282d9d22a5977c334a248425ac33d9 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 02:25:30 +0100 Subject: [PATCH 20/43] feat(frameworks): FrameworkSpec layer + Next.js spec (PR #414) Co-Authored-By: Tom Swift --- desloppify/base/discovery/source.py | 26 + desloppify/base/registry/catalog_entries.py | 22 + desloppify/base/registry/catalog_models.py | 2 + desloppify/engine/_scoring/policy/core.py | 11 +- .../_framework/frameworks/__init__.py | 41 + .../_framework/frameworks/detection.py | 212 +++ .../languages/_framework/frameworks/phases.py | 169 +++ .../_framework/frameworks/registry.py | 58 + .../_framework/frameworks/specs/__init__.py | 5 + .../_framework/frameworks/specs/nextjs.py | 557 ++++++++ .../languages/_framework/frameworks/types.py | 87 ++ .../_framework/generic_parts/parsers.py | 95 +- .../generic_parts/tool_factories.py | 24 +- .../_framework/generic_parts/tool_runner.py | 30 +- .../_framework/generic_support/core.py | 15 + .../languages/_framework/node/__init__.py | 3 + .../_framework/node/frameworks/__init__.py | 13 + .../node/frameworks/nextjs/README.md | 158 +++ .../node/frameworks/nextjs/__init__.py | 58 + .../_framework/node/frameworks/nextjs/info.py | 49 + .../node/frameworks/nextjs/scanners.py | 1209 +++++++++++++++++ .../languages/_framework/node/js_text.py | 76 ++ desloppify/languages/javascript/__init__.py | 6 +- .../tests/test_js_nextjs_framework.py | 83 ++ desloppify/languages/typescript/__init__.py 
| 2 + .../languages/typescript/detectors/unused.py | 23 +- .../languages/typescript/phases_smells.py | 4 +- .../tests/test_ts_nextjs_framework.py | 229 ++++ .../typescript/tests/test_ts_unused.py | 12 +- desloppify/tests/detectors/test_next_lint.py | 186 +++ desloppify/tests/review/work_queue_cases.py | 1 + 31 files changed, 3446 insertions(+), 20 deletions(-) create mode 100644 desloppify/languages/_framework/frameworks/__init__.py create mode 100644 desloppify/languages/_framework/frameworks/detection.py create mode 100644 desloppify/languages/_framework/frameworks/phases.py create mode 100644 desloppify/languages/_framework/frameworks/registry.py create mode 100644 desloppify/languages/_framework/frameworks/specs/__init__.py create mode 100644 desloppify/languages/_framework/frameworks/specs/nextjs.py create mode 100644 desloppify/languages/_framework/frameworks/types.py create mode 100644 desloppify/languages/_framework/node/__init__.py create mode 100644 desloppify/languages/_framework/node/frameworks/__init__.py create mode 100644 desloppify/languages/_framework/node/frameworks/nextjs/README.md create mode 100644 desloppify/languages/_framework/node/frameworks/nextjs/__init__.py create mode 100644 desloppify/languages/_framework/node/frameworks/nextjs/info.py create mode 100644 desloppify/languages/_framework/node/frameworks/nextjs/scanners.py create mode 100644 desloppify/languages/_framework/node/js_text.py create mode 100644 desloppify/languages/javascript/tests/test_js_nextjs_framework.py create mode 100644 desloppify/languages/typescript/tests/test_ts_nextjs_framework.py create mode 100644 desloppify/tests/detectors/test_next_lint.py diff --git a/desloppify/base/discovery/source.py b/desloppify/base/discovery/source.py index b0b564ee8..25e2ce934 100644 --- a/desloppify/base/discovery/source.py +++ b/desloppify/base/discovery/source.py @@ -286,6 +286,30 @@ def find_tsx_files(path: str | Path, *, runtime: RuntimeContext | None = None) - return 
find_source_files(path, [".tsx"], runtime=runtime) +def find_js_and_jsx_files( + path: str | Path, + *, + runtime: RuntimeContext | None = None, +) -> list[str]: + """Find JavaScript source files across common extensions.""" + exts = [".js", ".jsx", ".mjs", ".cjs"] + if runtime is None: + return find_source_files(path, exts) + return find_source_files(path, exts, runtime=runtime) + + +def find_js_ts_and_tsx_files( + path: str | Path, + *, + runtime: RuntimeContext | None = None, +) -> list[str]: + """Find JavaScript + TypeScript source files across common extensions.""" + exts = [".js", ".jsx", ".mjs", ".cjs", ".ts", ".tsx"] + if runtime is None: + return find_source_files(path, exts) + return find_source_files(path, exts, runtime=runtime) + + def find_py_files(path: str | Path, *, runtime: RuntimeContext | None = None) -> list[str]: if runtime is None: return find_source_files(path, [".py"]) @@ -309,5 +333,7 @@ def find_py_files(path: str | Path, *, runtime: RuntimeContext | None = None) -> "find_ts_files", "find_ts_and_tsx_files", "find_tsx_files", + "find_js_and_jsx_files", + "find_js_ts_and_tsx_files", "find_py_files", ] diff --git a/desloppify/base/registry/catalog_entries.py b/desloppify/base/registry/catalog_entries.py index d2015525d..f5b6224aa 100644 --- a/desloppify/base/registry/catalog_entries.py +++ b/desloppify/base/registry/catalog_entries.py @@ -189,6 +189,28 @@ tier=3, subjective_dimensions=("design_coherence",), ), + "nextjs": DetectorMeta( + "nextjs", + "nextjs", + "Code quality", + "refactor", + "fix Next.js framework smells (RSC/client boundaries, routing, middleware, env leakage)", + needs_judgment=True, + standalone_threshold="medium", + tier=3, + marks_dims_stale=True, + subjective_dimensions=("design_coherence", "logic_clarity"), + ), + "next_lint": DetectorMeta( + "next_lint", + "next lint", + "Code quality", + "manual_fix", + "run `next lint` and fix reported ESLint issues", + tier=2, + marks_dims_stale=True, + 
subjective_dimensions=("convention_outlier",), + ), "dupes": DetectorMeta( "dupes", "dupes", diff --git a/desloppify/base/registry/catalog_models.py b/desloppify/base/registry/catalog_models.py index 1c3b0b767..31c273c9e 100644 --- a/desloppify/base/registry/catalog_models.py +++ b/desloppify/base/registry/catalog_models.py @@ -22,6 +22,8 @@ "naming", "smells", "react", + "nextjs", + "next_lint", "dupes", "stale_exclude", "dict_keys", diff --git a/desloppify/engine/_scoring/policy/core.py b/desloppify/engine/_scoring/policy/core.py index 8835a0b9f..b44ffceb4 100644 --- a/desloppify/engine/_scoring/policy/core.py +++ b/desloppify/engine/_scoring/policy/core.py @@ -41,7 +41,16 @@ class DetectorScoringPolicy: # Keep policy details that are independent of tier/dimension wiring. _FILE_BASED_POLICY_DETECTORS = frozenset( - {"smells", "dict_keys", "test_coverage", "security", "concerns", "review"} + { + "smells", + "dict_keys", + "test_coverage", + "security", + "concerns", + "review", + "nextjs", + "next_lint", + } ) _LOC_WEIGHT_POLICY_DETECTORS = frozenset({"test_coverage"}) _EXCLUDED_ZONE_OVERRIDES: dict[str, frozenset[str]] = { diff --git a/desloppify/languages/_framework/frameworks/__init__.py b/desloppify/languages/_framework/frameworks/__init__.py new file mode 100644 index 000000000..fa703ff71 --- /dev/null +++ b/desloppify/languages/_framework/frameworks/__init__.py @@ -0,0 +1,41 @@ +"""Framework horizontal layer (spec-driven, like tree-sitter/tool specs). + +Framework support is intentionally spec-driven so it can be enabled from both +deep language plugins (LangConfig classes) and shallow generic_lang plugins. 
+ +Public entrypoints: +- framework_phases(lang_name): build DetectorPhase objects +- detect_ecosystem_frameworks(scan_path, lang, ecosystem): framework presence + evidence +""" + +from __future__ import annotations + +from .detection import detect_ecosystem_frameworks +from .phases import framework_phases +from .registry import ( + FRAMEWORK_SPECS, + get_framework_spec, + list_framework_specs, + register_framework_spec, +) +from .types import ( + DetectionConfig, + EcosystemFrameworkDetection, + FrameworkSpec, + ScannerRule, + ToolIntegration, +) + +__all__ = [ + "DetectionConfig", + "EcosystemFrameworkDetection", + "FRAMEWORK_SPECS", + "FrameworkSpec", + "ScannerRule", + "ToolIntegration", + "detect_ecosystem_frameworks", + "framework_phases", + "get_framework_spec", + "list_framework_specs", + "register_framework_spec", +] diff --git a/desloppify/languages/_framework/frameworks/detection.py b/desloppify/languages/_framework/frameworks/detection.py new file mode 100644 index 000000000..1f9877199 --- /dev/null +++ b/desloppify/languages/_framework/frameworks/detection.py @@ -0,0 +1,212 @@ +"""Ecosystem-specific framework presence detection (deterministic, evidence-based).""" + +from __future__ import annotations + +import json +import re +from pathlib import Path +from typing import Any + +from desloppify.base.discovery.paths import get_project_root +from desloppify.languages._framework.base.types import LangRuntimeContract + +from .registry import ensure_builtin_specs_loaded, list_framework_specs +from .types import DetectionConfig, EcosystemFrameworkDetection, FrameworkEvidence + +_CACHE_PREFIX = "frameworks.ecosystem.present" + + +def _find_nearest_package_json(scan_path: Path, project_root: Path) -> Path | None: + resolved = scan_path if scan_path.is_absolute() else (project_root / scan_path) + resolved = resolved.resolve() + if resolved.is_file(): + resolved = resolved.parent + + # If scan_path is inside runtime project root, cap traversal there. 
+ # Otherwise (e.g. --path /tmp/other-repo), traverse from scan_path upward. + limit_to_project_root = False + try: + resolved.relative_to(project_root) + limit_to_project_root = True + except ValueError: + limit_to_project_root = False + + cur = resolved + while True: + candidate = cur / "package.json" + if candidate.is_file(): + return candidate + if (limit_to_project_root and cur == project_root) or cur.parent == cur: + break + cur = cur.parent + + # Fallback only when no package.json exists in the scanned tree. + candidate = project_root / "package.json" + return candidate if candidate.is_file() else None + + +def _read_package_json(package_json: Path) -> dict[str, Any]: + try: + payload = json.loads(package_json.read_text()) + except (OSError, UnicodeDecodeError, json.JSONDecodeError): + return {} + return payload if isinstance(payload, dict) else {} + + +def _dep_set(payload: dict[str, Any], key: str) -> set[str]: + deps = payload.get(key) + if not isinstance(deps, dict): + return set() + return {str(k) for k in deps.keys()} + + +def _script_values(payload: dict[str, Any]) -> list[str]: + scripts = payload.get("scripts") + if not isinstance(scripts, dict): + return [] + return [v for v in scripts.values() if isinstance(v, str)] + + +def _existing_relpaths( + package_root: Path, + project_root: Path, + candidates: tuple[str, ...], + *, + kind: str, +) -> tuple[str, ...]: + hits: list[str] = [] + for relpath in candidates: + path = (package_root / relpath).resolve() + ok = path.is_dir() if kind == "dir" else path.is_file() + if not ok: + continue + try: + hits.append(path.relative_to(project_root).as_posix()) + except ValueError: + hits.append(path.as_posix()) + return tuple(hits) + + +def _node_framework_evidence( + *, + cfg: DetectionConfig, + package_root: Path, + project_root: Path, + deps: set[str], + dev_deps: set[str], + scripts: list[str], +) -> tuple[bool, FrameworkEvidence]: + dep_hits = tuple(sorted(set(cfg.dependencies).intersection(deps))) + 
dev_dep_hits = tuple(sorted(set(cfg.dev_dependencies).intersection(dev_deps))) + config_hits = _existing_relpaths(package_root, project_root, cfg.config_files, kind="file") + marker_file_hits = _existing_relpaths(package_root, project_root, cfg.marker_files, kind="file") + marker_dir_hits = _existing_relpaths(package_root, project_root, cfg.marker_dirs, kind="dir") + + script_hits: list[str] = [] + if scripts and cfg.script_pattern: + pat = re.compile(cfg.script_pattern) + script_hits = [s for s in scripts if pat.search(s)] + + # Presence is deterministic: deps/config/scripts imply presence. Marker dirs are context by default. + present = bool(dep_hits or dev_dep_hits or config_hits or marker_file_hits or script_hits) + if cfg.marker_dirs_imply_presence and marker_dir_hits: + present = True + + evidence: FrameworkEvidence = { + "dep_hits": list(dep_hits), + "dev_dep_hits": list(dev_dep_hits), + "config_hits": list(config_hits), + "marker_file_hits": list(marker_file_hits), + "marker_dir_hits": list(marker_dir_hits), + "script_hits": script_hits[:5], + } + return present, evidence + + +def detect_ecosystem_frameworks( + scan_path: Path, + lang: LangRuntimeContract | None, + ecosystem: str, +) -> EcosystemFrameworkDetection: + """Detect framework presence for an ecosystem and scan path (cached per run).""" + ensure_builtin_specs_loaded() + eco = str(ecosystem or "").strip().lower() + resolved_scan_path = Path(scan_path).resolve() + cache_key = f"{_CACHE_PREFIX}:{eco}:{resolved_scan_path.as_posix()}" + + if lang is not None: + cache = getattr(lang, "review_cache", None) + if isinstance(cache, dict): + cached = cache.get(cache_key) + if isinstance(cached, EcosystemFrameworkDetection): + return cached + + project_root = get_project_root() + + if eco != "node": + result = EcosystemFrameworkDetection( + ecosystem=eco, + package_root=project_root, + package_json_relpath=None, + present={}, + ) + if lang is not None and isinstance(getattr(lang, "review_cache", None), dict): 
+ lang.review_cache[cache_key] = result + return result + + package_json = _find_nearest_package_json(resolved_scan_path, project_root) + package_root = (package_json.parent if package_json else project_root).resolve() + payload = _read_package_json(package_json) if package_json else {} + + deps = _dep_set(payload, "dependencies") | _dep_set(payload, "peerDependencies") | _dep_set( + payload, "optionalDependencies" + ) + dev_deps = _dep_set(payload, "devDependencies") + scripts = _script_values(payload) + + specs = list_framework_specs(ecosystem=eco) + present: dict[str, FrameworkEvidence] = {} + for framework_id, spec in specs.items(): + ok, evidence = _node_framework_evidence( + cfg=spec.detection, + package_root=package_root, + project_root=project_root, + deps=deps, + dev_deps=dev_deps, + scripts=scripts, + ) + if ok: + present[framework_id] = evidence + + # Apply mutual exclusions deterministically: present frameworks can suppress others. + present_ids = set(present.keys()) + for framework_id, spec in specs.items(): + if framework_id not in present_ids: + continue + for excluded in spec.excludes: + present.pop(str(excluded), None) + + result = EcosystemFrameworkDetection( + ecosystem=eco, + package_root=package_root, + package_json_relpath=( + ( + package_json.relative_to(project_root).as_posix() + if package_json and package_json.is_relative_to(project_root) + else package_json.as_posix() + ) + if package_json + else None + ), + present=present, + ) + + if lang is not None: + cache = getattr(lang, "review_cache", None) + if isinstance(cache, dict): + cache[cache_key] = result + + return result + + +__all__ = ["detect_ecosystem_frameworks"] diff --git a/desloppify/languages/_framework/frameworks/phases.py b/desloppify/languages/_framework/frameworks/phases.py new file mode 100644 index 000000000..65f6c75cf --- /dev/null +++ b/desloppify/languages/_framework/frameworks/phases.py @@ -0,0 +1,169 @@ +"""DetectorPhase factories for framework specs.""" + +from 
__future__ import annotations + +from pathlib import Path +from typing import Any + +from desloppify.base.output.terminal import log as _log +from desloppify.languages._framework.base.types import DetectorPhase, LangRuntimeContract +from desloppify.languages._framework.generic_support.core import make_tool_phase +from desloppify.state_io import Issue + +from .detection import detect_ecosystem_frameworks +from .registry import ensure_builtin_specs_loaded, list_framework_specs +from .types import FrameworkSpec, ScannerRule, ToolIntegration + + +def _has_capability(lang: LangRuntimeContract, cap: str) -> bool: + key = str(cap or "").strip() + if not key: + return True + if key == "dep_graph": + return getattr(lang, "dep_graph", None) is not None + if key == "zone_map": + return getattr(lang, "zone_map", None) is not None + if key == "file_finder": + return callable(getattr(lang, "file_finder", None)) + return bool(getattr(lang, key, None)) + + +def _record_capability_degradation( + lang: Any, + *, + detector: str, + rule_id: str, + missing: list[str], +) -> None: + """Record reduced coverage metadata when a framework rule cannot run.""" + if not missing: + return + summary = ( + f"Skipped {detector} framework rule '{rule_id}' (missing: {', '.join(missing)})." 
+ ) + record = { + "detector": detector, + "status": "reduced", + "confidence": 0.5, + "summary": summary, + "impact": "Some framework-specific issues may be under-reported for this scan.", + "remediation": "Enable the required language capabilities and rerun scan.", + "tool": "", + "reason": "missing_capability", + } + detector_coverage = getattr(lang, "detector_coverage", None) + if isinstance(detector_coverage, dict): + existing = detector_coverage.get(detector) + if isinstance(existing, dict): + merged = dict(existing) + merged["status"] = "reduced" + merged["confidence"] = min(float(existing.get("confidence", 1.0)), 0.5) + merged_summary = str(merged.get("summary", "") or "").strip() + if merged_summary and summary not in merged_summary: + merged["summary"] = f"{merged_summary} | {summary}" + elif not merged_summary: + merged["summary"] = summary + detector_coverage[detector] = merged + else: + detector_coverage[detector] = dict(record) + + coverage_warnings = getattr(lang, "coverage_warnings", None) + if isinstance(coverage_warnings, list): + if not any( + isinstance(entry, dict) and entry.get("detector") == detector for entry in coverage_warnings + ): + coverage_warnings.append(dict(record)) + + +def _run_scanner_rules( + scan_root: Path, + lang: LangRuntimeContract, + *, + detector: str, + rules: tuple[ScannerRule, ...], +) -> tuple[list[Issue], int]: + issues: list[Issue] = [] + potential = 0 + + for rule in rules: + scan_fn = rule.scan + issue_factory = rule.issue_factory + if scan_fn is None or issue_factory is None: + continue + + missing = [cap for cap in rule.requires if not _has_capability(lang, cap)] + if missing: + _record_capability_degradation( + lang, + detector=detector, + rule_id=rule.id, + missing=missing, + ) + continue + + entries, scanned = scan_fn(scan_root, lang) + potential = max(potential, int(scanned or 0)) + for entry in entries: + issues.append(issue_factory(entry)) + if entries and rule.log_message: + 
_log(rule.log_message(len(entries))) + + return issues, potential + + +def _framework_smells_phase(spec: FrameworkSpec) -> DetectorPhase: + label = f"{spec.label} framework smells" + + def run(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], dict[str, int]]: + detection = detect_ecosystem_frameworks(path, lang, spec.ecosystem) + if spec.id not in detection.present: + return [], {} + + scan_root = detection.package_root + issues, potential = _run_scanner_rules( + scan_root, + lang, + detector=spec.id, + rules=spec.scanners, + ) + return issues, ({spec.id: potential} if potential > 0 else {}) + + return DetectorPhase(label, run) + + +def _framework_tool_phase(spec: FrameworkSpec, tool: ToolIntegration) -> DetectorPhase: + tool_phase = make_tool_phase( + tool.label, + tool.cmd, + tool.fmt, + tool.id, + tool.tier, + confidence=tool.confidence, + ) + tool_phase.slow = bool(tool.slow) + + def run(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], dict[str, int]]: + detection = detect_ecosystem_frameworks(path, lang, spec.ecosystem) + if spec.id not in detection.present: + return [], {} + + scan_root = detection.package_root + return tool_phase.run(scan_root, lang) + + return DetectorPhase(tool_phase.label, run, slow=tool_phase.slow) + + +def framework_phases(lang_name: str) -> list[DetectorPhase]: + """Return all framework phases for a language plugin.""" + del lang_name + ensure_builtin_specs_loaded() + + phases: list[DetectorPhase] = [] + for spec in list_framework_specs().values(): + phases.append(_framework_smells_phase(spec)) + for tool in spec.tools: + phases.append(_framework_tool_phase(spec, tool)) + return phases + + +__all__ = ["framework_phases"] diff --git a/desloppify/languages/_framework/frameworks/registry.py b/desloppify/languages/_framework/frameworks/registry.py new file mode 100644 index 000000000..fe2bb5fb0 --- /dev/null +++ b/desloppify/languages/_framework/frameworks/registry.py @@ -0,0 +1,58 @@ +"""Framework spec registry 
(analogous to tree-sitter spec registry).""" + +from __future__ import annotations + +from collections.abc import Iterable + +from .types import FrameworkSpec + +FRAMEWORK_SPECS: dict[str, FrameworkSpec] = {} + + +def register_framework_spec(spec: FrameworkSpec) -> None: + """Register a framework spec by id.""" + key = str(spec.id or "").strip() + if not key: + raise ValueError("FrameworkSpec.id must be non-empty") + FRAMEWORK_SPECS[key] = spec + + +def get_framework_spec(framework_id: str) -> FrameworkSpec | None: + """Return a registered framework spec by id.""" + key = str(framework_id or "").strip() + if not key: + return None + return FRAMEWORK_SPECS.get(key) + + +def list_framework_specs(*, ecosystem: str | None = None) -> dict[str, FrameworkSpec]: + """Return a copy of the framework registry, optionally filtered by ecosystem.""" + if ecosystem is None: + return dict(FRAMEWORK_SPECS) + eco = str(ecosystem or "").strip().lower() + if not eco: + return dict(FRAMEWORK_SPECS) + return {k: v for k, v in FRAMEWORK_SPECS.items() if str(v.ecosystem).lower() == eco} + + +def _register_builtin_specs() -> None: + """Register built-in framework specs shipped with the repo.""" + if FRAMEWORK_SPECS: + return + from .specs.nextjs import NEXTJS_SPEC + + register_framework_spec(NEXTJS_SPEC) + + +def ensure_builtin_specs_loaded() -> None: + """Idempotently load built-in framework specs.""" + _register_builtin_specs() + + +__all__ = [ + "FRAMEWORK_SPECS", + "ensure_builtin_specs_loaded", + "get_framework_spec", + "list_framework_specs", + "register_framework_spec", +] diff --git a/desloppify/languages/_framework/frameworks/specs/__init__.py b/desloppify/languages/_framework/frameworks/specs/__init__.py new file mode 100644 index 000000000..8c4c32a85 --- /dev/null +++ b/desloppify/languages/_framework/frameworks/specs/__init__.py @@ -0,0 +1,5 @@ +"""Built-in framework specs.""" + +from __future__ import annotations + +__all__ = [] diff --git 
a/desloppify/languages/_framework/frameworks/specs/nextjs.py b/desloppify/languages/_framework/frameworks/specs/nextjs.py new file mode 100644 index 000000000..0180d2206 --- /dev/null +++ b/desloppify/languages/_framework/frameworks/specs/nextjs.py @@ -0,0 +1,557 @@ +"""Next.js framework spec (Node ecosystem).""" + +from __future__ import annotations + +from collections.abc import Callable +from pathlib import Path +from typing import Any + +from desloppify.engine._state.filtering import make_issue +from desloppify.languages._framework.base.types import LangRuntimeContract +from desloppify.languages._framework.node.frameworks.nextjs.info import ( + NextjsFrameworkInfo, + nextjs_info_from_evidence, +) +from desloppify.languages._framework.node.frameworks.nextjs.scanners import ( + scan_mixed_router_layout, + scan_next_router_imports_in_app_router, + scan_nextjs_app_router_exports_in_pages_router, + scan_nextjs_async_client_components, + scan_nextjs_browser_globals_missing_use_client, + scan_nextjs_client_layouts, + scan_nextjs_env_leaks_in_client, + scan_nextjs_error_files_missing_use_client, + scan_nextjs_navigation_hooks_missing_use_client, + scan_nextjs_next_document_misuse, + scan_nextjs_next_head_in_app_router, + scan_nextjs_pages_api_route_handlers, + scan_nextjs_pages_router_apis_in_app_router, + scan_nextjs_pages_router_artifacts_in_app_router, + scan_nextjs_route_handlers_and_middleware_misuse, + scan_nextjs_server_exports_in_client, + scan_nextjs_server_imports_in_client, + scan_nextjs_server_modules_in_pages_router, + scan_nextjs_server_navigation_apis_in_client, + scan_nextjs_use_client_not_first, + scan_nextjs_use_server_in_client, + scan_nextjs_use_server_not_first, + scan_rsc_missing_use_client, +) +from desloppify.state_io import Issue + +from ..types import DetectionConfig, FrameworkSpec, ScannerRule, ToolIntegration + +_NEXTJS_INFO_CACHE_PREFIX = "framework.nextjs.info" + + +def _nextjs_info(scan_root: Path, lang: LangRuntimeContract) -> 
NextjsFrameworkInfo: + key = f"{_NEXTJS_INFO_CACHE_PREFIX}:{scan_root.resolve().as_posix()}" + cache = getattr(lang, "review_cache", None) + if isinstance(cache, dict): + cached = cache.get(key) + if isinstance(cached, NextjsFrameworkInfo): + return cached + + from desloppify.languages._framework.frameworks.detection import ( + detect_ecosystem_frameworks, + ) + + detection = detect_ecosystem_frameworks(scan_root, lang, "node") + evidence = detection.present.get("nextjs", {}) + info = nextjs_info_from_evidence( + evidence, + package_root=detection.package_root, + package_json_relpath=detection.package_json_relpath, + ) + + if isinstance(cache, dict): + cache[key] = info + return info + + +def _wrap_scan( + scan_fn: Callable[[Path, NextjsFrameworkInfo], tuple[list[dict[str, Any]], int]], +) -> Callable[[Path, LangRuntimeContract], tuple[list[dict[str, Any]], int]]: + def scan(scan_root: Path, lang: LangRuntimeContract) -> tuple[list[dict[str, Any]], int]: + info = _nextjs_info(scan_root, lang) + return scan_fn(scan_root, info) + + return scan + + +def _wrap_info_scan( + scan_fn: Callable[[NextjsFrameworkInfo], list[dict[str, Any]]], +) -> Callable[[Path, LangRuntimeContract], tuple[list[dict[str, Any]], int]]: + def scan(scan_root: Path, lang: LangRuntimeContract) -> tuple[list[dict[str, Any]], int]: + info = _nextjs_info(scan_root, lang) + return list(scan_fn(info)), 0 + + return scan + + +def _make_line_issue( + detector: str, + issue_id: str, + *, + tier: int, + confidence: str, + summary: str, +) -> Callable[[dict[str, Any]], Issue]: + return lambda entry: make_issue( + detector, + entry["file"], + issue_id, + tier=tier, + confidence=confidence, + summary=summary, + detail={"line": entry["line"]}, + ) + + +NEXTJS_SCANNERS: tuple[ScannerRule, ...] 
= ( + ScannerRule( + id="use_client_not_first", + scan=_wrap_scan(scan_nextjs_use_client_not_first), + issue_factory=_make_line_issue( + "nextjs", + "use_client_not_first", + tier=2, + confidence="high", + summary="'use client' directive is present but not the first meaningful line (invalid in Next.js).", + ), + log_message=lambda count: ( + " nextjs: " + f"{count} App Router files contain a non-top-level 'use client' directive" + ), + ), + ScannerRule( + id="error_file_missing_use_client", + scan=_wrap_scan(scan_nextjs_error_files_missing_use_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"error_file_missing_use_client::{entry.get('name','error')}", + tier=2, + confidence="high", + summary="App Router error boundary module is missing 'use client' (required for error.js/error.tsx).", + detail={"line": entry["line"], "name": entry.get("name")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} App Router error boundary files missing 'use client'" + ), + ), + ScannerRule( + id="pages_router_artifact_in_app_router", + scan=_wrap_scan(scan_nextjs_pages_router_artifacts_in_app_router), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"pages_router_artifact_in_app_router::{entry.get('name','artifact')}", + tier=3, + confidence="high", + summary=( + "App Router tree contains Pages Router artifact file " + f"{entry.get('name')} (likely migration artifact)." 
+ ), + detail={"line": entry["line"], "name": entry.get("name")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} Pages Router artifact files found under app/" + ), + ), + ScannerRule( + id="missing_use_client", + scan=_wrap_scan(scan_rsc_missing_use_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"missing_use_client::{entry['hook']}", + tier=2, + confidence="medium", + summary=f"Missing 'use client' directive: App Router module uses {entry['hook']}()", + detail={"line": entry["line"], "hook": entry["hook"]}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} App Router files missing 'use client'" + ), + ), + ScannerRule( + id="nav_hook_missing_use_client", + scan=_wrap_scan(scan_nextjs_navigation_hooks_missing_use_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"nav_hook_missing_use_client::{entry['hook']}", + tier=2, + confidence="high", + summary=f"Missing 'use client' directive: App Router module uses {entry['hook']}()", + detail={"line": entry["line"], "hook": entry["hook"]}, + ), + log_message=lambda count: ( + " nextjs: " + f"{count} App Router files use next/navigation hooks without 'use client'" + ), + ), + ScannerRule( + id="server_import_in_client", + scan=_wrap_scan(scan_nextjs_server_imports_in_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + "server_import_in_client", + tier=2, + confidence="high", + summary=( + ( + "Client component imports server-only modules (" + + ", ".join(entry.get("modules", [])[:4]) + + ")." + ) + if entry.get("modules") + else "Client component imports server-only modules." 
+ ), + detail={ + "line": entry["line"], + "modules": entry.get("modules", []), + "imports": entry.get("imports", []), + }, + ), + log_message=lambda count: ( + " nextjs: " f"{count} client components import server-only modules" + ), + ), + ScannerRule( + id="server_export_in_client", + scan=_wrap_scan(scan_nextjs_server_exports_in_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"server_export_in_client::{entry.get('export','export')}", + tier=3, + confidence="high", + summary=( + "Client component exports server-only Next.js module exports " + f"({entry.get('export')})." + ), + detail={"line": entry["line"], "export": entry.get("export")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} client components export server-only Next.js exports" + ), + ), + ScannerRule( + id="pages_router_api_in_app_router", + scan=_wrap_scan(scan_nextjs_pages_router_apis_in_app_router), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"pages_router_api_in_app_router::{entry.get('api','api')}", + tier=3, + confidence="high", + summary=( + "App Router module uses Pages Router data-fetching API " + f"({entry.get('api')})." 
+ ), + detail={"line": entry["line"], "api": entry.get("api")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} App Router files use Pages Router APIs" + ), + ), + ScannerRule( + id="next_head_in_app_router", + scan=_wrap_scan(scan_nextjs_next_head_in_app_router), + issue_factory=_make_line_issue( + "nextjs", + "next_head_in_app_router", + tier=3, + confidence="high", + summary="App Router module imports next/head (unsupported in App Router).", + ), + log_message=lambda count: ( + " nextjs: " f"{count} App Router files import next/head" + ), + ), + ScannerRule( + id="next_document_misuse", + scan=_wrap_scan(scan_nextjs_next_document_misuse), + issue_factory=_make_line_issue( + "nextjs", + "next_document_misuse", + tier=3, + confidence="high", + summary="next/document import outside valid Pages Router _document.* file.", + ), + log_message=lambda count: ( + " nextjs: " f"{count} files import next/document outside _document.*" + ), + ), + ScannerRule( + id="browser_global_missing_use_client", + scan=_wrap_scan(scan_nextjs_browser_globals_missing_use_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"browser_global_missing_use_client::{entry.get('global','global')}", + tier=2, + confidence="medium", + summary=( + "App Router module accesses browser globals " + f"({entry.get('global')}) but is missing 'use client'." 
+ ), + detail={"line": entry["line"], "global": entry.get("global")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} App Router files access browser globals without 'use client'" + ), + ), + ScannerRule( + id="client_layout_smell", + scan=_wrap_scan(scan_nextjs_client_layouts), + issue_factory=_make_line_issue( + "nextjs", + "client_layout_smell", + tier=3, + confidence="low", + summary="Client layout detected (layout.* marked 'use client') — consider isolating interactivity to leaf components.", + ), + log_message=lambda count: ( + " nextjs: " f"{count} client layouts detected" + ), + ), + ScannerRule( + id="async_client_component", + scan=_wrap_scan(scan_nextjs_async_client_components), + issue_factory=_make_line_issue( + "nextjs", + "async_client_component", + tier=3, + confidence="high", + summary="Client component is async (invalid in Next.js).", + ), + log_message=lambda count: ( + " nextjs: " f"{count} async client components detected" + ), + ), + ScannerRule( + id="env_leak_in_client", + scan=_wrap_scan(scan_nextjs_env_leaks_in_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"env_leak_in_client::{entry.get('var','env')}", + tier=2, + confidence="high", + summary=( + "Client module accesses non-public env var " + f"process.env.{entry.get('var')} (only NEXT_PUBLIC_* should be used in client)." + ), + detail={"line": entry["line"], "var": entry.get("var")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} client modules access non-public env vars" + ), + ), + ScannerRule( + id="pages_api_route_handlers", + scan=_wrap_scan(scan_nextjs_pages_api_route_handlers), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + "pages_api_route_handlers", + tier=3, + confidence="high", + summary=( + "Pages Router API route exports App Router route-handler HTTP functions " + f"({', '.join(entry.get('exports', [])[:4])})." 
+ ), + detail={"line": entry["line"], "exports": entry.get("exports", [])}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} Pages Router API routes export App Router handlers" + ), + ), + ScannerRule( + id="middleware_misuse", + scan=_wrap_scan(scan_nextjs_route_handlers_and_middleware_misuse), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"middleware_misuse::{entry.get('kind','route')}", + tier=3, + confidence="medium", + summary=( + f"Next.js {entry.get('kind')} misuses route context " + f"({entry.get('reason')})." + ), + detail={ + "line": entry.get("line", 1), + "kind": entry.get("kind"), + "reason": entry.get("reason"), + "findings": entry.get("findings", []), + }, + ), + log_message=lambda count: ( + " nextjs: " f"{count} route handler/middleware context misuse findings" + ), + ), + ScannerRule( + id="server_api_in_client", + scan=_wrap_scan(scan_nextjs_server_navigation_apis_in_client), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"server_api_in_client::{entry.get('api','api')}", + tier=2, + confidence="high", + summary=( + "Client module calls server-only next/navigation API " + f"({entry.get('api')})." 
+ ), + detail={"line": entry["line"], "api": entry.get("api")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} client modules call server-only next/navigation APIs" + ), + ), + ScannerRule( + id="use_server_in_client", + scan=_wrap_scan(scan_nextjs_use_server_in_client), + issue_factory=_make_line_issue( + "nextjs", + "use_server_in_client", + tier=2, + confidence="high", + summary="'use server' directive in a client module (invalid in Next.js).", + ), + log_message=lambda count: ( + " nextjs: " f"{count} client modules contain a module-level 'use server' directive" + ), + ), + ScannerRule( + id="use_server_not_first", + scan=_wrap_scan(scan_nextjs_use_server_not_first), + issue_factory=_make_line_issue( + "nextjs", + "use_server_not_first", + tier=2, + confidence="high", + summary="'use server' directive is present but not the first meaningful line (invalid in Next.js).", + ), + log_message=lambda count: ( + " nextjs: " f"{count} modules contain a non-top-level 'use server' directive" + ), + ), + ScannerRule( + id="app_router_exports_in_pages_router", + scan=_wrap_scan(scan_nextjs_app_router_exports_in_pages_router), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + f"app_router_exports_in_pages_router::{entry.get('export','export')}", + tier=3, + confidence="high", + summary=( + "Pages Router module exports App Router-only module export " + f"({entry.get('export')})." 
+ ), + detail={"line": entry["line"], "export": entry.get("export")}, + ), + log_message=lambda count: ( + " nextjs: " f"{count} Pages Router files export App Router-only module exports" + ), + ), + ScannerRule( + id="server_modules_in_pages_router", + scan=_wrap_scan(scan_nextjs_server_modules_in_pages_router), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + "server_modules_in_pages_router", + tier=3, + confidence="high", + summary=( + ( + "Pages Router module imports App Router server-only modules (" + + ", ".join(entry.get("modules", [])[:4]) + + ")." + ) + if entry.get("modules") + else "Pages Router module imports App Router server-only modules." + ), + detail={ + "line": entry["line"], + "modules": entry.get("modules", []), + "imports": entry.get("imports", []), + }, + ), + log_message=lambda count: ( + " nextjs: " f"{count} Pages Router files import App Router server-only modules" + ), + ), + ScannerRule( + id="next_router_in_app_router", + scan=_wrap_scan(scan_next_router_imports_in_app_router), + issue_factory=_make_line_issue( + "nextjs", + "next_router_in_app_router", + tier=3, + confidence="high", + summary="App Router file imports legacy next/router (prefer next/navigation).", + ), + log_message=lambda count: ( + f" nextjs: {count} App Router files import next/router" + ), + ), + ScannerRule( + id="mixed_routers", + scan=_wrap_info_scan(scan_mixed_router_layout), + issue_factory=lambda entry: make_issue( + "nextjs", + entry["file"], + "mixed_routers", + tier=4, + confidence="low", + summary="Project contains both App Router (app/) and Pages Router (pages/) trees.", + detail={ + "app_roots": entry.get("app_roots", []), + "pages_roots": entry.get("pages_roots", []), + }, + ), + ), +) + + +NEXTJS_SPEC = FrameworkSpec( + id="nextjs", + label="Next.js", + ecosystem="node", + detection=DetectionConfig( + dependencies=("next",), + config_files=( + "next.config.js", + "next.config.mjs", + "next.config.cjs", + "next.config.ts", + ), + 
marker_dirs=("app", "src/app", "pages", "src/pages"), + script_pattern=r"(?:^|\s)next(?:\s|$)", + marker_dirs_imply_presence=False, + ), + excludes=(), + scanners=NEXTJS_SCANNERS, + tools=( + ToolIntegration( + id="next_lint", + label="next lint", + cmd="npx --no-install next lint --format json", + fmt="next_lint", + tier=2, + slow=True, + confidence="high", + ), + ), +) + + +__all__ = ["NEXTJS_SPEC"] diff --git a/desloppify/languages/_framework/frameworks/types.py b/desloppify/languages/_framework/frameworks/types.py new file mode 100644 index 000000000..183ad9c91 --- /dev/null +++ b/desloppify/languages/_framework/frameworks/types.py @@ -0,0 +1,87 @@ +"""Framework spec contracts (detection + scanners + tool integrations).""" + +from __future__ import annotations + +from collections.abc import Callable +from dataclasses import dataclass +from pathlib import Path +from typing import Any + +from desloppify.languages._framework.base.types import LangRuntimeContract +from desloppify.state_io import Issue + +FrameworkEvidence = dict[str, Any] + + +@dataclass(frozen=True) +class DetectionConfig: + """Deterministic framework presence detection hints for an ecosystem.""" + + dependencies: tuple[str, ...] = () + dev_dependencies: tuple[str, ...] = () + config_files: tuple[str, ...] = () + marker_files: tuple[str, ...] = () + marker_dirs: tuple[str, ...] = () + script_pattern: str | None = None + + # Markers are valuable for routing context, but default to "context only" + # so frameworks don't light up purely from directory shape. + marker_dirs_imply_presence: bool = False + + +@dataclass(frozen=True) +class ScannerRule: + """A single scanner rule within a FrameworkSpec.""" + + id: str + requires: tuple[str, ...] 
= () + scan: Callable[[Path, LangRuntimeContract], tuple[list[dict[str, Any]], int]] | None = None + issue_factory: Callable[[dict[str, Any]], Issue] | None = None + log_message: Callable[[int], str] | None = None + + +@dataclass(frozen=True) +class ToolIntegration: + """Framework tool integration (ToolSpec-like + phase semantics).""" + + id: str # detector id (e.g. "next_lint") + label: str + cmd: str + fmt: str + tier: int + slow: bool = False + confidence: str = "medium" + + +@dataclass(frozen=True) +class FrameworkSpec: + """A framework "horizontal layer" spec, analogous to tree-sitter specs.""" + + id: str + label: str + ecosystem: str + detection: DetectionConfig + + excludes: tuple[str, ...] = () + scanners: tuple[ScannerRule, ...] = () + tools: tuple[ToolIntegration, ...] = () + + +@dataclass(frozen=True) +class EcosystemFrameworkDetection: + """Framework presence detection result for a scan path within an ecosystem.""" + + ecosystem: str + package_root: Path + package_json_relpath: str | None + present: dict[str, FrameworkEvidence] + + +__all__ = [ + "DetectionConfig", + "EcosystemFrameworkDetection", + "FrameworkEvidence", + "FrameworkSpec", + "ScannerRule", + "ToolIntegration", +] diff --git a/desloppify/languages/_framework/generic_parts/parsers.py b/desloppify/languages/_framework/generic_parts/parsers.py index 1b4db2a5e..313683ab8 100644 --- a/desloppify/languages/_framework/generic_parts/parsers.py +++ b/desloppify/languages/_framework/generic_parts/parsers.py @@ -200,7 +200,96 @@ def parse_phpstan(output: str, scan_path: Path) -> list[dict]: return entries -PARSERS: dict[str, Callable[[str, Path], list[dict]]] = { +def _extract_json_array(text: str) -> str | None: + """Best-effort: return the first JSON array substring in *text*.""" + start = text.find("[") + if start == -1: + return None + end = text.rfind("]") + if end == -1 or end <= start: + return None + return text[start : end + 1] + + +def _relativize_to_project_root(filepath: str, *, 
scan_path: Path) -> str: + """Resolve a tool-emitted path to a project-root-relative string when possible.""" + from desloppify.base.discovery.paths import get_project_root + + project_root = get_project_root().resolve() + try: + p = Path(filepath) + abs_path = p.resolve() if p.is_absolute() else (scan_path / p).resolve() + try: + return str(abs_path.relative_to(project_root)).replace("\\", "/") + except ValueError: + return str(abs_path) + except Exception: # pragma: no cover + return filepath + + +def parse_next_lint(output: str, scan_path: Path) -> tuple[list[dict], dict]: + """Parse Next.js `next lint --format json` output. + + Returns ``(entries, meta)`` where: + - entries are *per-file* aggregates: {file, line, message, id, detail} + - meta includes ``potential`` (number of files lint reported on) + """ + raw = (output or "").strip() + json_text = _extract_json_array(raw) + if not json_text: + raise ToolParserError("next_lint parser could not find JSON output array") + + data = _load_json_output(json_text, parser_name="next_lint") + if not isinstance(data, list): + raise ToolParserError("next_lint parser expected a JSON array") + + potential = len(data) + entries: list[dict] = [] + for fobj in data: + if not isinstance(fobj, dict): + continue + file_path = fobj.get("filePath") or "" + messages = fobj.get("messages") or [] + if not file_path or not isinstance(messages, list) or not messages: + continue + + rel = _relativize_to_project_root(str(file_path), scan_path=scan_path) + first = next((m for m in messages if isinstance(m, dict)), None) + if first is None: + continue + line = _coerce_line(first.get("line", 0)) or 1 + msg = first.get("message") if isinstance(first.get("message"), str) else "Lint issue" + entries.append( + { + "file": rel, + "line": line if line > 0 else 1, + "id": "lint", + "message": f"next lint: {msg} ({len(messages)} issue(s) in file)", + "detail": { + "count": len(messages), + "messages": [ + { + "line": _coerce_line(m.get("line", 0)) 
or 0, + "column": _coerce_line(m.get("column", 0)) or 0, + "ruleId": m.get("ruleId", "") if isinstance(m.get("ruleId", ""), str) else "", + "message": m.get("message", "") if isinstance(m.get("message", ""), str) else "", + "severity": _coerce_line(m.get("severity", 0)) or 0, + } + for m in messages + if isinstance(m, dict) + ][:50], + }, + } + ) + + return entries, {"potential": potential} + + +ToolParseResult = list[dict] | tuple[list[dict], dict] +ToolParser = Callable[[str, Path], ToolParseResult] + + +PARSERS: dict[str, ToolParser] = { "gnu": parse_gnu, "golangci": parse_golangci, "json": parse_json, @@ -209,12 +298,15 @@ def parse_phpstan(output: str, scan_path: Path) -> list[dict]: "rubocop": parse_rubocop, "cargo": parse_cargo, "eslint": parse_eslint, + "next_lint": parse_next_lint, } __all__ = [ "PARSERS", "ToolParserError", + "ToolParseResult", + "ToolParser", "parse_cargo", "parse_credo", "parse_eslint", @@ -222,5 +314,6 @@ def parse_phpstan(output: str, scan_path: Path) -> list[dict]: "parse_golangci", "parse_json", "parse_phpstan", + "parse_next_lint", "parse_rubocop", ] diff --git a/desloppify/languages/_framework/generic_parts/tool_factories.py b/desloppify/languages/_framework/generic_parts/tool_factories.py index b6722a53c..4d7c518b3 100644 --- a/desloppify/languages/_framework/generic_parts/tool_factories.py +++ b/desloppify/languages/_framework/generic_parts/tool_factories.py @@ -63,12 +63,16 @@ def make_tool_phase( fmt: str, smell_id: str, tier: int, + *, + confidence: str = "medium", + cwd_fn: Callable[[Path, Any], Path] | None = None, ) -> DetectorPhase: """Create a DetectorPhase that runs an external tool and parses output.""" parser = PARSERS[fmt] def run(path: Path, lang: Any) -> tuple[list[dict[str, Any]], dict[str, int]]: - run_result = run_tool_result(cmd, path, parser) + run_path = cwd_fn(path, lang).resolve() if cwd_fn is not None else path + run_result = run_tool_result(cmd, run_path, parser) if run_result.status == "error": 
_record_tool_failure_coverage( lang, @@ -78,20 +82,28 @@ def run(path: Path, lang: Any) -> tuple[list[dict[str, Any]], dict[str, int]]: ) return [], {} entries = list(run_result.entries) + meta = run_result.meta if isinstance(run_result.meta, dict) else {} + meta_potential = meta.get("potential") + potential = meta_potential if isinstance(meta_potential, int) else 0 + + if run_result.status == "empty": + return [], ({smell_id: potential} if potential > 0 else {}) + if not entries: - return [], {} + return [], ({smell_id: potential} if potential > 0 else {}) issues = [ make_issue( smell_id, entry["file"], - f"{smell_id}::{entry['line']}", + str(entry.get("id") or f"{smell_id}::{entry['line']}"), tier=tier, - confidence="medium", - summary=entry["message"], + confidence=str(entry.get("confidence") or confidence), + summary=str(entry.get("summary") or entry["message"]), + detail=entry.get("detail") if isinstance(entry.get("detail"), dict) else None, ) for entry in entries ] - return issues, {smell_id: len(entries)} + return issues, {smell_id: potential if potential > 0 else len(entries)} return DetectorPhase(label, run) diff --git a/desloppify/languages/_framework/generic_parts/tool_runner.py b/desloppify/languages/_framework/generic_parts/tool_runner.py index e8c1d1936..095f6db15 100644 --- a/desloppify/languages/_framework/generic_parts/tool_runner.py +++ b/desloppify/languages/_framework/generic_parts/tool_runner.py @@ -15,6 +15,7 @@ from desloppify.languages._framework.generic_parts.parsers import ToolParserError SubprocessRun = Callable[..., subprocess.CompletedProcess[str]] +ToolParser = Callable[[str, Path], list[dict] | tuple[list[dict], dict]] _SHELL_META_CHARS = re.compile(r"[|&;<>()$`\n]") logger = logging.getLogger(__name__) @@ -26,6 +27,7 @@ class ToolRunResult: entries: list[dict] status: Literal["ok", "empty", "error"] + meta: dict | None = None error_kind: str | None = None message: str | None = None returncode: int | None = None @@ -62,7 +64,7 @@ def 
_output_preview(output: str, *, limit: int = 160) -> str: def run_tool_result( cmd: str, path: Path, - parser: Callable[[str, Path], list[dict]], + parser: ToolParser, *, run_subprocess: SubprocessRun | None = None, ) -> ToolRunResult: @@ -133,7 +135,25 @@ def run_tool_result( message=str(exc), returncode=result.returncode, ) - if not isinstance(parsed, list): + meta: dict | None = None + parsed_entries = parsed + if isinstance(parsed, tuple): + if ( + len(parsed) != 2 + or not isinstance(parsed[0], list) + or not isinstance(parsed[1], dict) + ): + return ToolRunResult( + entries=[], + status="error", + error_kind="parser_shape_error", + message="parser returned invalid (entries, meta) tuple", + returncode=result.returncode, + ) + parsed_entries = parsed[0] + meta = dict(parsed[1]) + + if not isinstance(parsed_entries, list): return ToolRunResult( entries=[], status="error", @@ -141,7 +161,7 @@ def run_tool_result( message="parser returned non-list output", returncode=result.returncode, ) - if not parsed: + if not parsed_entries: if result.returncode not in (0, None): preview = _output_preview(combined) return ToolRunResult( @@ -157,11 +177,13 @@ def run_tool_result( return ToolRunResult( entries=[], status="empty", + meta=meta, returncode=result.returncode, ) return ToolRunResult( - entries=parsed, + entries=parsed_entries, status="ok", + meta=meta, returncode=result.returncode, ) diff --git a/desloppify/languages/_framework/generic_support/core.py b/desloppify/languages/_framework/generic_support/core.py index 4727a8217..980d8bbb1 100644 --- a/desloppify/languages/_framework/generic_support/core.py +++ b/desloppify/languages/_framework/generic_support/core.py @@ -57,6 +57,7 @@ def generic_lang( zone_rules: list[ZoneRule] | None = None, test_coverage_module: Any | None = None, entry_patterns: list[str] | None = None, + frameworks: bool = False, ) -> LangConfig: """Build and register a generic language plugin from tool specs. 
@@ -127,6 +128,20 @@ def generic_lang( zone_rules=opts.zone_rules if opts.zone_rules is not None else generic_zone_rules(extensions), ) + if frameworks: + from desloppify.languages._framework.frameworks.phases import framework_phases + + phases = list(cfg.phases) + fw_phases = framework_phases(name) + + insert_at = len(phases) + for idx, phase in enumerate(phases): + if getattr(phase, "label", "") == "Structural analysis": + insert_at = idx + 1 + break + phases[insert_at:insert_at] = fw_phases + cfg.phases = phases + # Set integration depth — upgrade when tree-sitter provides capabilities. if has_treesitter and opts.depth in ("shallow", "minimal"): cfg.integration_depth = "standard" diff --git a/desloppify/languages/_framework/node/__init__.py b/desloppify/languages/_framework/node/__init__.py new file mode 100644 index 000000000..c86041049 --- /dev/null +++ b/desloppify/languages/_framework/node/__init__.py @@ -0,0 +1,3 @@ +"""Node/JavaScript ecosystem shared helpers (package.json, framework tooling).""" + +from __future__ import annotations diff --git a/desloppify/languages/_framework/node/frameworks/__init__.py b/desloppify/languages/_framework/node/frameworks/__init__.py new file mode 100644 index 000000000..51183dc09 --- /dev/null +++ b/desloppify/languages/_framework/node/frameworks/__init__.py @@ -0,0 +1,13 @@ +"""Node ecosystem framework scanners (Next.js, etc). + +Framework presence detection now lives under +``desloppify.languages._framework.frameworks``. + +This package remains the shared home for framework scanners and helper code so +JS/TS language plugins can reuse the same framework checks without duplicating +logic or importing across plugins. 
+""" + +from __future__ import annotations + +__all__: list[str] = [] diff --git a/desloppify/languages/_framework/node/frameworks/nextjs/README.md b/desloppify/languages/_framework/node/frameworks/nextjs/README.md new file mode 100644 index 000000000..8a6857fd9 --- /dev/null +++ b/desloppify/languages/_framework/node/frameworks/nextjs/README.md @@ -0,0 +1,158 @@ +# Next.js Framework Support (Scanners + Spec) + +This document explains the Next.js framework support used by Desloppify's TypeScript and JavaScript plugins. + +It covers: + +- What the Next.js framework module does +- How framework detection and scanning flow works +- What each file in `desloppify/languages/_framework/node/frameworks/nextjs/` is responsible for +- Which shared files outside this folder affect behavior +- Current limits and safe extension points + +If you are new to this code, start with the "Spec + scan flow" section, then read `scanners.py`. + +## High-level purpose + +The Next.js framework module adds framework-aware smells that generic code-quality detectors do not catch. + +Current scope includes: + +- App Router vs Pages Router migration and misuse signals +- Client/server boundary misuse (`"use client"`, `"use server"`, server-only imports/exports) +- Route handler and middleware context misuse +- Next.js API misuse in wrong router contexts +- Environment variable leakage in client modules +- `next lint` integration as a framework quality gate (`next_lint` detector) + +This module is intentionally heuristic-heavy (regex/file-structure based) so scans remain fast and robust without requiring full compiler semantics. 
+
+## Module map
+
+Files in this folder:
+
+- `desloppify/languages/_framework/node/frameworks/nextjs/__init__.py`
+- `desloppify/languages/_framework/node/frameworks/nextjs/info.py`
+- `desloppify/languages/_framework/node/frameworks/nextjs/scanners.py`
+
+Spec + orchestration lives outside this folder:
+
+- `desloppify/languages/_framework/frameworks/specs/nextjs.py`
+- `desloppify/languages/_framework/frameworks/phases.py`
+
+### What each file does
+
+`__init__.py`:
+
+- Re-exports the framework info contract and the scanner functions for shared imports
+
+`info.py`:
+
+- Defines `NextjsFrameworkInfo`
+- Converts ecosystem detection evidence into Next.js-specific router roots and flags
+
+`scanners.py`:
+
+- Implements all Next.js smell scanners
+- Performs fast source-file discovery and content heuristics
+- Returns normalized scanner entries for the framework spec adapter
+
+## Shared surfaces outside this folder
+
+These files are part of the same feature boundary and should be considered together:
+
+- `desloppify/languages/_framework/frameworks/detection.py`
+- `desloppify/languages/_framework/frameworks/phases.py`
+- `desloppify/languages/_framework/frameworks/specs/nextjs.py`
+- `desloppify/languages/typescript/__init__.py`
+- `desloppify/languages/javascript/__init__.py`
+- `desloppify/languages/_framework/generic_parts/parsers.py` (parser: `parse_next_lint`)
+- `desloppify/languages/_framework/generic_parts/tool_factories.py` (tool phase: `make_tool_phase`)
+- `desloppify/base/discovery/source.py`
+
+### Responsibility split
+
+- `frameworks/detection.py` decides whether Next.js is present for a scan path and where package roots are.
+- `frameworks/specs/nextjs.py` defines the Next.js FrameworkSpec (detection config + scanners + tool integrations).
+- `frameworks/phases.py` adapts specs into `DetectorPhase` objects.
+- `nextjs/info.py` derives routing context (`app_roots`, `pages_roots`) from detection evidence.
+- `nextjs/scanners.py` only finds smell candidates (fast, heuristic).
+
+## Detectors
+
+This module emits findings under:
+
+- `nextjs`
+- `next_lint`
+
+Registry/scoring wiring lives outside this folder in:
+
+- `desloppify/base/registry/catalog_entries.py`
+- `desloppify/base/registry/catalog_models.py`
+- `desloppify/engine/_scoring/policy/core.py`
+
+
+
+## Spec + scan flow in plain language
+
+When TypeScript or JavaScript scans run for a Next.js project, flow is:
+
+1. Ecosystem framework detection (Node) evaluates deterministic presence signals from `package.json`.
+2. Next.js info derives App/Pages router roots from detection evidence (`marker_dir_hits`).
+3. Next.js framework smells phase runs all scanner functions and maps entries into normalized `nextjs` issues.
+4. `next lint` tool phase runs (slow) and maps ESLint JSON output into `next_lint` issues.
+5. Potentials are returned for scoring and state merge.
+
+## `next lint` behavior
+
+The Next.js spec runs:
+
+- `npx --no-install next lint --format json`
+
+Behavior:
+
+- If lint runs and returns JSON, file-level lint findings are emitted (one issue per file).
+- If lint cannot run or output cannot be parsed, coverage is degraded for `next_lint` (shown as a scan coverage warning).
+
+`next lint` runs as a slow phase (`DetectorPhase.slow=True`) so `--skip-slow` skips it automatically.
+
+## Smell families covered
+
+Current high-value families include:
+
+- `"use client"` placement and missing directive checks
+- `"use server"` placement checks (module-level misuse only)
+- Server-only imports in client modules (`next/headers`, `next/server`, `next/cache`, `server-only`, Node built-ins)
+- Server-only Next exports from client modules (`metadata`, `generateMetadata`, `revalidate`, `dynamic`, etc.)
+- Pages Router APIs used under App Router (`getServerSideProps`, `getStaticProps`, etc.)
+- `next/navigation` usage in Pages Router files
+- App Router metadata/config exports in Pages Router files
+- Pages API route files exporting App Router route-handler HTTP functions
+- App Router route handler and middleware misuse
+- `next/head` usage in App Router
+- `next/document` imports outside valid `_document.*` pages context
+- Browser global usage in App Router modules missing `"use client"`
+- Client layout smell and async client component smell
+- Mixed `app/` and `pages/` router project smell
+- Env leakage in client modules via non-`NEXT_PUBLIC_*` `process.env` usage
+
+## Extending this module safely
+
+When adding a new smell:
+
+1. Add scanner logic in `scanners.py`.
+2. Return compact entries (`file`, `line`, and minimal structured detail).
+3. Map entries to `make_issue(...)` in `frameworks/specs/nextjs.py` with clear `id`, `summary`, and `detail`.
+4. Update/extend tests in:
+   - `desloppify/languages/typescript/tests/test_ts_nextjs_framework.py`
+   - `desloppify/languages/javascript/tests/test_js_nextjs_framework.py` (if JS parity applies)
+5. Keep logic shared (do not duplicate TS vs JS framework smell rules).
+
+## Limits and tradeoffs
+
+- Scanners are heuristic, not compiler-accurate.
+- Some patterns are intentionally conservative to avoid noisy false positives.
+- Router/middleware checks rely on conventional Next.js file placement.
+- `next lint` requires project dependencies to be present for full lint execution.
+
+These tradeoffs are deliberate: fast scans with high-signal framework smells, while preserving a clear extension path when stronger analysis is needed.
diff --git a/desloppify/languages/_framework/node/frameworks/nextjs/__init__.py b/desloppify/languages/_framework/node/frameworks/nextjs/__init__.py new file mode 100644 index 000000000..8dc1fc8f9 --- /dev/null +++ b/desloppify/languages/_framework/node/frameworks/nextjs/__init__.py @@ -0,0 +1,58 @@ +"""Next.js framework support shared across JS/TS scans.""" + +from __future__ import annotations + +from .info import NextjsFrameworkInfo, nextjs_info_from_evidence +from .scanners import ( + scan_nextjs_app_router_exports_in_pages_router, + scan_nextjs_async_client_components, + scan_nextjs_browser_globals_missing_use_client, + scan_nextjs_client_layouts, + scan_nextjs_error_files_missing_use_client, + scan_mixed_router_layout, + scan_next_router_imports_in_app_router, + scan_nextjs_env_leaks_in_client, + scan_nextjs_navigation_hooks_missing_use_client, + scan_nextjs_next_document_misuse, + scan_nextjs_next_head_in_app_router, + scan_nextjs_pages_api_route_handlers, + scan_nextjs_pages_router_apis_in_app_router, + scan_nextjs_pages_router_artifacts_in_app_router, + scan_nextjs_route_handlers_and_middleware_misuse, + scan_nextjs_server_navigation_apis_in_client, + scan_nextjs_server_modules_in_pages_router, + scan_nextjs_server_exports_in_client, + scan_nextjs_server_imports_in_client, + scan_nextjs_use_client_not_first, + scan_nextjs_use_server_not_first, + scan_nextjs_use_server_in_client, + scan_rsc_missing_use_client, +) + +__all__ = [ + "NextjsFrameworkInfo", + "nextjs_info_from_evidence", + "scan_nextjs_app_router_exports_in_pages_router", + "scan_nextjs_async_client_components", + "scan_nextjs_browser_globals_missing_use_client", + "scan_nextjs_client_layouts", + "scan_nextjs_error_files_missing_use_client", + "scan_mixed_router_layout", + "scan_next_router_imports_in_app_router", + "scan_nextjs_env_leaks_in_client", + "scan_nextjs_navigation_hooks_missing_use_client", + "scan_nextjs_next_document_misuse", + "scan_nextjs_next_head_in_app_router", + 
"scan_nextjs_pages_api_route_handlers", + "scan_nextjs_pages_router_apis_in_app_router", + "scan_nextjs_pages_router_artifacts_in_app_router", + "scan_nextjs_route_handlers_and_middleware_misuse", + "scan_nextjs_server_navigation_apis_in_client", + "scan_nextjs_server_modules_in_pages_router", + "scan_nextjs_server_exports_in_client", + "scan_nextjs_server_imports_in_client", + "scan_nextjs_use_client_not_first", + "scan_nextjs_use_server_not_first", + "scan_nextjs_use_server_in_client", + "scan_rsc_missing_use_client", +] diff --git a/desloppify/languages/_framework/node/frameworks/nextjs/info.py b/desloppify/languages/_framework/node/frameworks/nextjs/info.py new file mode 100644 index 000000000..1a02b51ea --- /dev/null +++ b/desloppify/languages/_framework/node/frameworks/nextjs/info.py @@ -0,0 +1,49 @@ +"""Next.js framework info derived from ecosystem-level detection evidence.""" + +from __future__ import annotations + +from dataclasses import dataclass +from pathlib import Path +from typing import Any + + +@dataclass(frozen=True) +class NextjsFrameworkInfo: + package_root: Path + package_json_relpath: str | None + app_roots: tuple[str, ...] + pages_roots: tuple[str, ...] 
+ + @property + def uses_app_router(self) -> bool: + return bool(self.app_roots) + + @property + def uses_pages_router(self) -> bool: + return bool(self.pages_roots) + + +def _tuple_str(value: Any) -> tuple[str, ...]: + if not isinstance(value, list): + return () + return tuple(str(v) for v in value if isinstance(v, str)) + + +def nextjs_info_from_evidence( + evidence: dict[str, Any] | None, + *, + package_root: Path, + package_json_relpath: str | None, +) -> NextjsFrameworkInfo: + """Convert generic framework evidence into Next.js routing context.""" + evidence_dict = evidence if isinstance(evidence, dict) else {} + marker_dirs = _tuple_str(evidence_dict.get("marker_dir_hits")) + return NextjsFrameworkInfo( + package_root=package_root, + package_json_relpath=package_json_relpath, + app_roots=tuple(p for p in marker_dirs if p.endswith("/app") or p == "app"), + pages_roots=tuple(p for p in marker_dirs if p.endswith("/pages") or p == "pages"), + ) + + +__all__ = ["NextjsFrameworkInfo", "nextjs_info_from_evidence"] diff --git a/desloppify/languages/_framework/node/frameworks/nextjs/scanners.py b/desloppify/languages/_framework/node/frameworks/nextjs/scanners.py new file mode 100644 index 000000000..6ee3335be --- /dev/null +++ b/desloppify/languages/_framework/node/frameworks/nextjs/scanners.py @@ -0,0 +1,1209 @@ +"""Next.js-specific scanners. + +These scanners are intentionally lightweight (regex/heuristic-based) so they +can run as part of the normal smell phase without requiring a full TS AST. 
+""" + +from __future__ import annotations + +import logging +import re +from pathlib import Path + +from desloppify.base.discovery.paths import get_project_root +from desloppify.base.discovery.source import find_js_ts_and_tsx_files +from desloppify.languages._framework.node.js_text import ( + code_text as _code_text, + strip_js_ts_comments as _strip_ts_comments, +) + +from .info import NextjsFrameworkInfo + +logger = logging.getLogger(__name__) + +_USE_CLIENT_RE = re.compile( + r"""^(?:'use client'|"use client")\s*;?\s*(?://.*)?$""" +) +_MODULE_SPECIFIER_RE = re.compile( + r"""(?:from\s+['"](?P[^'"]+)['"]|require\(\s*['"](?P[^'"]+)['"]\s*\)|import\(\s*['"](?P[^'"]+)['"]\s*\))""" +) +_NEXT_ROUTER_IMPORT_RE = re.compile( + r"""(?:from\s+['"]next/router['"]|require\(\s*['"]next/router['"]\s*\))""" +) +_NEXT_NAV_IMPORT_RE = re.compile( + r"""(?:from\s+['"]next/navigation['"]|require\(\s*['"]next/navigation['"]\s*\)|import\(\s*['"]next/navigation['"]\s*\))""" +) +_NEXT_NAV_HOOK_CALL_RE = re.compile( + r"""\b(?:useRouter|usePathname|useSearchParams|useParams|useSelectedLayoutSegments|useSelectedLayoutSegment)\s*\(""" +) +_CLIENT_HOOK_CALL_RE = re.compile( + r"""\b(?:useState|useEffect|useLayoutEffect|useReducer|useRef|useContext|useTransition|useDeferredValue|useImperativeHandle|useSyncExternalStore|useMemo|useCallback|useId|useInsertionEffect)\s*\(""" +) +_REACT_NAMESPACE_HOOK_CALL_RE = re.compile( + r"""\bReact\.(?:useState|useEffect|useLayoutEffect|useReducer|useRef|useContext|useTransition|useDeferredValue|useImperativeHandle|useSyncExternalStore|useMemo|useCallback|useId|useInsertionEffect)\s*\(""" +) + +_NEXTJS_SERVER_ONLY_IMPORTS: set[str] = { + "next/headers", + "next/server", + "next/cache", + "server-only", +} + +# Heuristic list (not exhaustive). These are commonly invalid in client bundles. 
+_NODE_BUILTIN_MODULES: set[str] = { + "assert", + "buffer", + "child_process", + "cluster", + "crypto", + "dgram", + "dns", + "events", + "fs", + "http", + "https", + "module", + "net", + "os", + "path", + "perf_hooks", + "process", + "stream", + "timers", + "tls", + "tty", + "url", + "util", + "vm", + "worker_threads", + "zlib", +} + +_NEXTJS_SERVER_EXPORT_RE = re.compile( + r"""\bexport\s+(?:(?:const|let|var)\s+(?Pmetadata|revalidate|dynamic|runtime|fetchCache|preferredRegion|maxDuration|dynamicParams|metadataBase|viewport|experimental_ppr)\b|(?:async\s+)?function\s+(?PgenerateMetadata|generateStaticParams|generateViewport)\b)""" +) + +_NEXTJS_PAGES_ROUTER_API_RE = re.compile( + r"""\b(?:export\s+(?:async\s+)?function\s+(?PgetServerSideProps|getStaticProps|getStaticPaths|getInitialProps)\b|export\s+const\s+(?PgetServerSideProps|getStaticProps|getStaticPaths|getInitialProps)\b|\b(?PgetServerSideProps|getStaticProps|getStaticPaths|getInitialProps)\s*=)""" +) + +_PROCESS_ENV_DOT_RE = re.compile(r"""\bprocess\.env\.([A-Z0-9_]+)\b""") +_PROCESS_ENV_BRACKET_RE = re.compile(r"""\bprocess\.env\[\s*['"]([A-Z0-9_]+)['"]\s*\]""") +_CLIENT_ENV_ALLOWLIST: set[str] = {"NODE_ENV"} + +_USE_SERVER_LINE_RE = re.compile(r"""^\s*(?:'use server'|"use server")\s*;?\s*(?://.*)?$""") +_DIRECTIVE_LINE_RE = re.compile(r"""^\s*(?:'[^']*'|"[^"]*")\s*;?\s*(?://.*)?$""") +_ASYNC_EXPORT_DEFAULT_RE = re.compile(r"""\bexport\s+default\s+async\s+(?:function\b|\()""") +_BROWSER_GLOBAL_ACCESS_RE = re.compile( + r"""\b(?Pwindow|document|localStorage|sessionStorage|navigator)\s*(?:\.|\[)""" +) +_INVALID_REACTY_MODULES_IN_ROUTE_CONTEXT: set[str] = { + "next/link", + "next/image", + "next/head", + "next/script", +} + +# NOTE: `redirect()` and `permanentRedirect()` can be called from Client +# Components during the render phase (not event handlers). We intentionally do +# not flag those patterns here. 
+_NEXT_NAV_SERVER_API_CALL_RE = re.compile(r"""\b(?PnotFound)\s*\(""") +_ROUTE_HANDLER_HTTP_EXPORT_RE = re.compile( + r"""\bexport\s+(?:async\s+)?function\s+(GET|POST|PUT|PATCH|DELETE|HEAD|OPTIONS)\b""" +) +_EXPORT_DEFAULT_RE = re.compile(r"""\bexport\s+default\b""") +_NEXTAPI_TYPES_RE = re.compile(r"""\bNextApi(?:Request|Response)\b""") +_RES_STATUS_RE = re.compile(r"""\bres\.status\s*\(""") +_RUNTIME_EDGE_RE = re.compile(r"""\bexport\s+const\s+runtime\s*=\s*['"]edge['"]""") + + +def _has_use_server_directive_at_top(content: str) -> bool: + first = _first_meaningful_line(content.splitlines()) + return bool(first and _USE_SERVER_LINE_RE.match(first)) + + +def _find_use_server_directive_line_anywhere(content: str) -> int | None: + for idx, line in enumerate(content.splitlines()[:200], start=1): + if _USE_SERVER_LINE_RE.match(line.strip()): + return idx + return None + + +def _first_meaningful_line(lines: list[str]) -> str | None: + """Return the first non-empty, non-comment-only line.""" + in_block_comment = False + for line in lines[:80]: + s = line.strip() + if not s: + continue + if in_block_comment: + end = s.find("*/") + if end == -1: + continue + s = s[end + 2 :].strip() + in_block_comment = False + if not s: + continue + if s.startswith("//"): + continue + if s.startswith("/*"): + end = s.find("*/", 2) + if end == -1: + in_block_comment = True + continue + s = s[end + 2 :].strip() + if not s: + continue + return s + return None + + +def _has_use_client_directive(content: str) -> bool: + first = _first_meaningful_line(content.splitlines()) + return bool(first and _USE_CLIENT_RE.match(first)) + + +def _find_use_client_directive_anywhere(content: str) -> int | None: + for idx, line in enumerate(content.splitlines()[:120], start=1): + if _USE_CLIENT_RE.match(line.strip()): + return idx + return None + + +def _is_under_any_root(filepath: str, roots: tuple[str, ...]) -> bool: + return any(filepath == root or filepath.startswith(root.rstrip("/") + "/") for root in 
roots) + + +def _iter_import_specifiers(search_text: str) -> list[dict]: + matches: list[dict] = [] + for match in _MODULE_SPECIFIER_RE.finditer(search_text): + module = match.group("from") or match.group("require") or match.group("import") or "" + if not module: + continue + line_no = search_text[: match.start()].count("\n") + 1 + matches.append({"module": module, "line": line_no}) + return matches + + +def _is_node_builtin(module: str) -> bool: + raw = module[5:] if module.startswith("node:") else module + base = raw.split("/", 1)[0] + return base in _NODE_BUILTIN_MODULES + + +def _find_misplaced_module_use_server_directive(content: str) -> int | None: + """Find module-level 'use server' directives that are not first. + + Intentionally ignores nested inline server actions where `'use server'` is + inside a function body (valid Next.js pattern). + """ + search_text = _strip_ts_comments(content) + first_directive: str | None = None + in_prologue = True + + for idx, line in enumerate(search_text.splitlines()[:300], start=1): + if not line.strip(): + continue + + stripped = line.strip() + is_directive = bool(_DIRECTIVE_LINE_RE.match(stripped)) + is_use_server = bool(_USE_SERVER_LINE_RE.match(stripped)) + + if in_prologue: + if is_directive: + if first_directive is None: + first_directive = stripped + if is_use_server and first_directive != stripped: + return idx + continue + in_prologue = False + + # Top-level misplaced directive after code starts. 
+ if line == line.lstrip() and is_use_server: + return idx + + return None + + +def _is_layout_module(filepath: str) -> bool: + name = Path(filepath).name + return name in {"layout.tsx", "layout.ts", "layout.jsx", "layout.js"} + + +def _is_pages_document_module(filepath: str) -> bool: + name = Path(filepath).name + return name.startswith("_document.") + + +def scan_nextjs_error_files_missing_use_client( + path: Path, info: NextjsFrameworkInfo +) -> tuple[list[dict], int]: + """Find App Router error boundary modules missing a 'use client' directive.""" + if not info.uses_app_router: + return [], 0 + + targets = { + "error.tsx", + "error.ts", + "error.jsx", + "error.js", + "global-error.tsx", + "global-error.ts", + "global-error.jsx", + "global-error.js", + } + entries: list[dict] = [] + scanned = 0 + for filepath in find_js_ts_and_tsx_files(path): + if not _is_under_any_root(filepath, info.app_roots): + continue + if Path(filepath).name not in targets: + continue + + scanned += 1 + try: + full = Path(filepath) if Path(filepath).is_absolute() else get_project_root() / filepath + content = full.read_text() + except (OSError, UnicodeDecodeError) as exc: + logger.debug("Skipping unreadable Next.js candidate %s: %s", filepath, exc) + continue + + if _has_use_client_directive(content): + continue + if _find_use_client_directive_anywhere(content) is not None: + continue + + entries.append({"file": filepath, "line": 1, "name": Path(filepath).name}) + + return entries, scanned + + +def scan_nextjs_pages_router_artifacts_in_app_router( + path: Path, info: NextjsFrameworkInfo +) -> tuple[list[dict], int]: + """Find Pages Router artifact filenames (e.g. 
def scan_nextjs_pages_router_artifacts_in_app_router(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find Pages Router artifact filenames (e.g. _app.tsx) under app/ trees."""
    if not info.uses_app_router:
        return [], 0

    legacy_prefixes = ("_app.", "_document.", "_error.")
    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.app_roots):
            continue

        checked += 1
        basename = Path(rel_path).name
        # These filenames only have meaning under pages/; under app/ they are
        # dead artifacts of an incomplete migration.
        if basename.startswith(legacy_prefixes):
            hits.append({"file": rel_path, "line": 1, "name": basename})

    return hits, checked


def scan_nextjs_use_server_not_first(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find modules where 'use server' exists but is not the first meaningful line."""
    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        # A correctly-placed directive is fine; only misplaced ones are flagged.
        if _has_use_server_directive_at_top(source):
            continue

        misplaced_at = _find_misplaced_module_use_server_directive(source)
        if misplaced_at is not None:
            hits.append({"file": rel_path, "line": misplaced_at})

    return hits, checked


def scan_nextjs_next_head_in_app_router(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find App Router modules importing legacy next/head."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.app_roots):
            continue

        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        stripped_source = _strip_ts_comments(source)
        head_imports = [
            imp
            for imp in _iter_import_specifiers(stripped_source)
            if imp["module"] == "next/head"
        ]
        # Report only the first occurrence per file.
        if head_imports:
            hits.append({"file": rel_path, "line": head_imports[0]["line"]})

    return hits, checked


def scan_nextjs_use_client_not_first(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find App Router modules where 'use client' exists but is not the first meaningful line."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.app_roots):
            continue

        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if _has_use_client_directive(source):
            continue

        misplaced_at = _find_use_client_directive_anywhere(source)
        if misplaced_at is not None:
            hits.append({"file": rel_path, "line": misplaced_at})

    return hits, checked
def scan_nextjs_next_document_misuse(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find next/document imports outside Pages Router `_document.*`."""
    hits: list[dict] = []
    checked = 0

    for rel_path in find_js_ts_and_tsx_files(path):
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        # next/document is only legitimate inside a Pages Router _document.*
        # module; this predicate is per-file, so hoist it out of the loop.
        import_allowed = (
            info.uses_pages_router
            and _is_under_any_root(rel_path, info.pages_roots)
            and _is_pages_document_module(rel_path)
        )
        stripped_source = _strip_ts_comments(source)
        offender_lines = [
            imp["line"]
            for imp in _iter_import_specifiers(stripped_source)
            if imp["module"] == "next/document" and not import_allowed
        ]
        if offender_lines:
            hits.append({"file": rel_path, "line": min(offender_lines)})

    return hits, checked


def scan_nextjs_server_navigation_apis_in_client(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find 'use client' modules calling server-only next/navigation APIs (notFound)."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if not _has_use_client_directive(source):
            continue

        stripped_source = _strip_ts_comments(source)
        # Cheap pre-filter: no next/navigation import, nothing to flag.
        if not _NEXT_NAV_IMPORT_RE.search(stripped_source):
            continue

        code = _code_text(stripped_source)
        hit = _NEXT_NAV_SERVER_API_CALL_RE.search(code)
        if hit is None:
            continue

        hits.append(
            {
                "file": rel_path,
                "line": code.count("\n", 0, hit.start()) + 1,
                "api": hit.group("api"),
            }
        )

    return hits, checked


def scan_nextjs_browser_globals_missing_use_client(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find App Router modules using browser globals without a 'use client' directive."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.app_roots):
            continue
        # Route handlers run server-side; browser globals there are a different
        # problem covered by the route-handler scanner.
        if rel_path.endswith(("/route.ts", "/route.tsx")):
            continue

        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if _has_use_client_directive(source):
            continue

        code = _code_text(_strip_ts_comments(source))
        hit = _BROWSER_GLOBAL_ACCESS_RE.search(code)
        if hit is None:
            continue

        hits.append(
            {
                "file": rel_path,
                "line": code.count("\n", 0, hit.start()) + 1,
                "global": hit.group("global"),
            }
        )

    return hits, checked


def scan_nextjs_client_layouts(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find App Router layout modules that are marked as client components."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.app_roots):
            continue
        if not _is_layout_module(rel_path):
            continue

        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if _has_use_client_directive(source):
            hits.append({"file": rel_path, "line": 1})

    return hits, checked
def scan_nextjs_async_client_components(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find 'use client' modules exporting async default components (invalid in Next.js)."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if not _has_use_client_directive(source):
            continue

        code = _code_text(_strip_ts_comments(source))
        hit = _ASYNC_EXPORT_DEFAULT_RE.search(code)
        if hit is not None:
            hits.append(
                {"file": rel_path, "line": code.count("\n", 0, hit.start()) + 1}
            )

    return hits, checked


def scan_nextjs_use_server_in_client(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find 'use client' modules that include a 'use server' directive."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if not _has_use_client_directive(source):
            continue

        # Only module-level 'use server' directives are invalid in 'use client'
        # modules. Inline server actions (e.g. inside a function body) are
        # valid and must not be flagged.
        misplaced_at = _find_misplaced_module_use_server_directive(source)
        if misplaced_at is not None:
            hits.append({"file": rel_path, "line": misplaced_at})

    return hits, checked


def scan_nextjs_server_modules_in_pages_router(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find Pages Router modules importing App Router server-only Next.js modules."""
    if not info.uses_pages_router:
        return [], 0

    def _inside_pages_api(fp: str) -> bool:
        # API routes under pages/api may legitimately touch server modules.
        for root in info.pages_roots:
            base = root.rstrip("/")
            if fp == base + "/api" or fp.startswith(base + "/api/"):
                return True
        return False

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.pages_roots):
            continue
        if _inside_pages_api(rel_path):
            continue

        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        offenders = [
            imp
            for imp in _iter_import_specifiers(_strip_ts_comments(source))
            if imp["module"] in _NEXTJS_SERVER_ONLY_IMPORTS
        ]
        if not offenders:
            continue

        hits.append(
            {
                "file": rel_path,
                "line": offenders[0]["line"],
                "imports": offenders,
                "modules": sorted({o["module"] for o in offenders}),
            }
        )

    return hits, checked


def scan_nextjs_pages_api_route_handlers(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find Pages Router API routes using App Router route handler patterns (export GET/POST/etc)."""
    if not info.uses_pages_router:
        return [], 0

    def _inside_pages_api(fp: str) -> bool:
        for root in info.pages_roots:
            base = root.rstrip("/")
            if fp == base + "/api" or fp.startswith(base + "/api/"):
                return True
        return False

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.pages_roots):
            continue
        if not _inside_pages_api(rel_path):
            continue

        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        code = _code_text(_strip_ts_comments(source))
        hit = _ROUTE_HANDLER_HTTP_EXPORT_RE.search(code)
        if hit is None:
            continue

        hits.append(
            {
                "file": rel_path,
                "line": code.count("\n", 0, hit.start()) + 1,
                "method": hit.group(1),
            }
        )

    return hits, checked
def scan_nextjs_app_router_exports_in_pages_router(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find Pages Router modules exporting App Router metadata/config exports.

    Returns ``(entries, scanned)`` where each entry records the file, the line
    of the first offending export, all matched exports, and the distinct names.
    """
    if not info.uses_pages_router:
        return [], 0

    entries: list[dict] = []
    scanned = 0
    for filepath in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(filepath, info.pages_roots):
            continue

        scanned += 1
        try:
            full = Path(filepath) if Path(filepath).is_absolute() else get_project_root() / filepath
            content = full.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", filepath, exc)
            continue

        search_text = _strip_ts_comments(content)
        matches: list[dict] = []
        for m in _NEXTJS_SERVER_EXPORT_RE.finditer(search_text):
            name = m.group("const_name") or m.group("fn_name") or ""
            if not name:
                continue
            line_no = search_text[: m.start()].count("\n") + 1
            matches.append({"name": name, "line": line_no})
        if not matches:
            continue

        entries.append(
            {
                "file": filepath,
                "line": matches[0]["line"],
                "exports": matches,
                "names": sorted({mm["name"] for mm in matches}),
            }
        )

    return entries, scanned


def scan_rsc_missing_use_client(path: Path, info: NextjsFrameworkInfo) -> tuple[list[dict], int]:
    """Find App Router modules that appear to use client-only React hooks without 'use client'."""
    if not info.uses_app_router:
        return [], 0

    entries: list[dict] = []
    scanned = 0
    for filepath in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(filepath, info.app_roots):
            continue

        scanned += 1
        try:
            full = Path(filepath) if Path(filepath).is_absolute() else get_project_root() / filepath
            content = full.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", filepath, exc)
            continue

        if _has_use_client_directive(content):
            continue
        if _find_use_client_directive_anywhere(content) is not None:
            continue

        # FIX: strip comments before blanking strings, matching every other
        # hook-detection scanner in this module (previously raw `content` was
        # passed straight to _code_text, so a hook name mentioned in a comment
        # or JSDoc could produce a false positive).
        code = _code_text(_strip_ts_comments(content))
        match = _CLIENT_HOOK_CALL_RE.search(code) or _REACT_NAMESPACE_HOOK_CALL_RE.search(code)
        if not match:
            continue

        line_no = code[: match.start()].count("\n") + 1
        # Recover the hook name from the matched call text (drop the "(").
        hook = match.group(0).split("(")[0].strip()
        entries.append(
            {
                "file": filepath,
                "line": line_no,
                "hook": hook,
            }
        )

    return entries, scanned


def scan_nextjs_navigation_hooks_missing_use_client(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find App Router modules using next/navigation hooks without 'use client'."""
    if not info.uses_app_router:
        return [], 0

    entries: list[dict] = []
    scanned = 0
    for filepath in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(filepath, info.app_roots):
            continue

        scanned += 1
        try:
            full = Path(filepath) if Path(filepath).is_absolute() else get_project_root() / filepath
            content = full.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", filepath, exc)
            continue

        if _has_use_client_directive(content):
            continue
        if _find_use_client_directive_anywhere(content) is not None:
            continue

        code = _code_text(_strip_ts_comments(content))
        match = _NEXT_NAV_HOOK_CALL_RE.search(code)
        if not match:
            continue

        line_no = code[: match.start()].count("\n") + 1
        hook = match.group(0).split("(")[0].strip()
        entries.append({"file": filepath, "line": line_no, "hook": hook})

    return entries, scanned
def scan_nextjs_server_imports_in_client(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find 'use client' modules importing server-only APIs (Next server modules, node built-ins)."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if not _has_use_client_directive(source):
            continue

        offenders = [
            imp
            for imp in _iter_import_specifiers(_strip_ts_comments(source))
            if imp["module"] in _NEXTJS_SERVER_ONLY_IMPORTS or _is_node_builtin(imp["module"])
        ]
        if not offenders:
            continue

        hits.append(
            {
                "file": rel_path,
                "line": offenders[0]["line"],
                "imports": offenders,
                "modules": sorted({o["module"] for o in offenders}),
            }
        )

    return hits, checked


def scan_next_router_imports_in_app_router(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find App Router files importing legacy `next/router`."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.app_roots):
            continue
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        stripped_source = _strip_ts_comments(source)
        hit = _NEXT_ROUTER_IMPORT_RE.search(stripped_source)
        if hit is not None:
            hits.append(
                {
                    "file": rel_path,
                    "line": stripped_source.count("\n", 0, hit.start()) + 1,
                }
            )

    return hits, checked


def scan_nextjs_server_exports_in_client(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find 'use client' modules exporting server-only Next.js metadata/config exports."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        if not _has_use_client_directive(source):
            continue

        stripped_source = _strip_ts_comments(source)
        found: list[dict] = []
        for m in _NEXTJS_SERVER_EXPORT_RE.finditer(stripped_source):
            export_name = m.group("const_name") or m.group("fn_name") or ""
            if export_name:
                found.append(
                    {
                        "name": export_name,
                        "line": stripped_source.count("\n", 0, m.start()) + 1,
                    }
                )

        if not found:
            continue

        hits.append(
            {
                "file": rel_path,
                "line": found[0]["line"],
                "exports": found,
                "names": sorted({f["name"] for f in found}),
            }
        )

    return hits, checked


def scan_nextjs_pages_router_apis_in_app_router(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find Pages Router data fetching APIs used under the App Router tree."""
    if not info.uses_app_router:
        return [], 0

    hits: list[dict] = []
    checked = 0
    for rel_path in find_js_ts_and_tsx_files(path):
        if not _is_under_any_root(rel_path, info.app_roots):
            continue

        checked += 1
        candidate = Path(rel_path)
        if not candidate.is_absolute():
            candidate = get_project_root() / rel_path
        try:
            source = candidate.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", rel_path, exc)
            continue

        code = _code_text(_strip_ts_comments(source))
        found: list[dict] = []
        for m in _NEXTJS_PAGES_ROUTER_API_RE.finditer(code):
            api_name = m.group("fn") or m.group("const") or m.group("assign") or ""
            if api_name:
                found.append(
                    {"name": api_name, "line": code.count("\n", 0, m.start()) + 1}
                )

        if not found:
            continue

        hits.append(
            {
                "file": rel_path,
                "line": found[0]["line"],
                "apis": found,
                "names": sorted({f["name"] for f in found}),
            }
        )

    return hits, checked
def scan_nextjs_env_leaks_in_client(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Find 'use client' modules that reference non-NEXT_PUBLIC_* env vars via process.env.

    Next.js only inlines NEXT_PUBLIC_* variables into client bundles, so any
    other name reads as undefined in the browser (and usually signals a secret
    that was meant to stay server-side).
    """
    if not info.uses_app_router:
        return [], 0

    entries: list[dict] = []
    scanned = 0
    for filepath in find_js_ts_and_tsx_files(path):
        scanned += 1
        try:
            full = Path(filepath) if Path(filepath).is_absolute() else get_project_root() / filepath
            content = full.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", filepath, exc)
            continue

        if not _has_use_client_directive(content):
            continue

        code = _code_text(_strip_ts_comments(content))
        # Collect both dot and bracket access forms with their line numbers.
        occurrences: list[tuple[str, int]] = []
        for m in _PROCESS_ENV_DOT_RE.finditer(code):
            name = m.group(1)
            line_no = code[: m.start()].count("\n") + 1
            occurrences.append((name, line_no))
        for m in _PROCESS_ENV_BRACKET_RE.finditer(code):
            name = m.group(1)
            line_no = code[: m.start()].count("\n") + 1
            occurrences.append((name, line_no))

        bad_occurrences = [
            (name, line_no)
            for (name, line_no) in occurrences
            if not name.startswith("NEXT_PUBLIC_") and name not in _CLIENT_ENV_ALLOWLIST
        ]
        if not bad_occurrences:
            continue

        bad_vars = sorted({name for (name, _) in bad_occurrences})
        first_line = min(line_no for (_, line_no) in bad_occurrences)
        entries.append({"file": filepath, "line": first_line, "vars": bad_vars})

    return entries, scanned


def scan_nextjs_route_handlers_and_middleware_misuse(
    path: Path, info: NextjsFrameworkInfo
) -> tuple[list[dict], int]:
    """Special-case checks for `app/**/route.ts(x)` and `middleware.ts`.

    Both file kinds run server-side only, so client/React constructs, legacy
    Pages Router API shapes, and (for edge runtime / middleware) Node built-in
    imports are all flagged. Each entry carries the per-file ``findings`` list.
    """
    if not info.uses_app_router:
        return [], 0

    entries: list[dict] = []
    scanned = 0

    def _is_route_handler(fp: str) -> bool:
        # route.* anywhere under an App Router root.
        if not _is_under_any_root(fp, info.app_roots):
            return False
        return fp.endswith(("/route.ts", "/route.tsx", "/route.js", "/route.jsx")) or fp in {
            "route.ts",
            "route.tsx",
            "route.js",
            "route.jsx",
        }

    def _is_middleware(fp: str) -> bool:
        # Middleware lives at the project root or under src/.
        return fp in {
            "middleware.ts",
            "middleware.tsx",
            "middleware.js",
            "middleware.jsx",
            "src/middleware.ts",
            "src/middleware.tsx",
            "src/middleware.js",
            "src/middleware.jsx",
        }

    for filepath in find_js_ts_and_tsx_files(path):
        if not (_is_route_handler(filepath) or _is_middleware(filepath)):
            continue

        scanned += 1
        try:
            full = Path(filepath) if Path(filepath).is_absolute() else get_project_root() / filepath
            content = full.read_text()
        except (OSError, UnicodeDecodeError) as exc:
            logger.debug("Skipping unreadable Next.js candidate %s: %s", filepath, exc)
            continue

        search_text = _strip_ts_comments(content)
        code_text = _code_text(search_text)
        findings: list[dict] = []

        if _has_use_client_directive(content):
            findings.append({"kind": "use_client", "line": 1})

        react_import = re.search(
            r"""(?:from\s+['"]react['"]|require\(\s*['"]react['"]\s*\))""",
            search_text,
        )
        if react_import:
            findings.append(
                {
                    "kind": "react_import",
                    "line": search_text[: react_import.start()].count("\n") + 1,
                }
            )

        hook_call = _CLIENT_HOOK_CALL_RE.search(code_text) or _REACT_NAMESPACE_HOOK_CALL_RE.search(code_text)
        if hook_call:
            findings.append(
                {
                    "kind": "react_hook_call",
                    # FIX: the match offset is into code_text, so count newlines
                    # in code_text too (previously search_text was sliced with a
                    # code_text offset, which can misreport the line whenever the
                    # two texts differ).
                    "line": code_text[: hook_call.start()].count("\n") + 1,
                }
            )

        nav_import = _NEXT_NAV_IMPORT_RE.search(search_text)
        if nav_import:
            findings.append(
                {
                    "kind": "next_navigation_import",
                    "line": search_text[: nav_import.start()].count("\n") + 1,
                }
            )

        if _is_route_handler(filepath):
            # Route handlers must export HTTP-method functions, never a default
            # component, and must not use Pages Router API shapes.
            default_export = _EXPORT_DEFAULT_RE.search(code_text)
            if default_export:
                findings.append(
                    {
                        "kind": "default_export",
                        "line": code_text[: default_export.start()].count("\n") + 1,
                    }
                )

            nextapi = _NEXTAPI_TYPES_RE.search(code_text)
            if nextapi:
                findings.append(
                    {
                        "kind": "next_api_types",
                        "line": code_text[: nextapi.start()].count("\n") + 1,
                    }
                )

            res_status = _RES_STATUS_RE.search(code_text)
            if res_status:
                findings.append(
                    {
                        "kind": "res_status_usage",
                        "line": code_text[: res_status.start()].count("\n") + 1,
                    }
                )

        imports = _iter_import_specifiers(search_text)
        for imp in imports:
            module = imp["module"]
            if module in _INVALID_REACTY_MODULES_IN_ROUTE_CONTEXT:
                findings.append({"kind": f"invalid_import::{module}", "line": imp["line"]})

        if _is_middleware(filepath):
            # Middleware always runs on the edge runtime: no Node built-ins.
            for imp in imports:
                module = imp["module"]
                if _is_node_builtin(module):
                    findings.append({"kind": f"node_builtin_import::{module}", "line": imp["line"]})
        elif _is_route_handler(filepath):
            # Route handlers only lose Node built-ins when opted into edge.
            runtime_edge = _RUNTIME_EDGE_RE.search(code_text)
            if runtime_edge:
                for imp in imports:
                    module = imp["module"]
                    if _is_node_builtin(module):
                        findings.append(
                            {"kind": f"edge_runtime_node_builtin_import::{module}", "line": imp["line"]}
                        )

        if filepath.endswith(".tsx"):
            jsx_return = re.search(r"""\breturn\s*<""", code_text)
            if jsx_return:
                findings.append(
                    {
                        "kind": "jsx_return",
                        "line": code_text[: jsx_return.start()].count("\n") + 1,
                    }
                )

        if not findings:
            continue

        kind = "route_handler" if _is_route_handler(filepath) else "middleware"
        entries.append(
            {"file": filepath, "line": findings[0]["line"], "kind": kind, "findings": findings}
        )

    return entries, scanned


def scan_mixed_router_layout(info: NextjsFrameworkInfo) -> list[dict]:
    """Project-level check: both App Router and Pages Router present."""
    if not (info.uses_app_router and info.uses_pages_router):
        return []
    return [
        {
            "file": info.package_json_relpath or "package.json",
            "app_roots": list(info.app_roots),
            "pages_roots": list(info.pages_roots),
        }
    ]
"scan_nextjs_env_leaks_in_client", + "scan_nextjs_navigation_hooks_missing_use_client", + "scan_nextjs_next_document_misuse", + "scan_nextjs_next_head_in_app_router", + "scan_nextjs_pages_router_apis_in_app_router", + "scan_nextjs_pages_api_route_handlers", + "scan_nextjs_pages_router_artifacts_in_app_router", + "scan_nextjs_route_handlers_and_middleware_misuse", + "scan_nextjs_server_navigation_apis_in_client", + "scan_nextjs_server_modules_in_pages_router", + "scan_nextjs_server_exports_in_client", + "scan_nextjs_server_imports_in_client", + "scan_nextjs_use_client_not_first", + "scan_nextjs_use_server_not_first", + "scan_nextjs_use_server_in_client", + "scan_rsc_missing_use_client", +] diff --git a/desloppify/languages/_framework/node/js_text.py b/desloppify/languages/_framework/node/js_text.py new file mode 100644 index 000000000..a1790d55a --- /dev/null +++ b/desloppify/languages/_framework/node/js_text.py @@ -0,0 +1,76 @@ +"""JavaScript/TypeScript-oriented text helpers. + +These helpers are intentionally framework-agnostic and live under the shared +Node layer so they can be used by framework scanners across JS/TS plugins. 
+""" + +from __future__ import annotations + +from collections.abc import Generator + +from desloppify.base.text_utils import strip_c_style_comments + + +def strip_js_ts_comments(text: str) -> str: + """Strip // and /* */ comments while preserving string literals.""" + return strip_c_style_comments(text) + + +def scan_code(text: str) -> Generator[tuple[int, str, bool], None, None]: + """Yield ``(index, char, in_string)`` tuples while handling escapes.""" + i = 0 + in_str = None + while i < len(text): + ch = text[i] + if in_str: + if ch == "\\" and i + 1 < len(text): + yield (i, ch, True) + i += 1 + yield (i, text[i], True) + i += 1 + continue + if ch == in_str: + in_str = None + yield (i, ch, in_str is not None) + else: + if ch in ("'", '"', "`"): + in_str = ch + yield (i, ch, True) + else: + yield (i, ch, False) + i += 1 + + +def code_text(text: str) -> str: + """Blank string literals and ``//`` comments to spaces, preserving positions.""" + out = list(text) + in_line_comment = False + prev_code_idx = -2 + prev_code_ch = "" + for i, ch, in_s in scan_code(text): + if ch == "\n": + in_line_comment = False + prev_code_ch = "" + continue + if in_line_comment: + out[i] = " " + continue + if in_s: + out[i] = " " + continue + if ch == "/" and prev_code_ch == "/" and prev_code_idx == i - 1: + out[prev_code_idx] = " " + out[i] = " " + in_line_comment = True + prev_code_ch = "" + continue + prev_code_idx = i + prev_code_ch = ch + return "".join(out) + + +__all__ = [ + "code_text", + "scan_code", + "strip_js_ts_comments", +] diff --git a/desloppify/languages/javascript/__init__.py b/desloppify/languages/javascript/__init__.py index 9f5673357..d885ee8ef 100644 --- a/desloppify/languages/javascript/__init__.py +++ b/desloppify/languages/javascript/__init__.py @@ -1,9 +1,12 @@ """JavaScript/JSX language plugin — ESLint.""" +from __future__ import annotations + from desloppify.languages._framework.generic_support.core import generic_lang from 
desloppify.languages._framework.treesitter import JS_SPEC -generic_lang( + +cfg = generic_lang( name="javascript", extensions=[".js", ".jsx", ".mjs", ".cjs"], tools=[ @@ -21,6 +24,7 @@ detect_markers=["package.json"], default_src="src", treesitter_spec=JS_SPEC, + frameworks=True, ) __all__ = [ diff --git a/desloppify/languages/javascript/tests/test_js_nextjs_framework.py b/desloppify/languages/javascript/tests/test_js_nextjs_framework.py new file mode 100644 index 000000000..9c9523866 --- /dev/null +++ b/desloppify/languages/javascript/tests/test_js_nextjs_framework.py @@ -0,0 +1,83 @@ +"""Tests for JavaScript Next.js framework smells integration.""" + +from __future__ import annotations + +from pathlib import Path +from types import SimpleNamespace + +import pytest + +import desloppify.languages.javascript # noqa: F401 (registration side effect) +from desloppify.languages.framework import get_lang + + +@pytest.fixture(autouse=True) +def _root(tmp_path, set_project_root): + """Point PROJECT_ROOT at the tmp directory via RuntimeContext.""" + + +def _write(tmp_path: Path, name: str, content: str) -> Path: + p = tmp_path / name + p.parent.mkdir(parents=True, exist_ok=True) + p.write_text(content) + return p + + +class _FakeLang(SimpleNamespace): + zone_map = None + dep_graph = None + file_finder = None + + def __init__(self): + super().__init__(review_cache={}, detector_coverage={}, coverage_warnings=[]) + + +def test_javascript_plugin_includes_nextjs_framework_phases_and_next_lint_is_slow(): + cfg = get_lang("javascript") + labels = [getattr(p, "label", "") for p in cfg.phases] + assert "Next.js framework smells" in labels + lint = next(p for p in cfg.phases if getattr(p, "label", "") == "next lint") + assert lint.slow is True + + +def test_nextjs_smells_phase_emits_smells_when_next_is_present(tmp_path: Path): + _write( + tmp_path, + "package.json", + '{"dependencies": {"next": "14.0.0", "react": "18.3.0"}}\n', + ) + _write( + tmp_path, + "app/server-in-client.jsx", 
+ "'use client'\nimport fs from 'node:fs'\nexport default function X(){return null}\n", + ) + + cfg = get_lang("javascript") + phase = next(p for p in cfg.phases if getattr(p, "label", "") == "Next.js framework smells") + issues, potentials = phase.run(tmp_path, _FakeLang()) + detectors = {issue.get("detector") for issue in issues} + assert "nextjs" in detectors + assert potentials.get("nextjs", 0) >= 1 + assert any("server_import_in_client" in str(issue.get("id", "")) for issue in issues) + + +def test_nextjs_smells_phase_scans_jsx_error_and_js_middleware(tmp_path: Path): + _write( + tmp_path, + "package.json", + '{"dependencies": {"next": "14.0.0", "react": "18.3.0"}}\n', + ) + _write(tmp_path, "app/error.jsx", "export default function Error(){ return null }\n") + _write( + tmp_path, + "middleware.js", + "'use client'\nimport React from 'react'\nexport function middleware(){ return null }\n", + ) + + cfg = get_lang("javascript") + phase = next(p for p in cfg.phases if getattr(p, "label", "") == "Next.js framework smells") + issues, potentials = phase.run(tmp_path, _FakeLang()) + ids = {issue["id"] for issue in issues} + assert any("error_file_missing_use_client" in issue_id for issue_id in ids) + assert any("middleware_misuse" in issue_id for issue_id in ids) + assert potentials.get("nextjs", 0) >= 1 diff --git a/desloppify/languages/typescript/__init__.py b/desloppify/languages/typescript/__init__.py index aedfcb57a..97a4a86cb 100644 --- a/desloppify/languages/typescript/__init__.py +++ b/desloppify/languages/typescript/__init__.py @@ -15,6 +15,7 @@ LangConfig, LangSecurityResult, ) +from desloppify.languages._framework.frameworks.phases import framework_phases from desloppify.languages._framework.registry.registration import register_full_plugin from desloppify.languages._framework.registry.state import register_lang_hooks from desloppify.languages.typescript import test_coverage as ts_test_coverage_hooks @@ -118,6 +119,7 @@ def __init__(self): 
detector_phase_signature(), detector_phase_test_coverage(), DetectorPhase("Code smells", phase_smells), + *framework_phases("typescript"), detector_phase_security(), *shared_subjective_duplicates_tail(), ], diff --git a/desloppify/languages/typescript/detectors/unused.py b/desloppify/languages/typescript/detectors/unused.py index fee3d4ab3..16cfca3fb 100644 --- a/desloppify/languages/typescript/detectors/unused.py +++ b/desloppify/languages/typescript/detectors/unused.py @@ -46,14 +46,27 @@ def _run_tsc_unused_check( project_root: Path, tsconfig_path: Path, ) -> subprocess.CompletedProcess[str]: - """Run the fixed `npx tsc` unused-symbol check for one project root.""" + """Run the unused-symbol check for one project root. + + Prefers `npx tsc` (project-local), then `node_modules/.bin/tsc`, then `tsc`. + """ npx_path = shutil.which("npx") - if not npx_path: - raise OSError("npx executable not found in PATH") + if npx_path: + cmd = [npx_path, "tsc"] + else: + local_tsc = project_root / "node_modules" / ".bin" / "tsc" + if local_tsc.is_file(): + cmd = [str(local_tsc)] + else: + tsc_path = shutil.which("tsc") + if tsc_path: + cmd = [tsc_path] + else: + raise OSError("TypeScript compiler not found (npx/tsc)") + return _proc_runtime.run( # nosec B603 [ - npx_path, - "tsc", + *cmd, "--project", str(tsconfig_path), "--noEmit", diff --git a/desloppify/languages/typescript/phases_smells.py b/desloppify/languages/typescript/phases_smells.py index e5ea4e741..2d0a4aad5 100644 --- a/desloppify/languages/typescript/phases_smells.py +++ b/desloppify/languages/typescript/phases_smells.py @@ -106,10 +106,12 @@ def phase_smells(path: Path, lang: LangRuntimeContract) -> tuple[list[Issue], di if bool_entries: log(f" react: {len(bool_entries)} boolean state explosions") - return results, { + potentials: dict[str, int] = { "smells": adjust_potential(lang.zone_map, total_smell_files), "react": total_effects, } + return results, potentials + __all__ = ["phase_smells"] diff --git 
a/desloppify/languages/typescript/tests/test_ts_nextjs_framework.py b/desloppify/languages/typescript/tests/test_ts_nextjs_framework.py new file mode 100644 index 000000000..57d48d5fc --- /dev/null +++ b/desloppify/languages/typescript/tests/test_ts_nextjs_framework.py @@ -0,0 +1,229 @@ +"""Tests for Next.js framework spec integration (TypeScript).""" + +from __future__ import annotations + +from pathlib import Path +from types import SimpleNamespace + +import pytest + +from desloppify.engine.planning import scan as plan_scan_mod +from desloppify.languages._framework.frameworks.detection import detect_ecosystem_frameworks +from desloppify.languages._framework.node.frameworks.nextjs.info import ( + nextjs_info_from_evidence, +) +from desloppify.languages._framework.node.frameworks.nextjs.scanners import ( + scan_nextjs_server_modules_in_pages_router, + scan_nextjs_server_navigation_apis_in_client, + scan_nextjs_use_server_in_client, + scan_nextjs_use_server_not_first, +) +from desloppify.languages.framework import make_lang_run +from desloppify.languages.typescript import TypeScriptConfig + + +@pytest.fixture(autouse=True) +def _root(tmp_path, set_project_root): + """Point PROJECT_ROOT at the tmp directory via RuntimeContext.""" + + +def _write(tmp_path: Path, name: str, content: str) -> Path: + p = tmp_path / name + p.parent.mkdir(parents=True, exist_ok=True) + p.write_text(content) + return p + + +class _FakeLang(SimpleNamespace): + zone_map = None + dep_graph = None + file_finder = None + + def __init__(self): + super().__init__(review_cache={}, detector_coverage={}, coverage_warnings=[]) + + +def test_detect_nextjs_present_when_next_dependency_and_app_present(tmp_path: Path): + _write(tmp_path, "package.json", '{"dependencies": {"next": "14.0.0"}}\n') + _write(tmp_path, "app/page.tsx", "export default function Page() { return
}\n") + + detection = detect_ecosystem_frameworks(tmp_path, None, "node") + assert detection.package_root == tmp_path.resolve() + assert detection.package_json_relpath == "package.json" + assert "nextjs" in detection.present + assert "app" in (detection.present["nextjs"].get("marker_dir_hits") or []) + + +def test_detect_nextjs_absent_when_only_app_tree_exists(tmp_path: Path): + _write(tmp_path, "package.json", '{"dependencies": {"react": "18.3.0"}}\n') + _write(tmp_path, "app/page.tsx", "export default function Page() { return
}\n") + + detection = detect_ecosystem_frameworks(tmp_path, None, "node") + assert "nextjs" not in detection.present + + +def test_detect_nextjs_package_root_for_external_scan_path(tmp_path: Path): + external = tmp_path.parent / f"{tmp_path.name}-external-next" + external.mkdir(parents=True, exist_ok=True) + (external / "package.json").write_text('{"dependencies": {"next": "14.0.0"}}\n') + (external / "app").mkdir(parents=True, exist_ok=True) + (external / "app" / "page.tsx").write_text("export default function Page(){return
}\n") + + detection = detect_ecosystem_frameworks(external, None, "node") + assert detection.package_root == external.resolve() + assert detection.package_json_relpath is not None + assert detection.package_json_relpath.endswith("package.json") + assert "nextjs" in detection.present + + +def test_use_server_not_first_ignores_nested_inline_actions(tmp_path: Path): + _write(tmp_path, "package.json", '{"dependencies": {"next": "14.0.0"}}\n') + _write( + tmp_path, + "app/inline-action.tsx", + ( + "export default async function Page() {\n" + " async function doAction() {\n" + " 'use server'\n" + " return 1\n" + " }\n" + " return
{String(!!doAction)}
\n" + "}\n" + ), + ) + _write( + tmp_path, + "app/misplaced.ts", + "export const x = 1\n'use server'\nexport async function action(){ return 1 }\n", + ) + + info = nextjs_info_from_evidence( + {"marker_dir_hits": ["app"]}, + package_root=tmp_path.resolve(), + package_json_relpath="package.json", + ) + entries, _ = scan_nextjs_use_server_not_first(tmp_path, info) + files = {entry["file"] for entry in entries} + assert "app/misplaced.ts" in files + assert "app/inline-action.tsx" not in files + + +def test_use_server_in_client_ignores_comments_and_string_literals(tmp_path: Path): + _write(tmp_path, "package.json", '{"dependencies": {"next": "14.0.0"}}\n') + _write( + tmp_path, + "app/page.tsx", + ( + "'use client'\n" + 'console.log("use server")\n' + "// 'use server'\n" + "export default function X(){return null}\n" + ), + ) + + info = nextjs_info_from_evidence( + {"marker_dir_hits": ["app"]}, + package_root=tmp_path.resolve(), + package_json_relpath="package.json", + ) + entries, _ = scan_nextjs_use_server_in_client(tmp_path, info) + assert not entries + + +def test_server_navigation_apis_in_client_only_flags_not_found(tmp_path: Path): + _write(tmp_path, "package.json", '{"dependencies": {"next": "14.0.0"}}\n') + _write( + tmp_path, + "app/client-redirect.tsx", + ( + "'use client'\n" + "import { redirect } from 'next/navigation'\n" + "export default function X(){ redirect('/'); return null }\n" + ), + ) + _write( + tmp_path, + "app/client-notfound.tsx", + ( + "'use client'\n" + "import { notFound } from 'next/navigation'\n" + "export default function X(){ notFound(); return null }\n" + ), + ) + + info = nextjs_info_from_evidence( + {"marker_dir_hits": ["app"]}, + package_root=tmp_path.resolve(), + package_json_relpath="package.json", + ) + entries, _ = scan_nextjs_server_navigation_apis_in_client(tmp_path, info) + files = {entry["file"] for entry in entries} + assert "app/client-notfound.tsx" in files + assert "app/client-redirect.tsx" not in files + + +def 
test_server_modules_in_pages_router_skips_pages_api_routes(tmp_path: Path): + _write(tmp_path, "package.json", '{"dependencies": {"next": "14.0.0"}}\n') + _write( + tmp_path, + "pages/api/edge.ts", + ( + "import { NextResponse } from 'next/server'\n" + "export const config = { runtime: 'edge' }\n" + "export default function handler(){ return NextResponse.json({ ok: true }) }\n" + ), + ) + + info = nextjs_info_from_evidence( + {"marker_dir_hits": ["pages"]}, + package_root=tmp_path.resolve(), + package_json_relpath="package.json", + ) + entries, _ = scan_nextjs_server_modules_in_pages_router(tmp_path, info) + assert not entries + + +def test_typescript_config_includes_nextjs_framework_phases_and_next_lint_is_slow(): + cfg = TypeScriptConfig() + labels = [getattr(p, "label", "") for p in cfg.phases] + assert "Next.js framework smells" in labels + lint = next(p for p in cfg.phases if getattr(p, "label", "") == "next lint") + assert lint.slow is True + + +def test_nextjs_smells_phase_emits_issues_when_next_present(tmp_path: Path): + _write( + tmp_path, + "package.json", + '{"dependencies": {"next": "14.0.0", "react": "18.3.0"}}\n', + ) + _write( + tmp_path, + "app/legacy.tsx", + "import { useRouter } from 'next/router'\nexport default function X(){return null}\n", + ) + _write( + tmp_path, + "app/server-in-client.tsx", + ( + "'use client'\n" + "import { cookies } from 'next/headers'\n" + "import fs from 'node:fs'\n" + "export default function X(){return null}\n" + ), + ) + + cfg = TypeScriptConfig() + phase = next(p for p in cfg.phases if getattr(p, "label", "") == "Next.js framework smells") + issues, potentials = phase.run(tmp_path, _FakeLang()) + assert potentials.get("nextjs", 0) >= 1 + assert any(issue.get("detector") == "nextjs" for issue in issues) + assert any("next_router_in_app_router" in str(issue.get("id", "")) for issue in issues) + + +def test_next_lint_phase_is_skipped_when_include_slow_false(): + run = make_lang_run(TypeScriptConfig()) + selected = 
plan_scan_mod._select_phases(run, include_slow=False, profile="full") + labels = [getattr(p, "label", "") for p in selected] + assert "Next.js framework smells" in labels + assert "next lint" not in labels diff --git a/desloppify/languages/typescript/tests/test_ts_unused.py b/desloppify/languages/typescript/tests/test_ts_unused.py index a954a2da2..d9ba376df 100644 --- a/desloppify/languages/typescript/tests/test_ts_unused.py +++ b/desloppify/languages/typescript/tests/test_ts_unused.py @@ -168,7 +168,7 @@ def _fake_run(*args, **kwargs): def test_run_tsc_unused_check_raises_without_npx(self, tmp_path, monkeypatch): monkeypatch.setattr(ts_unused_mod.shutil, "which", lambda _name: None) - with pytest.raises(OSError, match="npx executable not found"): + with pytest.raises(OSError, match="TypeScript compiler not found"): ts_unused_mod._run_tsc_unused_check(tmp_path, tmp_path / "tsconfig.json") def test_detect_unused_uses_deno_fallback_for_url_imports(self, tmp_path, monkeypatch): @@ -238,6 +238,11 @@ def _fake_run(*args, **kwargs): calls["count"] += 1 return _Result() + monkeypatch.setattr( + ts_unused_mod.shutil, + "which", + lambda name: "/opt/homebrew/bin/npx" if name == "npx" else None, + ) monkeypatch.setattr(ts_unused_mod._proc_runtime, "run", _fake_run) entries, total = detect_unused(tmp_path / "src") assert calls["count"] == 1 @@ -263,6 +268,11 @@ def _fake_run(*args, **kwargs): calls["count"] += 1 return _Result() + monkeypatch.setattr( + ts_unused_mod.shutil, + "which", + lambda name: "/opt/homebrew/bin/npx" if name == "npx" else None, + ) monkeypatch.setattr(ts_unused_mod._proc_runtime, "run", _fake_run) entries, total = detect_unused(tmp_path / "src") assert calls["count"] == 1 diff --git a/desloppify/tests/detectors/test_next_lint.py b/desloppify/tests/detectors/test_next_lint.py new file mode 100644 index 000000000..de64a6af8 --- /dev/null +++ b/desloppify/tests/detectors/test_next_lint.py @@ -0,0 +1,186 @@ +"""Tests for Next.js `next lint` parser + 
tool-phase integration.""" + +from __future__ import annotations + +import json +import subprocess # nosec B404 +from pathlib import Path +from types import SimpleNamespace + +import pytest + +from desloppify.languages._framework.generic_parts.parsers import ( + ToolParserError, + parse_next_lint, +) +from desloppify.languages._framework.generic_support.core import make_tool_phase +from desloppify.languages._framework.generic_parts import tool_runner as tool_runner_mod + + +def test_parse_next_lint_aggregates_per_file_and_relativizes_paths(monkeypatch, tmp_path): + monkeypatch.chdir(tmp_path) + scan_path = tmp_path / "apps" / "web" + scan_path.mkdir(parents=True, exist_ok=True) + + payload = [ + { + "filePath": str(tmp_path / "app" / "page.tsx"), + "messages": [ + { + "line": 10, + "column": 2, + "ruleId": "rule-a", + "message": "Bad thing", + "severity": 2, + }, + { + "line": 11, + "column": 1, + "ruleId": "rule-b", + "message": "Another thing", + "severity": 1, + }, + ], + }, + { + "filePath": "relative.js", + "messages": [{"line": 0, "message": "Line defaults to 1", "severity": 1}], + }, + {"filePath": "empty.js", "messages": []}, + ] + + raw = "eslint noise\n" + json.dumps(payload) + "\nmore noise" + entries, meta = parse_next_lint(raw, scan_path) + assert meta == {"potential": 3} + assert len(entries) == 2 + + first = next(e for e in entries if e["file"] == "app/page.tsx") + assert first["line"] == 10 + assert first["id"] == "lint" + assert first["message"].startswith("next lint: Bad thing") + assert first["detail"]["count"] == 2 + assert len(first["detail"]["messages"]) == 2 + + second = next(e for e in entries if e["file"] == "apps/web/relative.js") + assert second["line"] == 1 + assert second["id"] == "lint" + assert second["message"].startswith("next lint: Line defaults to 1") + assert second["detail"]["count"] == 1 + +def test_parse_next_lint_raises_on_missing_json_array(tmp_path): + with pytest.raises(ToolParserError): + parse_next_lint("not json 
output", tmp_path) + + +def test_next_lint_tool_phase_emits_issues_and_potential(monkeypatch, tmp_path): + monkeypatch.chdir(tmp_path) + scan_path = tmp_path / "apps" / "web" + scan_path.mkdir(parents=True, exist_ok=True) + + payload = [ + {"filePath": "a.js", "messages": [{"line": 3, "message": "x", "severity": 1}]}, + {"filePath": "b.js", "messages": []}, + ] + output = json.dumps(payload) + + def fake_run(argv, *, shell, cwd, capture_output, text, timeout): + assert shell is False + assert capture_output is True + assert text is True + assert timeout == 120 + assert Path(cwd).resolve() == scan_path.resolve() + return subprocess.CompletedProcess(argv, 0, stdout=output, stderr="") + + monkeypatch.setattr(tool_runner_mod.subprocess, "run", fake_run) + + phase = make_tool_phase( + "next lint", + "npx --no-install next lint --format json", + "next_lint", + "next_lint", + 2, + ) + lang = SimpleNamespace(detector_coverage={}, coverage_warnings=[]) + + issues, signals = phase.run(scan_path, lang) + assert signals == {"next_lint": 2} + assert len(issues) == 1 + assert issues[0]["detector"] == "next_lint" + assert issues[0]["file"] == "apps/web/a.js" + assert issues[0]["tier"] == 2 + assert issues[0]["detail"]["count"] == 1 + + +def test_next_lint_tool_phase_reports_potential_when_clean(monkeypatch, tmp_path): + monkeypatch.chdir(tmp_path) + scan_path = tmp_path / "apps" / "web" + scan_path.mkdir(parents=True, exist_ok=True) + + payload = [{"filePath": "a.js", "messages": []}] + output = json.dumps(payload) + + def fake_run(argv, *, shell, cwd, capture_output, text, timeout): + return subprocess.CompletedProcess(argv, 0, stdout=output, stderr="") + + monkeypatch.setattr(tool_runner_mod.subprocess, "run", fake_run) + + phase = make_tool_phase( + "next lint", + "npx --no-install next lint --format json", + "next_lint", + "next_lint", + 2, + ) + lang = SimpleNamespace(detector_coverage={}, coverage_warnings=[]) + + issues, signals = phase.run(scan_path, lang) + assert issues 
== [] + assert signals == {"next_lint": 1} + + +def test_next_lint_tool_phase_records_coverage_warning_on_tool_missing(monkeypatch, tmp_path): + monkeypatch.chdir(tmp_path) + + def fake_run(*_args, **_kwargs): + raise FileNotFoundError("missing tool") + + monkeypatch.setattr(tool_runner_mod.subprocess, "run", fake_run) + + phase = make_tool_phase( + "next lint", + "npx --no-install next lint --format json", + "next_lint", + "next_lint", + 2, + ) + lang = SimpleNamespace(detector_coverage={}, coverage_warnings=[]) + + issues, signals = phase.run(tmp_path, lang) + assert issues == [] + assert signals == {} + assert lang.detector_coverage["next_lint"]["reason"] == "tool_not_found" + assert lang.coverage_warnings and lang.coverage_warnings[0]["detector"] == "next_lint" + + +def test_next_lint_tool_phase_records_coverage_warning_on_parser_error(monkeypatch, tmp_path): + monkeypatch.chdir(tmp_path) + + def fake_run(argv, *, shell, cwd, capture_output, text, timeout): + return subprocess.CompletedProcess(argv, 0, stdout="not json", stderr="") + + monkeypatch.setattr(tool_runner_mod.subprocess, "run", fake_run) + + phase = make_tool_phase( + "next lint", + "npx --no-install next lint --format json", + "next_lint", + "next_lint", + 2, + ) + lang = SimpleNamespace(detector_coverage={}, coverage_warnings=[]) + + issues, signals = phase.run(tmp_path, lang) + assert issues == [] + assert signals == {} + assert lang.detector_coverage["next_lint"]["reason"] == "parser_error" + assert lang.coverage_warnings and lang.coverage_warnings[0]["detector"] == "next_lint" diff --git a/desloppify/tests/review/work_queue_cases.py b/desloppify/tests/review/work_queue_cases.py index af65a8e5a..cd5bd848d 100644 --- a/desloppify/tests/review/work_queue_cases.py +++ b/desloppify/tests/review/work_queue_cases.py @@ -875,6 +875,7 @@ def test_registry_standalone_threshold_count(): "dict_keys", "dupes", "naming", + "nextjs", "patterns", "props", "react", From 6f2693b744fed90122a5db1d363be91015a33ce3 
Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 20:43:58 +0100 Subject: [PATCH 21/43] fix: allow scan when queue is fully drained regardless of lifecycle phase Fixes #441 Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/commands/scan/preflight.py | 2 ++ .../commands/scan/test_scan_preflight.py | 28 +++++++++++++++++++ pyproject.toml | 2 +- 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/desloppify/app/commands/scan/preflight.py b/desloppify/app/commands/scan/preflight.py index b1d47815f..e784878c7 100644 --- a/desloppify/app/commands/scan/preflight.py +++ b/desloppify/app/commands/scan/preflight.py @@ -94,6 +94,8 @@ def scan_queue_preflight(args: object) -> None: except OSError: _logger.debug("scan preflight queue breakdown skipped", exc_info=True) return + if breakdown.queue_total == 0: + return # Queue fully drained — scan always allowed (#441) if mode is ScoreDisplayMode.LIVE: return # Queue fully clear or no active cycle — scan allowed if ( diff --git a/desloppify/tests/commands/scan/test_scan_preflight.py b/desloppify/tests/commands/scan/test_scan_preflight.py index d1fa4c6d9..91bdaf2bb 100644 --- a/desloppify/tests/commands/scan/test_scan_preflight.py +++ b/desloppify/tests/commands/scan/test_scan_preflight.py @@ -76,6 +76,34 @@ def test_queue_clear_allows_scan(): scan_queue_preflight(args) +def test_queue_drained_with_non_scan_lifecycle_allows_scan(): + """When queue is fully drained but lifecycle phase hasn't advanced to scan, + scan should still be allowed. 
Regression test for #441.""" + from desloppify.app.commands.helpers.queue_progress import QueueBreakdown + + args = SimpleNamespace(profile=None, force_rescan=False, state=None, lang="python") + plan = {"plan_start_scores": {"strict": 80.0}} + # lifecycle_phase stuck on "review" even though queue_total is 0 + breakdown = QueueBreakdown(queue_total=0, workflow=0, lifecycle_phase="review") + with ( + patch( + "desloppify.app.commands.scan.preflight.resolve_plan_load_status", + return_value=_plan_status(plan), + ), + patch( + "desloppify.app.commands.scan.preflight.state_path", + return_value="/tmp/test-state.json", + ), + patch("desloppify.app.commands.scan.preflight.state_mod") as mock_state_mod, + patch( + "desloppify.app.commands.scan.preflight.plan_aware_queue_breakdown", + return_value=breakdown, + ), + ): + mock_state_mod.load_state.return_value = {"issues": {}} + scan_queue_preflight(args) + + # ── Queue remaining = gate ────────────────────────────────── diff --git a/pyproject.toml b/pyproject.toml index f5d4ebe20..668bb480c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "desloppify" -version = "0.9.9" +version = "0.9.10" description = "Multi-language codebase health scanner and technical debt tracker" readme = "README.md" requires-python = ">=3.11" From f0250d83fd2e46bbcca3ffa5bbaa0516c2bc6203 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 20:43:59 +0100 Subject: [PATCH 22/43] fix: quote paths for Windows cmd /c and use utf-8 encoding in log recovery Fixes #442 Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/commands/review/runner_process_impl/io.py | 4 ++-- desloppify/app/commands/runner/codex_batch.py | 8 ++++++-- pyproject.toml | 2 +- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/desloppify/app/commands/review/runner_process_impl/io.py b/desloppify/app/commands/review/runner_process_impl/io.py index 28b918b78..9a4b03f97 100644 --- 
a/desloppify/app/commands/review/runner_process_impl/io.py +++ b/desloppify/app/commands/review/runner_process_impl/io.py @@ -36,7 +36,7 @@ def _output_file_has_json_payload(output_file: Path) -> bool: if not output_file.exists(): return False try: - payload = json.loads(output_file.read_text()) + payload = json.loads(output_file.read_text(encoding="utf-8", errors="replace")) except (OSError, json.JSONDecodeError): return False return isinstance(payload, dict) @@ -52,7 +52,7 @@ def extract_payload_from_log( if not log_path.exists(): return None try: - log_text = log_path.read_text() + log_text = log_path.read_text(encoding="utf-8", errors="replace") except OSError: return None diff --git a/desloppify/app/commands/runner/codex_batch.py b/desloppify/app/commands/runner/codex_batch.py index b7bad99c8..1422b2ea8 100644 --- a/desloppify/app/commands/runner/codex_batch.py +++ b/desloppify/app/commands/runner/codex_batch.py @@ -35,10 +35,14 @@ def _resolve_executable(name: str) -> list[str]: """ resolved = shutil.which(name) if sys.platform == "win32": + target = resolved or name + # Quote the target for cmd /c when the path contains spaces. 
+ if " " in target: + target = f'"{target}"' if resolved is not None and resolved.lower().endswith((".cmd", ".bat")): - return ["cmd", "/c", resolved] + return ["cmd", "/c", target] # shutil.which may miss .cmd/.bat wrappers — let cmd.exe resolve it - return ["cmd", "/c", resolved or name] + return ["cmd", "/c", target] return [resolved or name] diff --git a/pyproject.toml b/pyproject.toml index f5d4ebe20..668bb480c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "desloppify" -version = "0.9.9" +version = "0.9.10" description = "Multi-language codebase health scanner and technical debt tracker" readme = "README.md" requires-python = ">=3.11" From 6531668dd8d0950f0f00854ceeec311126314bee Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 20:46:57 +0100 Subject: [PATCH 23/43] fix: merge retry batch results with original run before coverage check Fixes #443 Co-Authored-By: Claude Opus 4.6 (1M context) --- .../commands/review/batch/execution_phases.py | 72 +++++++++++ .../review/batch/execution_results.py | 122 ++++++++++++++++++ pyproject.toml | 2 +- 3 files changed, 195 insertions(+), 1 deletion(-) diff --git a/desloppify/app/commands/review/batch/execution_phases.py b/desloppify/app/commands/review/batch/execution_phases.py index b38fe6ff0..4cb0d0981 100644 --- a/desloppify/app/commands/review/batch/execution_phases.py +++ b/desloppify/app/commands/review/batch/execution_phases.py @@ -553,6 +553,25 @@ def execute_batch_run(*, prepared: PreparedBatchRunContext, deps: BatchRunDeps) ) +def _is_partial_batch_retry(prepared: PreparedBatchRunContext) -> bool: + """Return True when the current run targets a subset of the packet's batches.""" + all_indexes = set(range(len(prepared.batches))) + return set(prepared.selected_indexes) != all_indexes + + +def _retry_dimension_set(prepared: PreparedBatchRunContext) -> set[str]: + """Return the set of dimensions covered by the selected (retried) 
batches.""" + dims: set[str] = set() + for idx in prepared.selected_indexes: + if 0 <= idx < len(prepared.batches): + batch = prepared.batches[idx] + if isinstance(batch, dict): + for dim in batch.get("dimensions", []): + if isinstance(dim, str) and dim.strip(): + dims.add(dim.strip()) + return dims + + def merge_and_import_batch_run( *, prepared: PreparedBatchRunContext, @@ -578,6 +597,57 @@ def merge_and_import_batch_run( safe_write_text_fn=deps.safe_write_text_fn, colorize_fn=deps.colorize_fn, ) + + # When retrying a subset of batches (--only-batches), look for a prior + # run's merged results to fill in the non-retried dimensions. This + # prevents the coverage gate from rejecting a valid retry that only + # re-ran a few failed slices. + if missing_after_import and _is_partial_batch_retry(prepared): + from .execution_results import ( + find_prior_run_merged_results, + overlay_retry_results_on_prior, + ) + + subagent_runs_dir = prepared.run_dir.parent + prior_merged = find_prior_run_merged_results( + subagent_runs_dir=subagent_runs_dir, + immutable_packet_path=prepared.immutable_packet_path, + current_run_dir=prepared.run_dir, + ) + if prior_merged is not None: + retry_dims = _retry_dimension_set(prepared) + import json as _json + + retry_merged = _json.loads(merged_path.read_text()) + combined = overlay_retry_results_on_prior( + prior_merged=prior_merged, + retry_merged=retry_merged, + retry_dims=retry_dims, + ) + deps.safe_write_text_fn( + merged_path, + _json.dumps(combined, indent=2) + "\n", + ) + # Recompute coverage from the combined result. 
+ combined_assessment_dims = normalize_dimension_list( + list((combined.get("assessments") or {}).keys()) + ) + from .scope import missing_scored_dimensions + + missing_after_import = missing_scored_dimensions( + selected_dims=combined_assessment_dims, + scored_dims=prepared.scored_dimensions, + ) + prior_dims = set((prior_merged.get("assessments") or {}).keys()) + inherited_count = len(prior_dims - retry_dims) + print( + deps.colorize_fn( + f" Merged retry with prior run: inherited {inherited_count} " + f"dimension(s) from original run, retried {len(retry_dims)}.", + "green", + ) + ) + enforce_import_coverage( missing_after_import=missing_after_import, packet_dimensions=prepared.packet_dimensions, @@ -606,10 +676,12 @@ def merge_and_import_batch_run( "PreparedPacketScope", "PreparedBatchRunContext", "PreparedRunArtifacts", + "_is_partial_batch_retry", "_prepare_packet_scope", "_prepare_run_runtime", "_print_runtime_expectation", "_resolve_runtime_policy", + "_retry_dimension_set", "execute_batch_run", "merge_and_import_batch_run", "prepare_batch_run", diff --git a/desloppify/app/commands/review/batch/execution_results.py b/desloppify/app/commands/review/batch/execution_results.py index 8f8bd8693..bba4cbfe8 100644 --- a/desloppify/app/commands/review/batch/execution_results.py +++ b/desloppify/app/commands/review/batch/execution_results.py @@ -19,6 +19,126 @@ ) +def find_prior_run_merged_results( + *, + subagent_runs_dir: Path, + immutable_packet_path: Path, + current_run_dir: Path, +) -> dict | None: + """Find the most recent prior run's merged results for the same packet. + + Searches run directories under ``subagent_runs_dir`` for a prior run that + used the same immutable packet and produced a ``holistic_issues_merged.json``. + Skips the current run directory. Returns the parsed merged results dict or + ``None`` if no matching prior run is found. 
+ """ + if not subagent_runs_dir.is_dir(): + return None + + immutable_packet_str = str(immutable_packet_path) + best_merged: dict | None = None + best_stamp: str = "" + + for run_dir in sorted(subagent_runs_dir.iterdir(), reverse=True): + if not run_dir.is_dir(): + continue + if run_dir == current_run_dir: + continue + summary_path = run_dir / "run_summary.json" + merged_path = run_dir / "holistic_issues_merged.json" + if not summary_path.exists() or not merged_path.exists(): + continue + try: + summary = json.loads(summary_path.read_text()) + except (OSError, json.JSONDecodeError): + continue + if str(summary.get("immutable_packet", "")) != immutable_packet_str: + continue + stamp = str(summary.get("run_stamp", run_dir.name)) + if stamp <= best_stamp: + continue + try: + merged = json.loads(merged_path.read_text()) + except (OSError, json.JSONDecodeError): + continue + if isinstance(merged, dict) and merged.get("assessments"): + best_merged = merged + best_stamp = stamp + + return best_merged + + +def overlay_retry_results_on_prior( + *, + prior_merged: dict, + retry_merged: dict, + retry_dims: set[str], +) -> dict: + """Combine prior run results with retry results for retried dimensions. + + For dimensions in ``retry_dims``, the retry's assessments/issues/notes take + precedence. For all other dimensions, the prior run's data is preserved. + The result is a new merged dict suitable for the standard coverage gate. 
+ """ + combined = dict(retry_merged) + + # --- assessments: prior non-retried dims + retry dims --- + prior_assessments = prior_merged.get("assessments") or {} + retry_assessments = retry_merged.get("assessments") or {} + merged_assessments: dict[str, object] = {} + for dim, value in prior_assessments.items(): + if dim not in retry_dims: + merged_assessments[dim] = value + for dim, value in retry_assessments.items(): + merged_assessments[dim] = value + combined["assessments"] = merged_assessments + + # --- issues: prior non-retried dim issues + all retry issues --- + prior_issues = prior_merged.get("issues") or [] + retry_issues = retry_merged.get("issues") or [] + kept_prior_issues = [ + issue for issue in prior_issues + if isinstance(issue, dict) and str(issue.get("dimension", "")) not in retry_dims + ] + combined["issues"] = kept_prior_issues + list(retry_issues) + + # --- dimension_notes: merge similarly --- + prior_notes = prior_merged.get("dimension_notes") or {} + retry_notes = retry_merged.get("dimension_notes") or {} + merged_notes: dict[str, object] = {} + for dim, value in prior_notes.items(): + if dim not in retry_dims: + merged_notes[dim] = value + for dim, value in retry_notes.items(): + merged_notes[dim] = value + combined["dimension_notes"] = merged_notes + + # --- dimension_judgment: merge similarly --- + prior_judgment = prior_merged.get("dimension_judgment") or {} + retry_judgment = retry_merged.get("dimension_judgment") or {} + merged_judgment: dict[str, object] = {} + for dim, value in prior_judgment.items(): + if dim not in retry_dims: + merged_judgment[dim] = value + for dim, value in retry_judgment.items(): + merged_judgment[dim] = value + combined["dimension_judgment"] = merged_judgment + + # --- context_updates: merge similarly --- + prior_ctx = prior_merged.get("context_updates") or {} + retry_ctx = retry_merged.get("context_updates") or {} + if prior_ctx or retry_ctx: + merged_ctx: dict[str, object] = {} + for dim, value in 
prior_ctx.items(): + if dim not in retry_dims: + merged_ctx[dim] = value + for dim, value in retry_ctx.items(): + merged_ctx[dim] = value + combined["context_updates"] = merged_ctx + + return combined + + def collect_and_reconcile_results( *, collect_batch_results_fn, @@ -272,7 +392,9 @@ def log_run_start( __all__ = [ "collect_and_reconcile_results", "enforce_import_coverage", + "find_prior_run_merged_results", "log_run_start", "import_and_finalize", "merge_and_write_results", + "overlay_retry_results_on_prior", ] diff --git a/pyproject.toml b/pyproject.toml index f5d4ebe20..668bb480c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "desloppify" -version = "0.9.9" +version = "0.9.10" description = "Multi-language codebase health scanner and technical debt tracker" readme = "README.md" requires-python = ">=3.11" From d8fdb1e7898ca24095e8e506e23f4dd9e58bd543 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 20:44:47 +0100 Subject: [PATCH 24/43] =?UTF-8?q?fix(docs):=20SKILL.md=20cleanup=20?= =?UTF-8?q?=E2=80=94=20remove=20unsupported=20frontmatter,=20fix=20file=20?= =?UTF-8?q?naming,=20generalize=20install?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fixes #444, #445, #446 Co-Authored-By: Claude Opus 4.6 (1M context) --- docs/CLAUDE.md | 2 +- docs/CURSOR.md | 2 +- docs/SKILL.md | 3 +-- docs/WINDSURF.md | 2 +- pyproject.toml | 2 +- 5 files changed, 5 insertions(+), 6 deletions(-) diff --git a/docs/CLAUDE.md b/docs/CLAUDE.md index bc6d92d90..f9c283a78 100644 --- a/docs/CLAUDE.md +++ b/docs/CLAUDE.md @@ -19,7 +19,7 @@ Run `desloppify review --prepare` first to generate review data, then use Claude - The codebase path and list of dimensions to score - The blind packet path to read - Instruction to score from code evidence only, not from targets -- Each agent writes output to a separate file. 
Merge assessments (average overlapping dimension scores) and concatenate findings. +- Each agent writes output to `results/batch-N.raw.txt` (matching the batch index). Merge assessments (average overlapping dimension scores) and concatenate findings. ### Subagent rules diff --git a/docs/CURSOR.md b/docs/CURSOR.md index 2de98d04e..1f455796d 100644 --- a/docs/CURSOR.md +++ b/docs/CURSOR.md @@ -20,7 +20,7 @@ tools: Use the prompt from the "Reviewer agent prompt" section above. Launch multiple reviewer subagents, each with a subset of dimensions. -Each agent writes its output to a separate file. +Each agent writes its output to `results/batch-N.raw.txt` (matching the batch index). Merge assessments (average where dimensions overlap) and findings, then import. diff --git a/docs/SKILL.md b/docs/SKILL.md index 6fc23ef9c..4317064e9 100644 --- a/docs/SKILL.md +++ b/docs/SKILL.md @@ -6,7 +6,6 @@ description: > duplicate functions, code smells, naming issues, import cycles, or coupling problems. Also use when asked for a health score, what to fix next, or to create a cleanup plan. Supports 29 languages. -allowed-tools: Bash(desloppify *) --- @@ -275,6 +274,6 @@ If the fix is unclear or the change needs discussion, open an issue at `https:// ## Prerequisite -`command -v desloppify >/dev/null 2>&1 && echo "desloppify: installed" || echo "NOT INSTALLED — run: pip install --upgrade git+https://github.com/peteromallet/desloppify.git"` +`command -v desloppify >/dev/null 2>&1 && echo "desloppify: installed" || echo "NOT INSTALLED — run: uvx --from git+https://github.com/peteromallet/desloppify.git desloppify"` diff --git a/docs/WINDSURF.md b/docs/WINDSURF.md index 295300b51..b52c5fee3 100644 --- a/docs/WINDSURF.md +++ b/docs/WINDSURF.md @@ -12,7 +12,7 @@ multiple Cascade panes manually. in one, abstraction + error consistency in another). 3. Each pane scores its assigned dimensions independently, reading the codebase and `query.json`'s `dimension_prompts` for context. -4. 
Each pane writes output to a separate file. +4. Each pane writes output to `results/batch-N.raw.txt` (matching the batch index). 5. In the primary pane, merge assessments and findings, then import. If the user prefers a single-pane workflow, review all dimensions sequentially diff --git a/pyproject.toml b/pyproject.toml index f5d4ebe20..668bb480c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "desloppify" -version = "0.9.9" +version = "0.9.10" description = "Multi-language codebase health scanner and technical debt tracker" readme = "README.md" requires-python = ">=3.11" From df9bff4bc4151100936ac34e195390067afcb8b2 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:44:07 +0100 Subject: [PATCH 25/43] cleanup: remove dead _strip_c_style_comments_preserve_lines shim from rust/tools.py Follow-up to PR #440 (Rust inline-test filtering). Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/languages/rust/tools.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/desloppify/languages/rust/tools.py b/desloppify/languages/rust/tools.py index 66c31c4c8..c1760256c 100644 --- a/desloppify/languages/rust/tools.py +++ b/desloppify/languages/rust/tools.py @@ -560,11 +560,6 @@ def _strip_comments_preserve_lines(text: str) -> str: return "".join(result) -def _strip_c_style_comments_preserve_lines(text: str) -> str: - """Backwards-compatible shim for tests/imports expecting the old helper name.""" - return _strip_comments_preserve_lines(text) - - def _line_number(content: str, offset: int) -> int: return content.count("\n", 0, offset) + 1 From 30f97ef1a7c56ee378030b1432b4d8469b261a0c Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:44:34 +0100 Subject: [PATCH 26/43] refactor: move queue_total==0 check into score_display_mode (#441) Move the empty-queue guard from scan_queue_preflight into score_display_mode() so ALL callers (status, plan nudge, next flow) benefit from the 
fix, not just scan preflight. Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/commands/helpers/queue_progress.py | 2 ++ desloppify/app/commands/scan/preflight.py | 2 -- desloppify/tests/commands/test_queue_progress.py | 7 +++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/desloppify/app/commands/helpers/queue_progress.py b/desloppify/app/commands/helpers/queue_progress.py index cfed45116..fc60bd5c5 100644 --- a/desloppify/app/commands/helpers/queue_progress.py +++ b/desloppify/app/commands/helpers/queue_progress.py @@ -106,6 +106,8 @@ def score_display_mode( return ScoreDisplayMode.LIVE if breakdown is None: return ScoreDisplayMode.LIVE + if breakdown.queue_total == 0: + return ScoreDisplayMode.LIVE # Queue fully drained — always live (#441) if breakdown.lifecycle_phase == LIFECYCLE_PHASE_SCAN: return ScoreDisplayMode.LIVE if breakdown.lifecycle_phase == LIFECYCLE_PHASE_EXECUTE: diff --git a/desloppify/app/commands/scan/preflight.py b/desloppify/app/commands/scan/preflight.py index e784878c7..b1d47815f 100644 --- a/desloppify/app/commands/scan/preflight.py +++ b/desloppify/app/commands/scan/preflight.py @@ -94,8 +94,6 @@ def scan_queue_preflight(args: object) -> None: except OSError: _logger.debug("scan preflight queue breakdown skipped", exc_info=True) return - if breakdown.queue_total == 0: - return # Queue fully drained — scan always allowed (#441) if mode is ScoreDisplayMode.LIVE: return # Queue fully clear or no active cycle — scan allowed if ( diff --git a/desloppify/tests/commands/test_queue_progress.py b/desloppify/tests/commands/test_queue_progress.py index e7cc467b0..7c9a818b0 100644 --- a/desloppify/tests/commands/test_queue_progress.py +++ b/desloppify/tests/commands/test_queue_progress.py @@ -162,6 +162,13 @@ def test_score_display_mode_live_when_queue_empty(): assert score_display_mode(b, 80.0) is ScoreDisplayMode.LIVE +@pytest.mark.parametrize("phase", ["review", "execute", "workflow"]) +def 
test_score_display_mode_live_when_queue_drained_any_lifecycle(phase: str): + """queue_total=0 always returns LIVE regardless of lifecycle phase (#441).""" + b = QueueBreakdown(queue_total=0, lifecycle_phase=phase) + assert score_display_mode(b, 80.0) is ScoreDisplayMode.LIVE + + # ── format_queue_headline ──────────────────────────────────── From 2d4a9396ba59ea5bfa54a67c31e0cf91cfa8ef11 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:44:45 +0100 Subject: [PATCH 27/43] fix: extract anonymous functions in tree-sitter specs (R lang) PR #449 added an R anonymous function query pattern that captures @fn but the extractor requires @name, silently skipping all anonymous function matches. Fix the extractor to synthesize an "<anonymous>" name when @name is absent but @func is present. Original R spec contributed by sims1253 in PR #449. Co-Authored-By: Claude Opus 4.6 (1M context) --- .../treesitter/analysis/extractors.py | 4 +- .../tests/lang/common/test_treesitter.py | 62 +++++++++++++++++++ 2 files changed, 64 insertions(+), 2 deletions(-) diff --git a/desloppify/languages/_framework/treesitter/analysis/extractors.py b/desloppify/languages/_framework/treesitter/analysis/extractors.py index 1a36f313d..02530b22e 100644 --- a/desloppify/languages/_framework/treesitter/analysis/extractors.py +++ b/desloppify/languages/_framework/treesitter/analysis/extractors.py @@ -141,10 +141,10 @@ def ts_extract_functions( for _pattern_idx, captures in matches: func_node = _unwrap_node(captures.get("func")) name_node = _unwrap_node(captures.get("name")) - if not func_node or not name_node: + if not func_node: continue - name_text = _node_text(name_node) + name_text = _node_text(name_node) if name_node else "<anonymous>" line = func_node.start_point[0] + 1 # 1-indexed end_line = func_node.end_point[0] + 1 diff --git a/desloppify/tests/lang/common/test_treesitter.py b/desloppify/tests/lang/common/test_treesitter.py index 1daf74ea1..6efa1c083 100644 ---
a/desloppify/tests/lang/common/test_treesitter.py +++ b/desloppify/tests/lang/common/test_treesitter.py @@ -1163,6 +1163,68 @@ def test_normalization_strips_console(self, js_file, tmp_path): assert "return" in greet.normalized + +# ── R extraction tests ───────────────────────────────────────── + + +class TestRExtraction: + @pytest.fixture + def r_file(self, tmp_path): + code = """\ +# Named function assigned with <- +my_func <- function(x, y) { + result <- x + y + return(result) +} + +# Anonymous function inside lapply +result <- lapply(items, function(i) { + value <- process(i) + transform(value) + return(value) +}) +""" + f = tmp_path / "analysis.R" + f.write_text(code) + return str(f) + + def test_named_function_extraction(self, r_file, tmp_path): + from desloppify.languages._framework.treesitter.analysis.extractors import ( + ts_extract_functions, + ) + from desloppify.languages._framework.treesitter.specs.scripting import R_SPEC + + functions = ts_extract_functions(tmp_path, R_SPEC, [r_file]) + names = [f.name for f in functions] + assert "my_func" in names + + def test_anonymous_function_extraction(self, r_file, tmp_path): + from desloppify.languages._framework.treesitter.analysis.extractors import ( + ts_extract_functions, + ) + from desloppify.languages._framework.treesitter.specs.scripting import R_SPEC + + functions = ts_extract_functions(tmp_path, R_SPEC, [r_file]) + names = [f.name for f in functions] + assert "<anonymous>" in names, ( + "Anonymous functions in lapply() should be extracted with " + "synthesized name" + ) + + def test_anonymous_function_has_correct_metadata(self, r_file, tmp_path): + from desloppify.languages._framework.treesitter.analysis.extractors import ( + ts_extract_functions, + ) + from desloppify.languages._framework.treesitter.specs.scripting import R_SPEC + + functions = ts_extract_functions(tmp_path, R_SPEC, [r_file]) + anon_fns = [f for f in functions if f.name == "<anonymous>"] + assert len(anon_fns) >= 1 + fn = anon_fns[0] + assert fn.file ==
r_file + assert fn.line > 0 + assert fn.body_hash is not None + + # ── ESLint parser tests ─────────────────────────────────────── From 11b8c16e980d9041f6d669ed8d4dd4f084834b6f Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:45:41 +0100 Subject: [PATCH 28/43] fix(docs): sync .agents SKILL.md with docs copy, add pip fallback and batch naming note - Remove `allowed-tools` frontmatter from .agents/skills/desloppify/SKILL.md (#444) - Add `pip install` fallback note alongside uvx in both copies (#446) - Add batch output naming clarification (batch-N.raw.txt vs .json imports) (#445) - Sync agent directives section and version bump to .agents copy Co-Authored-By: Claude Opus 4.6 (1M context) --- .agents/skills/desloppify/SKILL.md | 23 ++++++++++++++++++++--- docs/SKILL.md | 4 ++++ 2 files changed, 24 insertions(+), 3 deletions(-) diff --git a/.agents/skills/desloppify/SKILL.md b/.agents/skills/desloppify/SKILL.md index 32f4908c0..3a3fa073e 100644 --- a/.agents/skills/desloppify/SKILL.md +++ b/.agents/skills/desloppify/SKILL.md @@ -6,11 +6,10 @@ description: > duplicate functions, code smells, naming issues, import cycles, or coupling problems. Also use when asked for a health score, what to fix next, or to create a cleanup plan. Supports 29 languages. -allowed-tools: Bash(desloppify *) --- - + # Desloppify @@ -121,6 +120,8 @@ Four paths to get subjective scores: - **Cloud/external**: `desloppify review --external-start --external-runner claude` → follow session template → `--external-submit`. - **Manual path**: `desloppify review --prepare` → review per dimension → `desloppify review --import file.json`. +**Batch output vs import filenames:** Individual batch outputs from subagents must be named `batch-N.raw.txt` (plain text/JSON content, `.raw.txt` extension). The `.json` filenames in `--import merged.json` or `--import findings.json` refer to the final merged import file, not individual batch outputs. Do not name batch outputs with a `.json` extension. 
+ - Import first, fix after — import creates tracked state entries for correlation. - Target-matching scores trigger auto-reset to prevent gaming. Use the blind-review workflow described in your agent overlay doc (e.g. `docs/CLAUDE.md`, `docs/HERMES.md`). - Even moderate scores (60-80) dramatically improve overall health. @@ -203,6 +204,20 @@ desloppify config set commit_tracking_enabled false # disable guidance After resolving findings as `fixed`, the tool shows uncommitted work, committed history, and a suggested commit message. After committing externally, run `record` to move findings from uncommitted to committed and auto-update the linked PR description. +### Agent directives + +Directives are messages shown to agents at lifecycle phase transitions — use them to switch models, set constraints, or give context-specific instructions. + +```bash +desloppify directives # show all configured directives +desloppify directives set execute "Switch to claude-sonnet-4-6. Focus on speed." +desloppify directives set triage "Switch to claude-opus-4-6. Read carefully." +desloppify directives set review "Use blind packet. Do not anchor on previous scores." +desloppify directives unset execute # remove a directive +``` + +Available phases: `execute`, `review`, `triage`, `workflow`, `scan` (and fine-grained variants like `review_initial`, `triage_postflight`, etc.). 
+ ### Quick reference ```bash @@ -261,7 +276,9 @@ If the fix is unclear or the change needs discussion, open an issue at `https:// ## Prerequisite -`command -v desloppify >/dev/null 2>&1 && echo "desloppify: installed" || echo "NOT INSTALLED — run: pip install --upgrade git+https://github.com/peteromallet/desloppify.git"` +`command -v desloppify >/dev/null 2>&1 && echo "desloppify: installed" || echo "NOT INSTALLED — run: uvx --from git+https://github.com/peteromallet/desloppify.git desloppify"` + +If `uvx` is not available: `pip install desloppify[full]` diff --git a/docs/SKILL.md b/docs/SKILL.md index 4317064e9..fd632344c 100644 --- a/docs/SKILL.md +++ b/docs/SKILL.md @@ -120,6 +120,8 @@ Four paths to get subjective scores: - **Cloud/external**: `desloppify review --external-start --external-runner claude` → follow session template → `--external-submit`. - **Manual path**: `desloppify review --prepare` → review per dimension → `desloppify review --import file.json`. +**Batch output vs import filenames:** Individual batch outputs from subagents must be named `batch-N.raw.txt` (plain text/JSON content, `.raw.txt` extension). The `.json` filenames in `--import merged.json` or `--import findings.json` refer to the final merged import file, not individual batch outputs. Do not name batch outputs with a `.json` extension. + - Import first, fix after — import creates tracked state entries for correlation. - Target-matching scores trigger auto-reset to prevent gaming. Use the blind-review workflow described in your agent overlay doc (e.g. `docs/CLAUDE.md`, `docs/HERMES.md`). - Even moderate scores (60-80) dramatically improve overall health. 
@@ -276,4 +278,6 @@ If the fix is unclear or the change needs discussion, open an issue at `https:// `command -v desloppify >/dev/null 2>&1 && echo "desloppify: installed" || echo "NOT INSTALLED — run: uvx --from git+https://github.com/peteromallet/desloppify.git desloppify"` +If `uvx` is not available: `pip install desloppify[full]` + From b062ad1dc5619c42a766d07304435f619bb77db0 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:46:57 +0100 Subject: [PATCH 29/43] fix: collapse cmd /c arguments into single string for proper Windows quoting The previous fix pre-quoted the executable path, but the actual breakage was in argument paths (-C repo_root, -o output_file) containing spaces. Pre-embedding quotes in a subprocess list causes double-quoting because Popen's list2cmdline() adds its own quotes. The real issue: cmd /c concatenates everything after /c and re-parses it with its own tokeniser. The fix introduces _wrap_cmd_c() which uses subprocess.list2cmdline() to build the inner command as a single properly-quoted string, then passes that as one token after /c: ["cmd", "/c", "codex exec -C \"path with spaces\" ..."]. 
- Revert incorrect executable pre-quoting in _resolve_executable - Add _wrap_cmd_c() to properly collapse cmd /c commands - Apply _wrap_cmd_c in codex_batch_command after building the full arg list - Keep correct encoding="utf-8", errors="replace" fix in io.py - Add tests for _wrap_cmd_c and Windows codex_batch_command path quoting Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/commands/runner/codex_batch.py | 38 ++++++++++++-- .../commands/test_runner_modules_direct.py | 51 +++++++++++++++++++ 2 files changed, 85 insertions(+), 4 deletions(-) diff --git a/desloppify/app/commands/runner/codex_batch.py b/desloppify/app/commands/runner/codex_batch.py index 1422b2ea8..0cbc70cea 100644 --- a/desloppify/app/commands/runner/codex_batch.py +++ b/desloppify/app/commands/runner/codex_batch.py @@ -4,6 +4,7 @@ import os import shutil +import subprocess import sys from pathlib import Path @@ -32,13 +33,15 @@ def _resolve_executable(name: str) -> list[str]: When ``shutil.which()`` cannot locate the executable on Windows, we still route through ``cmd /c`` so the shell's own PATH resolution can find ``.cmd``/``.bat`` wrappers that Python's ``which`` missed. + + Returns the command prefix tokens. On Windows, this will be + ``["cmd", "/c", executable]``; the caller should pass the final + assembled command through :func:`_wrap_cmd_c` to collapse everything + after ``/c`` into a single properly-quoted string. """ resolved = shutil.which(name) if sys.platform == "win32": target = resolved or name - # Quote the target for cmd /c when the path contains spaces. - if " " in target: - target = f'"{target}"' if resolved is not None and resolved.lower().endswith((".cmd", ".bat")): return ["cmd", "/c", target] # shutil.which may miss .cmd/.bat wrappers — let cmd.exe resolve it @@ -46,13 +49,39 @@ def _resolve_executable(name: str) -> list[str]: return [resolved or name] +def _wrap_cmd_c(cmd: list[str]) -> list[str]: + """Collapse a ``cmd /c `` list into proper form. 
+ + ``cmd /c`` concatenates everything after ``/c`` into a single string and + re-parses it with its own tokeniser. When arguments contain spaces + (e.g. repo paths like ``core_project - Copy``), passing them as separate + list elements causes ``subprocess.list2cmdline()`` to quote them + individually, but ``cmd``'s re-parsing can still split on spaces in + certain edge cases. + + The reliable approach is to build the real command string ourselves with + ``subprocess.list2cmdline()`` and pass that as a **single** token after + ``/c``:: + + ["cmd", "/c", "codex exec -C \\"path with spaces\\" ..."] + + ``list2cmdline`` on the outer list then leaves the inner string untouched + (it contains no special characters that need additional quoting), and + ``cmd /c`` receives exactly the string we intended. + """ + if len(cmd) >= 3 and cmd[0].lower() == "cmd" and cmd[1].lower() == "/c": + inner = subprocess.list2cmdline(cmd[2:]) + return ["cmd", "/c", inner] + return cmd + + def codex_batch_command(*, prompt: str, repo_root: Path, output_file: Path) -> list[str]: """Build one codex exec command line for a batch prompt.""" effort = os.environ.get("DESLOPPIFY_CODEX_REASONING_EFFORT", "low").strip().lower() if effort not in {"low", "medium", "high", "xhigh"}: effort = "low" prefix = _resolve_executable("codex") - return [ + cmd = [ *prefix, "exec", "--ephemeral", @@ -68,6 +97,7 @@ def codex_batch_command(*, prompt: str, repo_root: Path, output_file: Path) -> l str(output_file), prompt, ] + return _wrap_cmd_c(cmd) def run_codex_batch( diff --git a/desloppify/tests/commands/test_runner_modules_direct.py b/desloppify/tests/commands/test_runner_modules_direct.py index c6c65fa98..1d383306b 100644 --- a/desloppify/tests/commands/test_runner_modules_direct.py +++ b/desloppify/tests/commands/test_runner_modules_direct.py @@ -9,6 +9,57 @@ import desloppify.app.commands.runner.run_logs as run_logs_mod +def test_wrap_cmd_c_collapses_arguments_into_single_string() -> None: + """_wrap_cmd_c 
should join everything after /c into one quoted string.""" + wrap = codex_batch_mod._wrap_cmd_c + + # cmd /c with a path containing spaces — arguments are collapsed + cmd = ["cmd", "/c", "C:\\Program Files\\codex.cmd", "exec", "-C", "C:\\my project - Copy"] + result = wrap(cmd) + assert result[:2] == ["cmd", "/c"] + assert len(result) == 3 # exactly three elements + inner = result[2] + # The inner string should contain the quoted path + assert '"C:\\my project - Copy"' in inner + assert "exec" in inner + assert '"C:\\Program Files\\codex.cmd"' in inner + + # Non-cmd command — returned unchanged + assert wrap(["codex", "exec", "-C", "path"]) == ["codex", "exec", "-C", "path"] + + # cmd /c with simple paths (no spaces) — still collapses, no quotes needed + simple = wrap(["cmd", "/c", "codex", "exec", "-C", "repo"]) + assert len(simple) == 3 + assert simple[2] == "codex exec -C repo" + + +def test_codex_batch_command_on_windows_collapses_cmd_c(monkeypatch, tmp_path: Path) -> None: + """On Windows with a .cmd wrapper, paths with spaces must be quoted inside a single /c arg.""" + monkeypatch.setattr("sys.platform", "win32") + monkeypatch.setattr( + "shutil.which", + lambda _name: "C:\\Program Files\\npm\\codex.CMD", + ) + repo = tmp_path / "core_project - Copy" + repo.mkdir() + output = repo / ".desloppify" / "out.json" + + cmd = codex_batch_mod.codex_batch_command( + prompt="review prompt", + repo_root=repo, + output_file=output, + ) + # Should be exactly ["cmd", "/c", ""] + assert cmd[0] == "cmd" + assert cmd[1] == "/c" + assert len(cmd) == 3 + inner = cmd[2] + # The repo path with spaces must be quoted + assert f'"{repo}"' in inner or f'"{str(repo)}"' in inner + assert "exec" in inner + assert "--ephemeral" in inner + + def test_codex_batch_command_uses_sanitized_reasoning_effort(monkeypatch, tmp_path: Path) -> None: monkeypatch.setenv("DESLOPPIFY_CODEX_REASONING_EFFORT", "HIGH") From e1979b3c552b6c50107337a907763e2298fc3eb0 Mon Sep 17 00:00:00 2001 From: POM Date: 
Mon, 16 Mar 2026 21:47:13 +0100 Subject: [PATCH 30/43] fix: skip coverage gate on partial batch retry instead of merging results Replace the 195-line merge approach (find_prior_run_merged_results + overlay_retry_results_on_prior) with a ~5-line bypass: when --only-batches selects a subset of the packet's batches, set allow_partial=True so the coverage gate does not reject the partial retry. The merge approach had multiple issues: wrong prior-run selection after failed retry chains, dimension name normalization mismatches, and stale metadata in combined output. The simpler fix recognizes that a partial retry inherently cannot cover all dimensions, and the original run already handled the rest. Fixes #443 Co-Authored-By: Claude Opus 4.6 (1M context) --- .../commands/review/batch/execution_phases.py | 72 ++--------- .../review/batch/execution_results.py | 122 ------------------ ...st_review_batch_execution_phases_direct.py | 107 +++++++++++++++ 3 files changed, 115 insertions(+), 186 deletions(-) diff --git a/desloppify/app/commands/review/batch/execution_phases.py b/desloppify/app/commands/review/batch/execution_phases.py index 4cb0d0981..99f25a819 100644 --- a/desloppify/app/commands/review/batch/execution_phases.py +++ b/desloppify/app/commands/review/batch/execution_phases.py @@ -559,19 +559,6 @@ def _is_partial_batch_retry(prepared: PreparedBatchRunContext) -> bool: return set(prepared.selected_indexes) != all_indexes -def _retry_dimension_set(prepared: PreparedBatchRunContext) -> set[str]: - """Return the set of dimensions covered by the selected (retried) batches.""" - dims: set[str] = set() - for idx in prepared.selected_indexes: - if 0 <= idx < len(prepared.batches): - batch = prepared.batches[idx] - if isinstance(batch, dict): - for dim in batch.get("dimensions", []): - if isinstance(dim, str) and dim.strip(): - dims.add(dim.strip()) - return dims - - def merge_and_import_batch_run( *, prepared: PreparedBatchRunContext, @@ -598,60 +585,18 @@ def 
merge_and_import_batch_run( colorize_fn=deps.colorize_fn, ) - # When retrying a subset of batches (--only-batches), look for a prior - # run's merged results to fill in the non-retried dimensions. This - # prevents the coverage gate from rejecting a valid retry that only - # re-ran a few failed slices. - if missing_after_import and _is_partial_batch_retry(prepared): - from .execution_results import ( - find_prior_run_merged_results, - overlay_retry_results_on_prior, - ) - - subagent_runs_dir = prepared.run_dir.parent - prior_merged = find_prior_run_merged_results( - subagent_runs_dir=subagent_runs_dir, - immutable_packet_path=prepared.immutable_packet_path, - current_run_dir=prepared.run_dir, - ) - if prior_merged is not None: - retry_dims = _retry_dimension_set(prepared) - import json as _json - - retry_merged = _json.loads(merged_path.read_text()) - combined = overlay_retry_results_on_prior( - prior_merged=prior_merged, - retry_merged=retry_merged, - retry_dims=retry_dims, - ) - deps.safe_write_text_fn( - merged_path, - _json.dumps(combined, indent=2) + "\n", - ) - # Recompute coverage from the combined result. - combined_assessment_dims = normalize_dimension_list( - list((combined.get("assessments") or {}).keys()) - ) - from .scope import missing_scored_dimensions - - missing_after_import = missing_scored_dimensions( - selected_dims=combined_assessment_dims, - scored_dims=prepared.scored_dimensions, - ) - prior_dims = set((prior_merged.get("assessments") or {}).keys()) - inherited_count = len(prior_dims - retry_dims) - print( - deps.colorize_fn( - f" Merged retry with prior run: inherited {inherited_count} " - f"dimension(s) from original run, retried {len(retry_dims)}.", - "green", - ) - ) + # When retrying a subset of batches (--only-batches), the merged output + # only contains the retried dimensions. Skip the coverage gate so the + # partial result can be imported — the original run already covered the + # remaining dimensions. 
+ allow_partial = prepared.allow_partial + if _is_partial_batch_retry(prepared): + allow_partial = True enforce_import_coverage( missing_after_import=missing_after_import, packet_dimensions=prepared.packet_dimensions, - allow_partial=prepared.allow_partial, + allow_partial=allow_partial, scan_path=prepared.scan_path, colorize_fn=deps.colorize_fn, ) @@ -681,7 +626,6 @@ def merge_and_import_batch_run( "_prepare_run_runtime", "_print_runtime_expectation", "_resolve_runtime_policy", - "_retry_dimension_set", "execute_batch_run", "merge_and_import_batch_run", "prepare_batch_run", diff --git a/desloppify/app/commands/review/batch/execution_results.py b/desloppify/app/commands/review/batch/execution_results.py index bba4cbfe8..8f8bd8693 100644 --- a/desloppify/app/commands/review/batch/execution_results.py +++ b/desloppify/app/commands/review/batch/execution_results.py @@ -19,126 +19,6 @@ ) -def find_prior_run_merged_results( - *, - subagent_runs_dir: Path, - immutable_packet_path: Path, - current_run_dir: Path, -) -> dict | None: - """Find the most recent prior run's merged results for the same packet. - - Searches run directories under ``subagent_runs_dir`` for a prior run that - used the same immutable packet and produced a ``holistic_issues_merged.json``. - Skips the current run directory. Returns the parsed merged results dict or - ``None`` if no matching prior run is found. 
- """ - if not subagent_runs_dir.is_dir(): - return None - - immutable_packet_str = str(immutable_packet_path) - best_merged: dict | None = None - best_stamp: str = "" - - for run_dir in sorted(subagent_runs_dir.iterdir(), reverse=True): - if not run_dir.is_dir(): - continue - if run_dir == current_run_dir: - continue - summary_path = run_dir / "run_summary.json" - merged_path = run_dir / "holistic_issues_merged.json" - if not summary_path.exists() or not merged_path.exists(): - continue - try: - summary = json.loads(summary_path.read_text()) - except (OSError, json.JSONDecodeError): - continue - if str(summary.get("immutable_packet", "")) != immutable_packet_str: - continue - stamp = str(summary.get("run_stamp", run_dir.name)) - if stamp <= best_stamp: - continue - try: - merged = json.loads(merged_path.read_text()) - except (OSError, json.JSONDecodeError): - continue - if isinstance(merged, dict) and merged.get("assessments"): - best_merged = merged - best_stamp = stamp - - return best_merged - - -def overlay_retry_results_on_prior( - *, - prior_merged: dict, - retry_merged: dict, - retry_dims: set[str], -) -> dict: - """Combine prior run results with retry results for retried dimensions. - - For dimensions in ``retry_dims``, the retry's assessments/issues/notes take - precedence. For all other dimensions, the prior run's data is preserved. - The result is a new merged dict suitable for the standard coverage gate. 
- """ - combined = dict(retry_merged) - - # --- assessments: prior non-retried dims + retry dims --- - prior_assessments = prior_merged.get("assessments") or {} - retry_assessments = retry_merged.get("assessments") or {} - merged_assessments: dict[str, object] = {} - for dim, value in prior_assessments.items(): - if dim not in retry_dims: - merged_assessments[dim] = value - for dim, value in retry_assessments.items(): - merged_assessments[dim] = value - combined["assessments"] = merged_assessments - - # --- issues: prior non-retried dim issues + all retry issues --- - prior_issues = prior_merged.get("issues") or [] - retry_issues = retry_merged.get("issues") or [] - kept_prior_issues = [ - issue for issue in prior_issues - if isinstance(issue, dict) and str(issue.get("dimension", "")) not in retry_dims - ] - combined["issues"] = kept_prior_issues + list(retry_issues) - - # --- dimension_notes: merge similarly --- - prior_notes = prior_merged.get("dimension_notes") or {} - retry_notes = retry_merged.get("dimension_notes") or {} - merged_notes: dict[str, object] = {} - for dim, value in prior_notes.items(): - if dim not in retry_dims: - merged_notes[dim] = value - for dim, value in retry_notes.items(): - merged_notes[dim] = value - combined["dimension_notes"] = merged_notes - - # --- dimension_judgment: merge similarly --- - prior_judgment = prior_merged.get("dimension_judgment") or {} - retry_judgment = retry_merged.get("dimension_judgment") or {} - merged_judgment: dict[str, object] = {} - for dim, value in prior_judgment.items(): - if dim not in retry_dims: - merged_judgment[dim] = value - for dim, value in retry_judgment.items(): - merged_judgment[dim] = value - combined["dimension_judgment"] = merged_judgment - - # --- context_updates: merge similarly --- - prior_ctx = prior_merged.get("context_updates") or {} - retry_ctx = retry_merged.get("context_updates") or {} - if prior_ctx or retry_ctx: - merged_ctx: dict[str, object] = {} - for dim, value in 
prior_ctx.items(): - if dim not in retry_dims: - merged_ctx[dim] = value - for dim, value in retry_ctx.items(): - merged_ctx[dim] = value - combined["context_updates"] = merged_ctx - - return combined - - def collect_and_reconcile_results( *, collect_batch_results_fn, @@ -392,9 +272,7 @@ def log_run_start( __all__ = [ "collect_and_reconcile_results", "enforce_import_coverage", - "find_prior_run_merged_results", "log_run_start", "import_and_finalize", "merge_and_write_results", - "overlay_retry_results_on_prior", ] diff --git a/desloppify/tests/commands/review/test_review_batch_execution_phases_direct.py b/desloppify/tests/commands/review/test_review_batch_execution_phases_direct.py index c959f451b..add500463 100644 --- a/desloppify/tests/commands/review/test_review_batch_execution_phases_direct.py +++ b/desloppify/tests/commands/review/test_review_batch_execution_phases_direct.py @@ -222,3 +222,110 @@ def test_merge_and_import_batch_run_calls_all_pipeline_steps() -> None: phases_mod.import_and_finalize = original_import assert calls == ["enforce", "import"] + + +def test_is_partial_batch_retry_detects_subset() -> None: + """Selected indexes that are a strict subset of all batches is a partial retry.""" + ctx = _prepared_context( + batches=[{"dimensions": ["a"]}, {"dimensions": ["b"]}, {"dimensions": ["c"]}], + selected_indexes=[1], + ) + assert phases_mod._is_partial_batch_retry(ctx) is True + + +def test_is_partial_batch_retry_false_for_full_run() -> None: + ctx = _prepared_context( + batches=[{"dimensions": ["a"]}, {"dimensions": ["b"]}], + selected_indexes=[0, 1], + ) + assert phases_mod._is_partial_batch_retry(ctx) is False + + +def test_partial_retry_bypasses_coverage_gate() -> None: + """When --only-batches selects a subset, the coverage gate gets allow_partial=True.""" + captured_kwargs: dict = {} + original_merge = phases_mod.merge_and_write_results + original_enforce = phases_mod.enforce_import_coverage + original_import = phases_mod.import_and_finalize + 
+ # Return missing dims so the gate would normally block. + phases_mod.merge_and_write_results = lambda **_k: (Path("merged.json"), ["missing_dim"]) + + def capture_enforce(**kwargs): + captured_kwargs.update(kwargs) + + phases_mod.enforce_import_coverage = capture_enforce + phases_mod.import_and_finalize = lambda **_k: None + try: + # 3 batches but only batch 1 selected => partial retry + phases_mod.merge_and_import_batch_run( + prepared=_prepared_context( + allow_partial=False, + batches=[{"dimensions": ["a"]}, {"dimensions": ["b"]}, {"dimensions": ["c"]}], + selected_indexes=[1], + append_run_log=lambda *_a, **_k: None, + args=SimpleNamespace(), + ), + executed=_executed_context(), + state_file=Path("state.json"), + deps=SimpleNamespace( + merge_batch_results_fn=lambda *_a, **_k: {"issues": []}, + build_import_provenance_fn=lambda **_k: {}, + safe_write_text_fn=lambda *_a, **_k: None, + colorize_fn=lambda text, _tone=None: text, + do_import_fn=lambda *_a, **_k: None, + run_followup_scan_fn=lambda **_k: 0, + ), + ) + finally: + phases_mod.merge_and_write_results = original_merge + phases_mod.enforce_import_coverage = original_enforce + phases_mod.import_and_finalize = original_import + + # The gate should have been called with allow_partial=True despite + # the prepared context having allow_partial=False. 
+ assert captured_kwargs["allow_partial"] is True + + +def test_full_run_does_not_bypass_coverage_gate() -> None: + """A full run (all batches selected) should NOT override allow_partial.""" + captured_kwargs: dict = {} + original_merge = phases_mod.merge_and_write_results + original_enforce = phases_mod.enforce_import_coverage + original_import = phases_mod.import_and_finalize + + phases_mod.merge_and_write_results = lambda **_k: (Path("merged.json"), ["missing_dim"]) + + def capture_enforce(**kwargs): + captured_kwargs.update(kwargs) + + phases_mod.enforce_import_coverage = capture_enforce + phases_mod.import_and_finalize = lambda **_k: None + try: + # All 2 batches selected => full run + phases_mod.merge_and_import_batch_run( + prepared=_prepared_context( + allow_partial=False, + batches=[{"dimensions": ["a"]}, {"dimensions": ["b"]}], + selected_indexes=[0, 1], + append_run_log=lambda *_a, **_k: None, + args=SimpleNamespace(), + ), + executed=_executed_context(), + state_file=Path("state.json"), + deps=SimpleNamespace( + merge_batch_results_fn=lambda *_a, **_k: {"issues": []}, + build_import_provenance_fn=lambda **_k: {}, + safe_write_text_fn=lambda *_a, **_k: None, + colorize_fn=lambda text, _tone=None: text, + do_import_fn=lambda *_a, **_k: None, + run_followup_scan_fn=lambda **_k: 0, + ), + ) + finally: + phases_mod.merge_and_write_results = original_merge + phases_mod.enforce_import_coverage = original_enforce + phases_mod.import_and_finalize = original_import + + # Full run should preserve the original allow_partial=False. 
+ assert captured_kwargs["allow_partial"] is False From ad10a458d81c2b48326b0497460bbb729ba0b128 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:48:54 +0100 Subject: [PATCH 31/43] bump version to 0.9.10 --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f5d4ebe20..668bb480c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "desloppify" -version = "0.9.9" +version = "0.9.10" description = "Multi-language codebase health scanner and technical debt tracker" readme = "README.md" requires-python = ">=3.11" From 3b299e62c9198f7e660fec2dc40a2e111176978a Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:49:07 +0100 Subject: [PATCH 32/43] bump version to 0.9.10 Co-Authored-By: Claude Opus 4.6 (1M context) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f5d4ebe20..668bb480c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "desloppify" -version = "0.9.9" +version = "0.9.10" description = "Multi-language codebase health scanner and technical debt tracker" readme = "README.md" requires-python = ">=3.11" From 731119a179e39bce44647e9b51372bd5f35bba74 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 21:49:33 +0100 Subject: [PATCH 33/43] bump version to 0.9.10 Co-Authored-By: Claude Opus 4.6 (1M context) --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index f5d4ebe20..668bb480c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta" [project] name = "desloppify" -version = "0.9.9" +version = "0.9.10" description = "Multi-language codebase health scanner and technical debt tracker" readme = "README.md" requires-python = ">=3.11" From 
b631f9c45335311f1a07e03b4c515c89f069238d Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 22:08:57 +0100 Subject: [PATCH 34/43] chore: gitignore .agents/ and untrack generated skill doc The .agents/skills/desloppify/SKILL.md is a generated file (same as .claude/skills/). Canonical copies live under docs/. Co-Authored-By: Claude Opus 4.6 (1M context) --- .agents/skills/desloppify/SKILL.md | 289 ----------------------------- .gitignore | 1 + 2 files changed, 1 insertion(+), 289 deletions(-) delete mode 100644 .agents/skills/desloppify/SKILL.md diff --git a/.agents/skills/desloppify/SKILL.md b/.agents/skills/desloppify/SKILL.md deleted file mode 100644 index 32f4908c0..000000000 --- a/.agents/skills/desloppify/SKILL.md +++ /dev/null @@ -1,289 +0,0 @@ ---- -name: desloppify -description: > - Codebase health scanner and technical debt tracker. Use when the user asks - about code quality, technical debt, dead code, large files, god classes, - duplicate functions, code smells, naming issues, import cycles, or coupling - problems. Also use when asked for a health score, what to fix next, or to - create a cleanup plan. Supports 29 languages. -allowed-tools: Bash(desloppify *) ---- - - - - -# Desloppify - -## 1. Your Job - -Maximise the **strict score** honestly. Your main cycle: **scan → plan → execute → rescan**. Follow the scan output's **INSTRUCTIONS FOR AGENTS** — don't substitute your own analysis. - -**Don't be lazy.** Do large refactors and small detailed fixes with equal energy. If it takes touching 20 files, touch 20 files. If it's a one-line change, make it. No task is too big or too small — fix things properly, not minimally. - -## 2. The Workflow - -Three phases, repeated as a cycle. - -### Phase 1: Scan and review — understand the codebase - -```bash -desloppify scan --path . # analyse the codebase -desloppify status # check scores — are we at target? -``` - -The scan will tell you if subjective dimensions need review. Follow its instructions. 
To trigger a review manually: -```bash -desloppify review --prepare # then follow your runner's review workflow -``` - -### Phase 2: Plan — decide what to work on - -After reviews, triage stages and plan creation appear in the execution queue surfaced by `next`. Complete them in order — `next` tells you what each stage expects in the `--report`: -```bash -desloppify next # shows the next execution workflow step -desloppify plan triage --stage observe --report "themes and root causes..." -desloppify plan triage --stage reflect --report "comparison against completed work..." -desloppify plan triage --stage organize --report "summary of priorities..." -desloppify plan triage --complete --strategy "execution plan..." -``` - -For automated triage: `desloppify plan triage --run-stages --runner codex` (Codex) or `--runner claude` (Claude). Options: `--only-stages`, `--dry-run`, `--stage-timeout-seconds`. - -Then shape the queue. **The plan shapes everything `next` gives you** — `next` is the execution queue, not the full backlog. Don't skip this step. - -```bash -desloppify plan # see the living plan details -desloppify plan queue # compact execution queue view -desloppify plan reorder top # reorder — what unblocks the most? -desloppify plan cluster create # group related issues to batch-fix -desloppify plan focus # scope next to one cluster -desloppify plan skip # defer — hide from next -``` - -### Phase 3: Execute — grind the queue to completion - -Trust the plan and execute. Don't rescan mid-queue — finish the queue first. - -**Branch first.** Create a dedicated branch — never commit health work directly to main: -```bash -git checkout -b desloppify/code-health # or desloppify/ -desloppify config set commit_pr 42 # link a PR for auto-updated descriptions -``` - -**The loop:** -```bash -# 1. Get the next item from the execution queue -desloppify next - -# 2. Fix the issue in code - -# 3. Resolve it (next shows the exact command including required attestation) - -# 4. 
When you have a logical batch, commit and record -git add && git commit -m "desloppify: fix 3 deferred_import findings" -desloppify plan commit-log record # moves findings uncommitted → committed, updates PR - -# 5. Push periodically -git push -u origin desloppify/code-health - -# 6. Repeat until the queue is empty -``` - -Score may temporarily drop after fixes — cascade effects are normal, keep going. -If `next` suggests an auto-fixer, run `desloppify autofix --dry-run` to preview, then apply. - -**When the queue is clear, go back to Phase 1.** New issues will surface, cascades will have resolved, priorities will have shifted. This is the cycle. - -## 3. Reference - -### Key concepts - -- **Tiers**: T1 auto-fix → T2 quick manual → T3 judgment call → T4 major refactor. -- **Auto-clusters**: related findings are auto-grouped in `next`. Drill in with `next --cluster `. -- **Zones**: production/script (scored), test/config/generated/vendor (not scored). Fix with `zone set`. -- **Wontfix cost**: widens the lenient↔strict gap. Challenge past decisions when the gap grows. - -### Scoring - -Overall score = **25% mechanical** + **75% subjective**. - -- **Mechanical (25%)**: auto-detected issues — duplication, dead code, smells, unused imports, security. Fixed by changing code and rescanning. -- **Subjective (75%)**: design quality review — naming, error handling, abstractions, clarity. Starts at **0%** until reviewed. The scan will prompt you when a review is needed. -- **Strict score** is the north star: wontfix items count as open. The gap between overall and strict is your wontfix debt. -- **Score types**: overall (lenient), strict (wontfix counts), objective (mechanical only), verified (confirmed fixes only). - -### Reviews - -Four paths to get subjective scores: - -- **Local runner (Codex)**: `desloppify review --run-batches --runner codex --parallel --scan-after-import` — automated end-to-end. 
-- **Local runner (Claude)**: `desloppify review --prepare` → launch parallel subagents → `desloppify review --import merged.json` — see skill doc overlay for details. -- **Cloud/external**: `desloppify review --external-start --external-runner claude` → follow session template → `--external-submit`. -- **Manual path**: `desloppify review --prepare` → review per dimension → `desloppify review --import file.json`. - -- Import first, fix after — import creates tracked state entries for correlation. -- Target-matching scores trigger auto-reset to prevent gaming. Use the blind-review workflow described in your agent overlay doc (e.g. `docs/CLAUDE.md`, `docs/HERMES.md`). -- Even moderate scores (60-80) dramatically improve overall health. -- Stale dimensions auto-surface in `next` — just follow the queue. - -**Integrity rules:** Score from evidence only — no prior chat context, score history, or target-threshold anchoring. When evidence is mixed, score lower and explain uncertainty. Assess every requested dimension; never drop one. - -#### Review output format - -Return machine-readable JSON for review imports. For `--external-submit`, include `session` from the generated template: - -```json -{ - "session": { - "id": "", - "token": "" - }, - "assessments": { - "": 0 - }, - "findings": [ - { - "dimension": "", - "identifier": "short_id", - "summary": "one-line defect summary", - "related_files": ["relative/path/to/file.py"], - "evidence": ["specific code observation"], - "suggestion": "concrete fix recommendation", - "confidence": "high|medium|low" - } - ] -} -``` - -`findings` MUST match `query.system_prompt` exactly (including `related_files`, `evidence`, and `suggestion`). Use `"findings": []` when no defects found. Import is fail-closed: invalid findings abort unless `--allow-partial` is passed. Assessment scores are auto-applied from trusted internal or cloud session imports. Legacy `--attested-external` remains supported. 
- -#### Import paths - -- Robust session flow (recommended): `desloppify review --external-start --external-runner claude` → use generated prompt/template → run printed `--external-submit` command. -- Durable scored import (legacy): `desloppify review --import findings.json --attested-external --attest "I validated this review was completed without awareness of overall score and is unbiased."` -- Findings-only fallback: `desloppify review --import findings.json` - -#### Reviewer agent prompt - -Runners that support agent definitions (Cursor, Copilot, Gemini) can create a dedicated reviewer agent. Use this system prompt: - -``` -You are a code quality reviewer. You will be given a codebase path, a set of -dimensions to score, and what each dimension means. Read the code, score each -dimension 0-100 from evidence only, and return JSON in the required format. -Do not anchor to target thresholds. When evidence is mixed, score lower and -explain uncertainty. -``` - -See your editor's overlay section below for the agent config format. 
- -### Plan commands - -```bash -desloppify plan reorder top # move all cluster members at once -desloppify plan reorder top # mix clusters + findings in one reorder -desloppify plan reorder before -t X # position relative to another item/cluster -desloppify plan cluster reorder a,b top # reorder multiple clusters as one block -desloppify plan resolve # mark complete -desloppify plan reopen # reopen -desloppify backlog # broader non-execution backlog -``` - -### Commit tracking - -```bash -desloppify plan commit-log # see uncommitted + committed status -desloppify plan commit-log record # record HEAD commit, update PR description -desloppify plan commit-log record --note "why" # with rationale -desloppify plan commit-log record --only "smells::*" # record specific findings only -desloppify plan commit-log history # show commit records -desloppify plan commit-log pr # preview PR body markdown -desloppify config set commit_tracking_enabled false # disable guidance -``` - -After resolving findings as `fixed`, the tool shows uncommitted work, committed history, and a suggested commit message. After committing externally, run `record` to move findings from uncommitted to committed and auto-update the linked PR description. - -### Quick reference - -```bash -desloppify next --count 5 # top 5 execution items -desloppify next --cluster # drill into a cluster -desloppify backlog --count 5 # top 5 backlog items outside execution -desloppify show # filter by file/detector/ID -desloppify show --status open # all open findings -desloppify plan skip --permanent "" --note "reason" --attest "..." # accept debt -desloppify exclude # exclude a directory from scanning -desloppify config show # show all config including excludes -desloppify scan --path . --reset-subjective # reset subjective baseline to 0 -``` - -## 4. Fix Tool Issues Upstream - -When desloppify itself appears wrong or inconsistent — a bug, a bad detection, a crash, confusing output — **fix it and open a PR**. 
If you can't confidently fix it, file an issue instead. - -### Fix and PR (preferred) - -Clone the tool repo to a temp directory, make the fix there, and verify it works against the project you're scanning before pushing. - -```bash -git clone https://github.com/peteromallet/desloppify.git /tmp/desloppify-fix -cd /tmp/desloppify-fix -git checkout -b fix/ -``` - -Make your changes, then run the test suite and verify the fix against the original project: - -```bash -python -m pytest desloppify/tests/ -q -python -m desloppify scan --path # the project you were scanning -``` - -Once it looks good, push and open a PR: - -```bash -git add && git commit -m "fix: " -git push -u origin fix/ -gh pr create --title "fix: " --body "$(cat <<'EOF' -## Problem - - -## Fix - -EOF -)" -``` - -Clean up after: `rm -rf /tmp/desloppify-fix` - -### File an issue (fallback) - -If the fix is unclear or the change needs discussion, open an issue at `https://github.com/peteromallet/desloppify/issues` with a minimal repro: command, path, expected output, actual output. - -## Prerequisite - -`command -v desloppify >/dev/null 2>&1 && echo "desloppify: installed" || echo "NOT INSTALLED — run: pip install --upgrade git+https://github.com/peteromallet/desloppify.git"` - - - -## Codex Overlay - -This is the canonical Codex overlay used by the README install command. - -1. Prefer first-class batch runs: `desloppify review --run-batches --runner codex --parallel --scan-after-import`. -2. The command writes immutable packet snapshots under `.desloppify/review_packets/holistic_packet_*.json`; use those for reproducible retries. -3. Keep reviewer input scoped to the immutable packet and the source files named in each batch. -4. If a batch fails, retry only that slice with `desloppify review --run-batches --packet --only-batches `. -5. 
Manual override is safety-scoped: you cannot combine it with `--allow-partial`, and provisional manual scores expire on the next `scan` unless replaced by trusted internal or attested-external imports. - -### Triage workflow - -Prefer automated triage: `desloppify plan triage --run-stages --runner codex` - -Options: `--only-stages observe,reflect` (subset), `--dry-run` (prompts only), `--stage-timeout-seconds N` (per-stage). - -Run artifacts go to `.desloppify/triage_runs//` — each run gets its own directory with `run.log` (live timestamped events), `run_summary.json`, per-stage `prompts/`, `output/`, and `logs/`. Check `run.log` to diagnose stalls or failures. Re-running resumes from the last confirmed stage. - -If automated triage stalls, check `run.log` for the last event, then use `desloppify plan triage --stage-prompt ` to get the full prompt with gate rules. - - - diff --git a/.gitignore b/.gitignore index b52af9407..c2d6e36dd 100644 --- a/.gitignore +++ b/.gitignore @@ -38,6 +38,7 @@ pytest-full.xml # Local agent-generated skills (keep canonical copies under docs/) .claude/ +.agents/ /skills/ CLAUDE.md AGENTS.md From 23262addd8ef4e4c9338de395effd4377865a251 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 22:18:39 +0100 Subject: [PATCH 35/43] fix: add hermes and droid to update-skill help text Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/app/cli_support/parser_groups_admin.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/desloppify/app/cli_support/parser_groups_admin.py b/desloppify/app/cli_support/parser_groups_admin.py index 3b797d537..b888a07b3 100644 --- a/desloppify/app/cli_support/parser_groups_admin.py +++ b/desloppify/app/cli_support/parser_groups_admin.py @@ -195,6 +195,6 @@ def _add_update_skill_parser(sub) -> None: "interface", nargs="?", default=None, - help="Agent interface (amp, claude, codex, cursor, copilot, windsurf, gemini, opencode). 
" + help="Agent interface (amp, claude, codex, cursor, copilot, windsurf, gemini, hermes, droid, opencode). " "Auto-detected on updates if omitted.", ) From 9af7bb4fb0be2e144d00f2dc977b02c0b509ad93 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 22:21:31 +0100 Subject: [PATCH 36/43] docs: draft 0.9.10 release notes Co-Authored-By: Claude Opus 4.6 (1M context) --- RELEASE_NOTES_0.9.10.md | 19 ++++++------------- 1 file changed, 6 insertions(+), 13 deletions(-) diff --git a/RELEASE_NOTES_0.9.10.md b/RELEASE_NOTES_0.9.10.md index 9b1721e78..e1bffe924 100644 --- a/RELEASE_NOTES_0.9.10.md +++ b/RELEASE_NOTES_0.9.10.md @@ -6,7 +6,7 @@ This release adds **experimental Hermes Agent integration** for fully autonomous --- -**124 files changed | 26 commits | 5,425 tests passing** +**141 files changed | 47 commits | 5,438 tests passing** ## Hermes Agent Integration (Experimental) @@ -66,7 +66,7 @@ Massive contribution from **@MacHatter1** (PR #414). A new `FrameworkSpec` abstr ## SCSS Language Plugin -Thanks to **@klausagnoletti** for adding SCSS/Sass support via stylelint integration (PR #428). Detects code smells, unused variables, and style issues in `.scss` and `.sass` files. @klausagnoletti also followed up with bug fixes and tests (PR #452) that caught three runtime issues — a placeholder in the command string, a wrong formatter flag, and dead config keys — any of which would have meant zero findings at runtime. +Thanks to **@klausagnoletti** for adding SCSS/Sass support via stylelint integration (PR #428). Detects code smells, unused variables, and style issues in `.scss` and `.sass` files. @klausagnoletti has also submitted a follow-up PR (#452) with bug fixes, tests, and honest documentation — expected to land shortly after release. ## R Language Improvements @@ -96,18 +96,11 @@ Another big one from **@MacHatter1** (PR #432). Cold and full scan times reduced - **PHPStan parser fixes** — @nickperkins (PR #420). 
stderr output and malformed JSON from PHPStan no longer crash the parser. Clean, focused fix. - **Preserve plan_start_scores during force-rescan** — manual clusters are no longer wiped when force-rescanning. - **Import run project root** — `--scan-after-import` now derives the project root correctly from the state file path. -- **C++ detector scoping** (PR #415) — narrowed detection rules to reduce false positives. - -## Pending (expected before release) - -These PRs are open, reviewed, and passing tests: - -- **Windows codex runner fix** (PR #453) — proper `cmd /c` argument quoting + UTF-8 log encoding. Reported by **@DenysAshikhin**. +- **Windows codex runner** (PR #453) — proper `cmd /c` argument quoting + UTF-8 log encoding for Windows. Reported by **@DenysAshikhin**. - **Scan after queue drain** (PR #454) — `score_display_mode` now returns LIVE when queue is empty, fixing the UX contradiction where `next` says "run scan" but scan refuses. Reported by **@kgelpes**. -- **SKILL.md cleanup** (PR #455) — removes unsupported `allowed-tools` frontmatter, fixes batch naming inconsistency, adds pip fallback alongside uvx. Three issues all reported by **@willfrey**. +- **SKILL.md cleanup** (PR #455) — removes unsupported `allowed-tools` frontmatter, fixes batch naming inconsistency (`.raw.txt` not `.json`), adds pip fallback alongside uvx. Three issues all reported by **@willfrey**. - **Batch retry coverage gate** (PR #456) — partial retries now bypass the full-coverage requirement instead of being rejected. Reported by **@imetandy**. -- **Dead shim cleanup** (PR #460) — removes unused backward-compat wrapper from Rust inline-test filtering. -- **R anonymous function extraction** (PR #461) — follow-up fix so the tree-sitter anonymous function pattern actually works (extractor now handles missing `@name` capture). 
+- **R anonymous function extraction** (PR #461) — the tree-sitter anonymous function pattern from PR #449 now actually works (extractor handles missing `@name` capture with `<anonymous>` fallback).

## Community

This release wouldn't exist without the community. Seriously — thank you all.

**@sims1253** has been the driving force behind R language support, with four PRs spanning linting, tree-sitter queries, and harness support. The R plugin is becoming genuinely useful thanks to this sustained effort.

**@klausagnoletti** added SCSS support and has a follow-up PR (#452) with bug fixes and honest documentation — replacing a hallucinated 455-line README with an accurate 31-line one. The kind of contributor who makes the codebase more trustworthy.
From c342bdbc26e3ba2a78a6eaf02ebb96c1d09622df Mon Sep 17 00:00:00 2001 From: Klaus Agnoletti <24544601+klausagnoletti@users.noreply.github.com> Date: Mon, 16 Mar 2026 22:31:17 +0100 Subject: [PATCH 37/43] =?UTF-8?q?feat(ruby):=20improve=20plugin=20?= =?UTF-8?q?=E2=80=94=20excludes,=20detect=20markers,=20default=5Fsrc,=20sp?= =?UTF-8?q?ec/=20support,=20README,=20tests=20(#462)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * feat(ruby): improve plugin — excludes, detect markers, default_src, README, tests Co-Authored-By: Claude Sonnet 4.6 Co-Authored-By: Gemini Co-Authored-By: OpenAI Codex * feat(ruby): add spec/ test dir, bin/ exclusion; expose external_test_dirs in generic_lang - Add external_test_dirs and test_file_extensions parameters to generic_lang() so plugins can override the hardcoded ["tests", "test"] defaults - Configure Ruby plugin with external_test_dirs=["spec", "test"] (RSpec + Minitest) - Add bin/ to Ruby exclusions (binstubs/shims) - Update tests: add bin/ to excluded dirs list, add test_external_test_dirs_includes_spec Co-Authored-By: Claude Sonnet 4.6 Co-Authored-By: Gemini Co-Authored-By: OpenAI Codex * docs(ruby): add bin/ to exclusions list in README Co-Authored-By: Claude Sonnet 4.6 Co-Authored-By: Gemini Co-Authored-By: OpenAI Codex --------- Co-authored-by: Claude Sonnet 4.6 Co-authored-by: Gemini Co-authored-by: OpenAI Codex --- .../_framework/generic_support/core.py | 8 +- .../generic_support/registration.py | 2 + desloppify/languages/ruby/README.md | 66 ++++++++++ desloppify/languages/ruby/__init__.py | 32 ++++- desloppify/languages/ruby/tests/__init__.py | 0 desloppify/languages/ruby/tests/test_init.py | 116 ++++++++++++++++++ 6 files changed, 219 insertions(+), 5 deletions(-) create mode 100644 desloppify/languages/ruby/README.md create mode 100644 desloppify/languages/ruby/tests/__init__.py create mode 100644 desloppify/languages/ruby/tests/test_init.py diff --git 
a/desloppify/languages/_framework/generic_support/core.py b/desloppify/languages/_framework/generic_support/core.py index 980d8bbb1..1cb66154d 100644 --- a/desloppify/languages/_framework/generic_support/core.py +++ b/desloppify/languages/_framework/generic_support/core.py @@ -57,6 +57,8 @@ def generic_lang( zone_rules: list[ZoneRule] | None = None, test_coverage_module: Any | None = None, entry_patterns: list[str] | None = None, + external_test_dirs: list[str] | None = None, + test_file_extensions: list[str] | None = None, frameworks: bool = False, ) -> LangConfig: """Build and register a generic language plugin from tool specs. @@ -82,6 +84,8 @@ def generic_lang( zone_rules=zone_rules, test_coverage_module=test_coverage_module, entry_patterns=entry_patterns, + external_test_dirs=external_test_dirs, + test_file_extensions=test_file_extensions, ) from desloppify.languages import register_generic_lang @@ -123,8 +127,8 @@ def generic_lang( complexity_threshold=15, default_scan_profile="objective", detect_markers=opts.detect_markers or [], - external_test_dirs=["tests", "test"], - test_file_extensions=extensions, + external_test_dirs=opts.external_test_dirs if opts.external_test_dirs is not None else ["tests", "test"], + test_file_extensions=opts.test_file_extensions if opts.test_file_extensions is not None else extensions, zone_rules=opts.zone_rules if opts.zone_rules is not None else generic_zone_rules(extensions), ) diff --git a/desloppify/languages/_framework/generic_support/registration.py b/desloppify/languages/_framework/generic_support/registration.py index feb244483..85c6d3d0f 100644 --- a/desloppify/languages/_framework/generic_support/registration.py +++ b/desloppify/languages/_framework/generic_support/registration.py @@ -39,6 +39,8 @@ class GenericLangOptions: zone_rules: list[ZoneRule] | None = None test_coverage_module: Any | None = None entry_patterns: list[str] | None = None + external_test_dirs: list[str] | None = None + test_file_extensions: 
list[str] | None = None def _register_generic_tool_specs(tool_specs: list[dict[str, Any]]) -> dict[str, FixerConfig]: diff --git a/desloppify/languages/ruby/README.md b/desloppify/languages/ruby/README.md new file mode 100644 index 000000000..d15d6e2a7 --- /dev/null +++ b/desloppify/languages/ruby/README.md @@ -0,0 +1,66 @@ +# Ruby Language Plugin for Desloppify + +Static analysis for Ruby codebases via RuboCop, plus structural, coupling, and +duplication analysis powered by tree-sitter — no additional tools required for +most checks. + +## Supported extensions + +`.rb` + +## Requirements + +- [`rubocop`](https://rubocop.org/) on `PATH` — required for the **RuboCop** phase + +Install: + +```bash +gem install rubocop +``` + +## Project detection + +Activates on projects containing any of: `Gemfile`, `Rakefile`, `.ruby-version`, +or any `*.gemspec` file. + +## Usage + +```bash +# Scan for issues +desloppify scan --path + +# Full scan with all phases +desloppify scan --path --profile full + +# Auto-correct RuboCop offenses +desloppify fix --path +``` + +## What gets analysed + +| Phase | What it finds | +|-------|--------------| +| RuboCop | Style violations, layout issues, lint warnings | +| Structural analysis | God classes, large files, complexity hotspots | +| Coupling + cycles + orphaned | Import cycles, tight coupling, unreachable files | +| Duplicates | Copy-pasted methods across the codebase | +| Signature | Methods with overly broad or inconsistent signatures | +| Code smells | Empty rescue blocks, unreachable code, and more | +| Security | Common security anti-patterns | +| Subjective review | LLM-powered design and responsibility review | + +## Autofix + +RuboCop's `--auto-correct` is wired to `desloppify fix`. Only offenses that +RuboCop marks as safe to auto-correct will be changed. 
+ +## Exclusions + +The following are excluded from analysis by default: + +- `vendor/` +- `.bundle/` +- `coverage/` +- `tmp/` +- `log/` +- `bin/` diff --git a/desloppify/languages/ruby/__init__.py b/desloppify/languages/ruby/__init__.py index 6f0e16ab0..88abdbdd9 100644 --- a/desloppify/languages/ruby/__init__.py +++ b/desloppify/languages/ruby/__init__.py @@ -1,4 +1,9 @@ -"""Ruby language plugin — rubocop.""" +"""Ruby language plugin — rubocop. + +Registers a generic desloppify language plugin for Ruby projects. RuboCop is +the sole external tool requirement; tree-sitter provides function/class +extraction for duplicate detection and import-graph analysis at no extra cost. +""" from desloppify.languages._framework.generic_support.core import generic_lang from desloppify.languages._framework.treesitter import RUBY_SPEC @@ -9,16 +14,37 @@ tools=[ { "label": "rubocop", + # JSON output is stable across RuboCop versions and machine-parseable. "cmd": "rubocop --format=json", "fmt": "rubocop", "id": "rubocop_offense", + # Tier 2 = non-blocking advisory finding (not a hard error). "tier": 2, + # --auto-correct applies only offenses RuboCop marks as safe. "fix_cmd": "rubocop --auto-correct", }, ], - exclude=["vendor"], + exclude=[ + "vendor", # Bundled third-party gems (vendored dependencies) + ".bundle", # Bundler cache directory — not project source + "coverage", # SimpleCov / test coverage output + "tmp", # Rails/Rack temp files (cache, pids, sockets) + "log", # Application log files + "bin", # Binstubs and shims + ], + # "shallow" depth is upgraded to "standard" automatically when tree-sitter + # is available (generic_support/core.py:131). No need to set "full" here. depth="shallow", - detect_markers=["Gemfile"], + # Ruby convention: library source lives in lib/, not the project root. + default_src="lib", + # Ruby convention: tests live in spec/ (RSpec) or test/ (Minitest). 
+ external_test_dirs=["spec", "test"], + detect_markers=[ + "Gemfile", # Bundler dependency manifest — most Ruby projects + "Rakefile", # Build/task file — present even without Bundler + ".ruby-version", # rbenv/rvm version pin — reliable project marker + "*.gemspec", # Gem specification — present in library/gem projects + ], treesitter_spec=RUBY_SPEC, ) diff --git a/desloppify/languages/ruby/tests/__init__.py b/desloppify/languages/ruby/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/desloppify/languages/ruby/tests/test_init.py b/desloppify/languages/ruby/tests/test_init.py new file mode 100644 index 000000000..abdbe230e --- /dev/null +++ b/desloppify/languages/ruby/tests/test_init.py @@ -0,0 +1,116 @@ +"""Tests for ``desloppify.languages.ruby`` configuration wiring. + +These tests verify that the Ruby plugin is wired correctly — they do NOT run +RuboCop or any external tool. They are pure in-process checks of the LangConfig +object that ``generic_lang`` produces. 
+""" + +from __future__ import annotations + +import pytest + +from desloppify.languages import get_lang + + +# --------------------------------------------------------------------------- +# Shared fixture +# --------------------------------------------------------------------------- + +@pytest.fixture(scope="module") +def ruby_cfg(): + """Return the registered Ruby LangConfig (loaded once per test session).""" + return get_lang("ruby") + + +# --------------------------------------------------------------------------- +# Basic identity +# --------------------------------------------------------------------------- + +def test_config_name(ruby_cfg): + assert ruby_cfg.name == "ruby" + + +def test_config_extensions(ruby_cfg): + assert ruby_cfg.extensions == [".rb"] + + +def test_default_src_is_lib(ruby_cfg): + """Ruby convention: library source lives in lib/, not the project root.""" + assert ruby_cfg.default_src == "lib" + + +# --------------------------------------------------------------------------- +# Project detection markers +# --------------------------------------------------------------------------- + +@pytest.mark.parametrize("marker", [ + "Gemfile", # Bundler manifest + "Rakefile", # Build/task file + ".ruby-version", # rbenv/rvm version pin + "*.gemspec", # Gem specification (glob — supported by resolution.py) +]) +def test_detect_markers_present(ruby_cfg, marker): + assert marker in ruby_cfg.detect_markers, ( + f"Expected detect_marker {marker!r} to be registered" + ) + + +# --------------------------------------------------------------------------- +# Tool wiring +# --------------------------------------------------------------------------- + +def test_rubocop_detect_command_registered(ruby_cfg): + """rubocop_offense must be present so the RuboCop phase can run.""" + assert "rubocop_offense" in ruby_cfg.detect_commands + + +# --------------------------------------------------------------------------- +# Phases +# 
--------------------------------------------------------------------------- + +@pytest.mark.parametrize("label", [ + "Structural analysis", + "Coupling + cycles + orphaned", + "rubocop", +]) +def test_has_required_phases(ruby_cfg, label): + labels = {p.label for p in ruby_cfg.phases} + assert label in labels, f"Expected phase {label!r} to be present" + + +# --------------------------------------------------------------------------- +# File finder exclusions +# --------------------------------------------------------------------------- + +_EXCLUDED_DIRS = [".bundle", "coverage", "tmp", "log", "vendor", "bin"] + + +def test_file_finder_skips_excluded_dirs(tmp_path, ruby_cfg): + """Files inside excluded directories must not appear in scan results.""" + # Create one legitimate source file. + (tmp_path / "lib").mkdir() + (tmp_path / "lib" / "app.rb").write_text("class App; end\n") + + # Create an .rb file inside each excluded directory. + for name in _EXCLUDED_DIRS: + d = tmp_path / name + d.mkdir(exist_ok=True) + (d / "noise.rb").write_text("# should be ignored\n") + + from desloppify.base.runtime_state import RuntimeContext, runtime_scope + from desloppify.base.discovery.source import clear_source_file_cache_for_tests + + ctx = RuntimeContext(project_root=tmp_path) + with runtime_scope(ctx): + clear_source_file_cache_for_tests() + files = ruby_cfg.file_finder(tmp_path) + + assert files == ["lib/app.rb"], ( + f"Expected only lib/app.rb but got: {files}" + ) + + +def test_external_test_dirs_includes_spec(ruby_cfg): + """Ruby projects frequently use spec/ for RSpec tests.""" + assert "spec" in ruby_cfg.external_test_dirs + assert "test" in ruby_cfg.external_test_dirs From cba7a924fb24b8901a194b54f5afd0d826a094d7 Mon Sep 17 00:00:00 2001 From: Maximilian Scholz Date: Mon, 16 Mar 2026 22:31:19 +0100 Subject: [PATCH 38/43] feat: add Factory Droid skill harness support (#451) - Add 'droid' to SKILL_TARGETS (.factory/skills/desloppify/SKILL.md) - Add .factory/skills/ to 
SKILL_SEARCH_PATHS for auto-discovery - Create docs/DROID.md overlay with review and triage workflow - Bump SKILL_VERSION to 6 - Add droid to README agent prompt harness list --- README.md | 2 +- desloppify/app/skill_docs.py | 2 ++ docs/DROID.md | 39 ++++++++++++++++++++++++++++++++++++ 3 files changed, 42 insertions(+), 1 deletion(-) create mode 100644 docs/DROID.md diff --git a/README.md b/README.md index 8efe293cb..dbeadbe3c 100644 --- a/README.md +++ b/README.md @@ -23,7 +23,7 @@ I want you to improve the quality of this codebase. To do this, install and run Run ALL of the following (requires Python 3.11+): pip install --upgrade "desloppify[full]" -desloppify update-skill claude # installs the full workflow guide — pick yours: claude, cursor, codex, copilot, windsurf, gemini +desloppify update-skill claude # installs the full workflow guide — pick yours: claude, cursor, codex, copilot, droid, windsurf, gemini Add .desloppify/ to your .gitignore — it contains local state that shouldn't be committed. diff --git a/desloppify/app/skill_docs.py b/desloppify/app/skill_docs.py index ec75862ed..0c0b9a819 100644 --- a/desloppify/app/skill_docs.py +++ b/desloppify/app/skill_docs.py @@ -19,6 +19,7 @@ # Locations where the skill doc might be installed, relative to project root. 
SKILL_SEARCH_PATHS = ( + ".factory/skills/desloppify/SKILL.md", ".agents/skills/desloppify/SKILL.md", ".claude/skills/desloppify/SKILL.md", ".opencode/skills/desloppify/SKILL.md", @@ -38,6 +39,7 @@ "codex": (".agents/skills/desloppify/SKILL.md", "CODEX", True), "cursor": (".cursor/rules/desloppify.md", "CURSOR", True), "copilot": (".github/copilot-instructions.md", "COPILOT", False), + "droid": (".factory/skills/desloppify/SKILL.md", "DROID", True), "windsurf": ("AGENTS.md", "WINDSURF", False), "gemini": ("AGENTS.md", "GEMINI", False), "hermes": ("AGENTS.md", "HERMES", False), diff --git a/docs/DROID.md b/docs/DROID.md new file mode 100644 index 000000000..b2677967e --- /dev/null +++ b/docs/DROID.md @@ -0,0 +1,39 @@ +## Droid Overlay + +Droid skills are installed at `.factory/skills/desloppify/SKILL.md`. Droid +automatically discovers and invokes skills based on the task context, +or you can invoke directly with `/desloppify`. + +### Subagents + +Droid supports custom droids (subagents) for parallel work. Use the `worker` +droid for delegating independent tasks — it inherits the project's tool +configuration and is ideal for parallel smell detection, file analysis, +or cross-cutting refactoring work. + +### Review workflow + +1. Run `desloppify review --prepare` to generate `query.json` and + `.desloppify/review_packet_blind.json`. +2. Split dimensions into 3-4 batches by theme. +3. For each batch, launch a worker subagent: + ``` + Task("worker", "review batch 1", + prompt="Score these dimensions: . Read .desloppify/review_packet_blind.json. Score from code evidence only. Write results to review_batch_1.json.") + ``` +4. Merge assessments (average overlapping scores) and concatenate findings. +5. Import: `desloppify review --import merged.json --manual-override --attest "Worker subagents ran blind reviews" --scan-after-import`. + +Each worker must consume `.desloppify/review_packet_blind.json` (not full +`query.json`) to avoid score anchoring. 
+ +### Triage workflow + +1. For each stage (observe → reflect → organize → enrich): + - Get prompt: `desloppify plan triage --stage-prompt ` + - Launch worker with that prompt. + - Confirm: `desloppify plan triage --confirm --attestation "..."` +2. Complete: `desloppify plan triage --complete --strategy "..." --attestation "..."` + + + From 0fd0d94a2ddde4cb81b10e43958209a11bd767c3 Mon Sep 17 00:00:00 2001 From: Klaus Agnoletti <24544601+klausagnoletti@users.noreply.github.com> Date: Mon, 16 Mar 2026 22:31:24 +0100 Subject: [PATCH 39/43] docs(python): add user-facing section to README (#459) Co-authored-by: Claude Sonnet 4.6 --- desloppify/languages/python/README.md | 67 +++++++++++++++++++++++++++ 1 file changed, 67 insertions(+) diff --git a/desloppify/languages/python/README.md b/desloppify/languages/python/README.md index c0aec65e4..856fe2107 100644 --- a/desloppify/languages/python/README.md +++ b/desloppify/languages/python/README.md @@ -1,3 +1,70 @@ +# Python Language Plugin for Desloppify + +Provides in-depth static analysis for Python codebases — no external linter required for most checks. + +## Supported extensions + +`.py` + +## Requirements + +- Python 3.11+ +- [`ruff`](https://docs.astral.sh/ruff/) on `PATH` — required for the **Unused** phase (unused imports, variables) +- [`bandit`](https://bandit.readthedocs.io/) on `PATH` — optional; enables the **Security** phase + +Install optional tools: + +```bash +pip install ruff bandit +``` + +## Project detection + +Activates on projects containing any of: `pyproject.toml`, `setup.py`, `setup.cfg`. + +## Usage + +```bash +# Scan for issues +desloppify scan --path + +# Full scan with all phases +desloppify scan --path --profile full +``` + +Autofix is **not** supported for Python — all findings are reported only. 
+ +## What gets analysed + +| Phase | What it finds | +|-------|--------------| +| Unused (ruff) | Unused imports and variables | +| Structural analysis | God classes, large files, complexity hotspots | +| Responsibility cohesion | Modules/classes that do too many unrelated things | +| Coupling + cycles + orphaned | Import cycles, tight coupling, unreachable modules | +| Uncalled functions | Dead code — functions never called within the project | +| Test coverage | Functions/classes with no corresponding tests | +| Signature | Overly broad signatures (`*args`, `**kwargs` misuse) | +| Code smells | Swallowed exceptions, mutable defaults, bare excepts, and more | +| Mutable state | Mutable class-level defaults and module-level shared state | +| Security | Common security issues via bandit (requires bandit) | +| Private imports | Cross-module access to private (`_`-prefixed) internals | +| Layer violations | Higher-level modules importing from lower-level domains | +| Dict key flow | Inconsistent dictionary schemas across call sites | +| Unused enums | Enum members defined but never referenced | + +## Exclusions + +The following are excluded from analysis by default: + +- `__pycache__` +- `.venv` +- `node_modules` +- `.eggs` +- `*.egg-info` + +--- + ## Python Plugin Maintainer Notes ### AST smell detector layout From 8502d05070f24bf82ed867059ecdb0a319e92b34 Mon Sep 17 00:00:00 2001 From: Klaus Agnoletti <24544601+klausagnoletti@users.noreply.github.com> Date: Mon, 16 Mar 2026 22:31:30 +0100 Subject: [PATCH 40/43] feat(javascript): add plugin tests and documentation (#458) Co-authored-by: Gemini Co-authored-by: Claude Sonnet 4.6 --- desloppify/languages/javascript/README.md | 38 ++++++ .../languages/javascript/tests/__init__.py | 0 .../languages/javascript/tests/test_init.py | 113 ++++++++++++++++++ pyproject.toml | 2 +- 4 files changed, 152 insertions(+), 1 deletion(-) create mode 100644 desloppify/languages/javascript/README.md create mode 100644 
desloppify/languages/javascript/tests/__init__.py create mode 100644 desloppify/languages/javascript/tests/test_init.py diff --git a/desloppify/languages/javascript/README.md b/desloppify/languages/javascript/README.md new file mode 100644 index 000000000..3b0a04d53 --- /dev/null +++ b/desloppify/languages/javascript/README.md @@ -0,0 +1,38 @@ +# JavaScript Language Plugin for Desloppify + +Provides JavaScript/JSX analysis via ESLint. + +## Supported extensions + +`.js`, `.jsx`, `.mjs`, `.cjs` + +## Requirements + +- Node.js with npm/npx available on `PATH` +- ESLint installed in the project: `npm install --save-dev eslint` + +## Project detection + +Activates on projects containing a `package.json` file. + +## Usage + +```bash +# Scan for issues +desloppify scan --path + +# Scan and auto-fix +desloppify scan --path --fix +``` + +Autofix is supported — ESLint's `--fix` flag is used to apply safe automatic corrections. + +## Exclusions + +The following directories are excluded from analysis: + +- `node_modules` +- `dist` +- `build` +- `.next` +- `coverage` diff --git a/desloppify/languages/javascript/tests/__init__.py b/desloppify/languages/javascript/tests/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/desloppify/languages/javascript/tests/test_init.py b/desloppify/languages/javascript/tests/test_init.py new file mode 100644 index 000000000..c36013c1a --- /dev/null +++ b/desloppify/languages/javascript/tests/test_init.py @@ -0,0 +1,113 @@ +"""Sanity tests for the JavaScript language plugin. + +These tests verify that the generic_lang() registration in +desloppify/languages/javascript/__init__.py produces a valid LangConfig +and that its ESLint integration is wired correctly. + +None of these tests require ESLint or Node.js to be installed; they exercise +the plugin metadata and the pure-Python parser in isolation. 
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+
+import pytest
+
+from desloppify.languages import get_lang
+from desloppify.languages._framework.generic_parts.parsers import parse_eslint
+
+
+@pytest.fixture(scope="module")
+def cfg():
+    """Return the registered LangConfig for JavaScript.
+
+    Scoped to the module so the plugin is loaded once across all tests
+    in this file; generic_lang() is idempotent but the round-trip through
+    the registry adds a small cost on repeated calls.
+    """
+    return get_lang("javascript")
+
+
+def test_config_name(cfg):
+    """Plugin must register under the canonical 'javascript' key."""
+    assert cfg.name == "javascript"
+
+
+@pytest.mark.parametrize("ext", [".js", ".jsx", ".mjs", ".cjs"])
+def test_config_extensions(cfg, ext):
+    """All expected JavaScript file extensions must be present."""
+    assert ext in cfg.extensions
+
+
+def test_detect_markers(cfg):
+    """package.json must be listed as a detect marker."""
+    assert "package.json" in cfg.detect_markers
+
+
+def test_detect_commands_non_empty(cfg):
+    """At least one detect command must be registered (eslint_warning)."""
+    assert cfg.detect_commands, "expected at least one detect command"
+
+
+def test_has_eslint_phase(cfg):
+    """A phase labelled 'ESLint' must be present in the plugin's phase list."""
+    labels = {p.label for p in cfg.phases}
+    assert "ESLint" in labels, f"ESLint phase missing; found: {labels}"
+
+
+def test_exclusions(cfg):
+    """node_modules and dist must be in the exclusions list."""
+    assert "node_modules" in cfg.exclusions
+    assert "dist" in cfg.exclusions
+
+
+def test_command_has_no_placeholder(cfg):
+    """The eslint command must not contain a {file_path} template placeholder.
+
+    run_tool_result() passes the command to resolve_command_argv() which does
+    NOT perform string substitution — a leftover placeholder would be passed
+    verbatim to the shell and produce zero results silently.
+ + Closure inspection is used so the test does not depend on string-matching + the source code; it reads the *actual* value captured at registration time. + """ + detect_fn = cfg.detect_commands["eslint_warning"] + freevars = detect_fn.__code__.co_freevars + cmd: str = detect_fn.__closure__[freevars.index("cmd")].cell_contents + assert "{file_path}" not in cmd, ( + f"command contains {{file_path}} placeholder which will not be substituted: {cmd!r}" + ) + + +def test_fix_cmd_registered(cfg): + """JavaScript supports autofix — at least one fixer must be registered.""" + assert cfg.fixers, "expected at least one fixer (fix_cmd) to be registered for JavaScript" + + +def test_parsing_eslint_format(): + """Verify that ESLint JSON output is parsed correctly. + + ESLint JSON format emits a list of file objects, each with a ``filePath`` + and a ``messages`` list containing ``line`` and ``message`` fields. + + Two representative entries are used — one warning and one unused-variable + notice — and the summary-less JSON must be handled without error. 
+ """ + output = ( + '[{"filePath": "/project/src/app.js", ' + '"messages": [{"line": 5, "message": "Unexpected var."}]}, ' + '{"filePath": "/project/lib/utils.js", ' + '"messages": [{"line": 12, "message": "\'x\' is defined but never used."}]}]' + ) + entries = parse_eslint(output, Path(".")) + + assert len(entries) == 2, f"expected 2 parsed entries, got {len(entries)}: {entries}" + + assert entries[0]["file"] == "/project/src/app.js" + assert entries[0]["line"] == 5 + assert "Unexpected var" in entries[0]["message"] + + assert entries[1]["file"] == "/project/lib/utils.js" + assert entries[1]["line"] == 12 + assert "defined but never used" in entries[1]["message"] diff --git a/pyproject.toml b/pyproject.toml index 668bb480c..ca52c6c1c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,7 +74,7 @@ include-package-data = false [tool.pytest.ini_options] pythonpath = ["."] -testpaths = ['desloppify/tests', 'desloppify/languages/python/tests', 'desloppify/languages/typescript/tests', 'desloppify/languages/csharp/tests', 'desloppify/languages/cxx/tests', 'desloppify/languages/dart/tests', 'desloppify/languages/gdscript/tests', 'desloppify/languages/go/tests', 'desloppify/languages/rust/tests'] +testpaths = ['desloppify/tests', 'desloppify/languages/python/tests', 'desloppify/languages/typescript/tests', 'desloppify/languages/csharp/tests', 'desloppify/languages/cxx/tests', 'desloppify/languages/dart/tests', 'desloppify/languages/gdscript/tests', 'desloppify/languages/go/tests', 'desloppify/languages/rust/tests', 'desloppify/languages/javascript/tests'] norecursedirs = ["desloppify/tests/fixtures"] [tool.ruff] From f2178ac09fe4a72c4c379dff3073facfefe07ced Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 22:36:05 +0100 Subject: [PATCH 41/43] fix(docs): correct autofix command in Ruby and JS plugin READMEs The command is `desloppify autofix`, not `desloppify fix` or `desloppify scan --fix`. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/languages/javascript/README.md | 6 +++--- desloppify/languages/ruby/README.md | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/desloppify/languages/javascript/README.md b/desloppify/languages/javascript/README.md index 3b0a04d53..14c1d0b33 100644 --- a/desloppify/languages/javascript/README.md +++ b/desloppify/languages/javascript/README.md @@ -21,11 +21,11 @@ Activates on projects containing a `package.json` file. # Scan for issues desloppify scan --path -# Scan and auto-fix -desloppify scan --path --fix +# Auto-fix ESLint issues +desloppify autofix --path ``` -Autofix is supported — ESLint's `--fix` flag is used to apply safe automatic corrections. +Autofix is supported — ESLint's `--fix` flag is used via `desloppify autofix` to apply safe automatic corrections. ## Exclusions diff --git a/desloppify/languages/ruby/README.md b/desloppify/languages/ruby/README.md index d15d6e2a7..fdd3f35b5 100644 --- a/desloppify/languages/ruby/README.md +++ b/desloppify/languages/ruby/README.md @@ -33,7 +33,7 @@ desloppify scan --path desloppify scan --path --profile full # Auto-correct RuboCop offenses -desloppify fix --path +desloppify autofix --path ``` ## What gets analysed @@ -51,7 +51,7 @@ desloppify fix --path ## Autofix -RuboCop's `--auto-correct` is wired to `desloppify fix`. Only offenses that +RuboCop's `--auto-correct` is wired to `desloppify autofix`. Only offenses that RuboCop marks as safe to auto-correct will be changed. ## Exclusions From e2cd0dcd571a06892b9be2ee24726adafaf54a30 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 22:41:12 +0100 Subject: [PATCH 42/43] docs: update release notes with late-merged PRs and stats Add #458, #459, #462 contributions from klausagnoletti. Update stats to reflect final commit/file/test counts. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- RELEASE_NOTES_0.9.10.md | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/RELEASE_NOTES_0.9.10.md b/RELEASE_NOTES_0.9.10.md index e1bffe924..e47817b06 100644 --- a/RELEASE_NOTES_0.9.10.md +++ b/RELEASE_NOTES_0.9.10.md @@ -6,7 +6,7 @@ This release adds **experimental Hermes Agent integration** for fully autonomous --- -**141 files changed | 47 commits | 5,438 tests passing** +**152 files changed | 54 commits | 5,466 tests passing** ## Hermes Agent Integration (Experimental) @@ -68,6 +68,14 @@ Massive contribution from **@MacHatter1** (PR #414). A new `FrameworkSpec` abstr Thanks to **@klausagnoletti** for adding SCSS/Sass support via stylelint integration (PR #428). Detects code smells, unused variables, and style issues in `.scss` and `.sass` files. @klausagnoletti has also submitted a follow-up PR (#452) with bug fixes, tests, and honest documentation — expected to land shortly after release. +## Plugin Tests, Docs, and Ruby Improvements + +**@klausagnoletti** also contributed across multiple language plugins: + +- **Ruby plugin improvements** (PR #462) — expanded exclusions, detect markers (`Gemfile`, `Rakefile`, `.ruby-version`, `*.gemspec`), `default_src="lib"`, `spec/` + `test/` support, and 13 wiring tests. Also adds `external_test_dirs` and `test_file_extensions` params to the generic plugin framework. +- **JavaScript plugin tests + README** (PR #458) — 12 sanity tests covering ESLint integration, command construction, fixer registration, and output parsing. +- **Python plugin README** (PR #459) — user-facing documentation covering phases, requirements, and usage. + ## R Language Improvements **@sims1253** has been steadily building out R support and contributed four PRs to this release: @@ -112,7 +120,7 @@ This release wouldn't exist without the community. Seriously — thank you all. 
**@sims1253** has been the driving force behind R language support, with four PRs spanning linting, tree-sitter queries, and harness support. The R plugin is becoming genuinely useful thanks to this sustained effort. -**@klausagnoletti** added SCSS support and has a follow-up PR (#452) with bug fixes and honest documentation — replacing a hallucinated 455-line README with an accurate 31-line one. The kind of contributor who makes the codebase more trustworthy. +**@klausagnoletti** added SCSS support, improved the Ruby plugin, and contributed tests and documentation for JavaScript and Python plugins — six PRs total (#428, #452, #457, #458, #459, #462). The kind of contributor who makes the codebase more trustworthy across the board. **@cdunda-perchwell** fixed two separate workflow re-injection bugs that were causing phantom plan items. **@nickperkins** shipped a clean PHPStan parser fix. From 7a835d4df234d2b973dc919efca4ba58d21bcc78 Mon Sep 17 00:00:00 2001 From: POM Date: Mon, 16 Mar 2026 22:47:59 +0100 Subject: [PATCH 43/43] fix(scss): replace {file_path} placeholders with glob patterns and use unix formatter The tool runner does not substitute {file_path} placeholders, so stylelint was receiving literal "{file_path}" and failing silently. Switch to glob patterns (matching every other plugin) and use --formatter unix with the gnu parser, since stylelint's JSON output doesn't match the expected json parser format. Based on findings from @klausagnoletti in PR #452. 
Co-Authored-By: Claude Opus 4.6 (1M context) --- desloppify/languages/scss/__init__.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/desloppify/languages/scss/__init__.py b/desloppify/languages/scss/__init__.py index ce0e377ac..149409a23 100644 --- a/desloppify/languages/scss/__init__.py +++ b/desloppify/languages/scss/__init__.py @@ -8,11 +8,11 @@ tools=[ { "label": "stylelint", - "cmd": "stylelint {file_path} --formatter json --max-warnings 1000", - "fmt": "json", + "cmd": "stylelint '**/*.scss' '**/*.sass' --formatter unix --max-warnings 1000", + "fmt": "gnu", "id": "stylelint_issue", "tier": 2, - "fix_cmd": "stylelint --fix {file_path}", + "fix_cmd": "stylelint --fix '**/*.scss' '**/*.sass'", }, ], exclude=["node_modules", "_output", ".quarto", "vendor"],