From f21427c0d56e52f160c08e20d2620f19d04d4c08 Mon Sep 17 00:00:00 2001 From: Alan Buscaglia Date: Fri, 9 Jan 2026 23:06:31 +0100 Subject: [PATCH 01/19] feat(skills): sync AGENTS.md to AI-specific formats - setup.sh now copies AGENTS.md to CLAUDE.md, GEMINI.md, and .github/copilot-instructions.md for each AI assistant - Added Auto-invoke Skills section to AGENTS.md to trigger skills automatically when performing specific actions - Updated .gitignore to ignore generated instruction files AGENTS.md remains the source of truth. Edit it and re-run the script to sync all AI assistants. --- .gitignore | 4 ++- AGENTS.md | 15 +++++++++++ skills/setup.sh | 67 +++++++++++++++++++++++++++++++++++++++---------- 3 files changed, 72 insertions(+), 14 deletions(-) diff --git a/.gitignore b/.gitignore index ffad8a7d37..4dfb137b89 100644 --- a/.gitignore +++ b/.gitignore @@ -150,8 +150,10 @@ node_modules # Persistent data _data/ -# Claude +# AI Instructions (generated by skills/setup.sh from AGENTS.md) CLAUDE.md +GEMINI.md +.github/copilot-instructions.md # Compliance report *.pdf diff --git a/AGENTS.md b/AGENTS.md index a982b82fae..ebda82660e 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -41,6 +41,21 @@ Use these skills for detailed patterns on-demand: | `prowler-docs` | Documentation style guide | [SKILL.md](skills/prowler-docs/SKILL.md) | | `skill-creator` | Create new AI agent skills | [SKILL.md](skills/skill-creator/SKILL.md) | +### Auto-invoke Skills + +When performing these actions, ALWAYS invoke the corresponding skill FIRST: + +| Action | Skill | +|--------|-------| +| Creating a PR | `prowler-pr` | +| Writing SDK tests | `prowler-test-sdk` | +| Writing API tests | `prowler-test-api` | +| Writing UI/E2E tests | `prowler-test-ui` | +| Creating a new check | `prowler-sdk-check` | +| Adding a new provider | `prowler-provider` | +| Writing documentation | `prowler-docs` | +| Adding compliance framework | `prowler-compliance` | + --- ## Project Overview diff --git a/skills/setup.sh b/skills/setup.sh index 8f0fc57197..2af1c26806 100755 --- a/skills/setup.sh +++ b/skills/setup.sh @@ -1,10 +1,15 @@ #!/bin/bash # Setup AI Skills for Prowler development # Configures AI coding assistants that follow agentskills.io standard: -# - Claude Code: .claude/skills/ symlink (auto-discovery) -# - Gemini CLI: .gemini/skills/ symlink (auto-discovery) -# - Codex (OpenAI): .codex/skills/ symlink + AGENTS.md -# - GitHub Copilot: reads AGENTS.md from repo root (no symlink needed) +# - Claude Code: .claude/skills/ symlink + CLAUDE.md copies +# - Gemini CLI: .gemini/skills/ symlink + GEMINI.md copies +# - Codex (OpenAI): .codex/skills/ symlink + AGENTS.md (native) +# - GitHub Copilot: .github/copilot-instructions.md copy +# +# AGENTS.md is the source of truth. This script copies it to: +# - CLAUDE.md (for Claude Code) +# - GEMINI.md (for Gemini CLI) +# - .github/copilot-instructions.md (for GitHub Copilot, root only) set -e @@ -42,7 +47,7 @@ echo "" # ============================================================================= # CLAUDE CODE SETUP (.claude/skills symlink - auto-discovery) # ============================================================================= -echo -e "${YELLOW}[1/3] Setting up Claude Code...${NC}" +echo -e "${YELLOW}[1/4] Setting up Claude Code...${NC}" if [ ! 
-d "$REPO_ROOT/.claude" ]; then mkdir -p "$REPO_ROOT/.claude" @@ -60,7 +65,7 @@ echo -e "${GREEN} ✓ .claude/skills -> skills/${NC}" # ============================================================================= # CODEX (OPENAI) SETUP (.codex/skills symlink) # ============================================================================= -echo -e "${YELLOW}[2/3] Setting up Codex (OpenAI)...${NC}" +echo -e "${YELLOW}[2/4] Setting up Codex (OpenAI)...${NC}" if [ ! -d "$REPO_ROOT/.codex" ]; then mkdir -p "$REPO_ROOT/.codex" @@ -78,7 +83,7 @@ echo -e "${GREEN} ✓ .codex/skills -> skills/${NC}" # ============================================================================= # GEMINI CLI SETUP (.gemini/skills symlink - auto-discovery) # ============================================================================= -echo -e "${YELLOW}[3/3] Setting up Gemini CLI...${NC}" +echo -e "${YELLOW}[3/4] Setting up Gemini CLI...${NC}" if [ ! -d "$REPO_ROOT/.gemini" ]; then mkdir -p "$REPO_ROOT/.gemini" @@ -93,6 +98,43 @@ fi ln -s "$SKILLS_SOURCE" "$GEMINI_SKILLS_TARGET" echo -e "${GREEN} ✓ .gemini/skills -> skills/${NC}" +# ============================================================================= +# COPY AGENTS.md TO AI-SPECIFIC FORMATS +# ============================================================================= +echo -e "${YELLOW}[4/4] Copying AGENTS.md to AI-specific formats...${NC}" + +# Find all AGENTS.md files in the repository +AGENTS_FILES=$(find "$REPO_ROOT" -name "AGENTS.md" -not -path "*/node_modules/*" -not -path "*/.git/*" 2>/dev/null) +AGENTS_COUNT=0 + +for AGENTS_FILE in $AGENTS_FILES; do + AGENTS_DIR=$(dirname "$AGENTS_FILE") + + # Copy to CLAUDE.md (same directory) + cp "$AGENTS_FILE" "$AGENTS_DIR/CLAUDE.md" + + # Copy to GEMINI.md (same directory) + cp "$AGENTS_FILE" "$AGENTS_DIR/GEMINI.md" + + # Get relative path for display + REL_PATH="${AGENTS_DIR#"$REPO_ROOT"/}" + if [ "$AGENTS_DIR" = "$REPO_ROOT" ]; then + REL_PATH="(root)" + fi + + echo -e "${GREEN} ✓ $REL_PATH/AGENTS.md -> CLAUDE.md, GEMINI.md${NC}" + AGENTS_COUNT=$((AGENTS_COUNT + 1)) +done + +# Copy root AGENTS.md to .github/copilot-instructions.md (GitHub Copilot) +if [ -f "$REPO_ROOT/AGENTS.md" ]; then + mkdir -p "$REPO_ROOT/.github" + cp "$REPO_ROOT/AGENTS.md" "$REPO_ROOT/.github/copilot-instructions.md" + echo -e "${GREEN} ✓ AGENTS.md -> .github/copilot-instructions.md (Copilot)${NC}" +fi + +echo -e "${BLUE} Copied $AGENTS_COUNT AGENTS.md file(s)${NC}" + # ============================================================================= # SUMMARY # ============================================================================= @@ -100,10 +142,10 @@ echo "" echo -e "${GREEN}✅ Successfully configured $SKILL_COUNT AI skills!${NC}" echo "" echo "Configuration created:" -echo " • Claude Code: .claude/skills/ (symlink, auto-discovery)" -echo " • Codex (OpenAI): .codex/skills/ (symlink, reads AGENTS.md)" -echo " • Gemini CLI: .gemini/skills/ (symlink, auto-discovery)" -echo " • GitHub Copilot: reads AGENTS.md from repo root (no setup needed)" +echo " • Claude Code: .claude/skills/ + CLAUDE.md copies" +echo " • Codex (OpenAI): .codex/skills/ + AGENTS.md (native)" +echo " • Gemini CLI: .gemini/skills/ + GEMINI.md copies" +echo " • GitHub Copilot: .github/copilot-instructions.md" echo "" echo "Available skills:" echo " Generic: typescript, react-19, nextjs-15, playwright, pytest," @@ -115,5 +157,4 @@ echo " prowler-test-sdk, prowler-compliance, prowler-docs," echo " prowler-provider, prowler-pr" echo "" echo -e "${BLUE}Note: Restart your 
AI coding assistant to load the skills.${NC}" -echo -e "${BLUE} Claude/Gemini auto-discover skills from SKILL.md descriptions.${NC}" -echo -e "${BLUE} Codex/Copilot use AGENTS.md instructions to reference skills.${NC}" +echo -e "${BLUE} AGENTS.md is the source of truth - edit it, then re-run this script.${NC}" From 1eac7d943652ce0cfc2a8c866aa1136c7884a480 Mon Sep 17 00:00:00 2001 From: Alan Buscaglia Date: Fri, 9 Jan 2026 23:35:10 +0100 Subject: [PATCH 02/19] feat(skills): add interactive menu and unit tests for setup.sh - Add interactive menu to select AI assistants (Claude, Gemini, Codex, Copilot) - Add CLI flags for non-interactive usage (--claude, --gemini, --codex, --copilot, --all) - Refactor setup logic into reusable functions - Add 19 unit tests covering flags, symlinks, AGENTS.md copying, and idempotency --- skills/setup.sh | 349 ++++++++++++++++++++++++++++++------------- skills/setup_test.sh | 340 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 587 insertions(+), 102 deletions(-) create mode 100755 skills/setup_test.sh diff --git a/skills/setup.sh b/skills/setup.sh index 2af1c26806..ec5512e8c5 100755 --- a/skills/setup.sh +++ b/skills/setup.sh @@ -6,10 +6,11 @@ # - Codex (OpenAI): .codex/skills/ symlink + AGENTS.md (native) # - GitHub Copilot: .github/copilot-instructions.md copy # -# AGENTS.md is the source of truth. This script copies it to: -# - CLAUDE.md (for Claude Code) -# - GEMINI.md (for Gemini CLI) -# - .github/copilot-instructions.md (for GitHub Copilot, root only) +# Usage: +# ./setup.sh # Interactive mode (select AI assistants) +# ./setup.sh --all # Configure all AI assistants +# ./setup.sh --claude # Configure only Claude Code +# ./setup.sh --claude --codex # Configure multiple set -e @@ -17,123 +18,276 @@ SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(dirname "$SCRIPT_DIR")" SKILLS_SOURCE="$SCRIPT_DIR" -# Target locations -CLAUDE_SKILLS_TARGET="$REPO_ROOT/.claude/skills" -CODEX_SKILLS_TARGET="$REPO_ROOT/.codex/skills" -GEMINI_SKILLS_TARGET="$REPO_ROOT/.gemini/skills" - # Colors for output RED='\033[0;31m' GREEN='\033[0;32m' YELLOW='\033[1;33m' BLUE='\033[0;34m' +CYAN='\033[0;36m' +BOLD='\033[1m' NC='\033[0m' # No Color -echo "🤖 Prowler AI Skills Setup" -echo "==========================" -echo "" +# Selection flags +SETUP_CLAUDE=false +SETUP_GEMINI=false +SETUP_CODEX=false +SETUP_COPILOT=false -# Count skills (directories with SKILL.md) -SKILL_COUNT=$(find "$SKILLS_SOURCE" -maxdepth 2 -name "SKILL.md" | wc -l | tr -d ' ') +# ============================================================================= +# HELPER FUNCTIONS +# ============================================================================= -if [ "$SKILL_COUNT" -eq 0 ]; then - echo -e "${RED}No skills found in $SKILLS_SOURCE${NC}" - exit 1 -fi +show_help() { + echo "Usage: $0 [OPTIONS]" + echo "" + echo "Configure AI coding assistants for Prowler development." + echo "" + echo "Options:" + echo " --all Configure all AI assistants" + echo " --claude Configure Claude Code" + echo " --gemini Configure Gemini CLI" + echo " --codex Configure Codex (OpenAI)" + echo " --copilot Configure GitHub Copilot" + echo " --help Show this help message" + echo "" + echo "If no options provided, runs in interactive mode." 
+ echo "" + echo "Examples:" + echo " $0 # Interactive selection" + echo " $0 --all # All AI assistants" + echo " $0 --claude --codex # Only Claude and Codex" +} -echo -e "${BLUE}Found $SKILL_COUNT skills to configure${NC}" -echo "" +show_menu() { + echo -e "${BOLD}Which AI assistants do you use?${NC}" + echo -e "${CYAN}(Use numbers to toggle, Enter to confirm)${NC}" + echo "" -# ============================================================================= -# CLAUDE CODE SETUP (.claude/skills symlink - auto-discovery) -# ============================================================================= -echo -e "${YELLOW}[1/4] Setting up Claude Code...${NC}" + local options=("Claude Code" "Gemini CLI" "Codex (OpenAI)" "GitHub Copilot") + local selected=(true false false false) # Claude selected by default -if [ ! -d "$REPO_ROOT/.claude" ]; then - mkdir -p "$REPO_ROOT/.claude" -fi + while true; do + for i in "${!options[@]}"; do + if [ "${selected[$i]}" = true ]; then + echo -e " ${GREEN}[x]${NC} $((i+1)). ${options[$i]}" + else + echo -e " [ ] $((i+1)). ${options[$i]}" + fi + done + echo "" + echo -e " ${YELLOW}a${NC}. Select all" + echo -e " ${YELLOW}n${NC}. Select none" + echo "" + echo -n "Toggle (1-4, a, n) or Enter to confirm: " -if [ -L "$CLAUDE_SKILLS_TARGET" ]; then - rm "$CLAUDE_SKILLS_TARGET" -elif [ -d "$CLAUDE_SKILLS_TARGET" ]; then - mv "$CLAUDE_SKILLS_TARGET" "$REPO_ROOT/.claude/skills.backup.$(date +%s)" -fi + read -r choice -ln -s "$SKILLS_SOURCE" "$CLAUDE_SKILLS_TARGET" -echo -e "${GREEN} ✓ .claude/skills -> skills/${NC}" + case $choice in + 1) selected[0]=$([ "${selected[0]}" = true ] && echo false || echo true) ;; + 2) selected[1]=$([ "${selected[1]}" = true ] && echo false || echo true) ;; + 3) selected[2]=$([ "${selected[2]}" = true ] && echo false || echo true) ;; + 4) selected[3]=$([ "${selected[3]}" = true ] && echo false || echo true) ;; + a|A) selected=(true true true true) ;; + n|N) selected=(false false false false) ;; + "") break ;; + *) echo -e "${RED}Invalid option${NC}" ;; + esac -# ============================================================================= -# CODEX (OPENAI) SETUP (.codex/skills symlink) -# ============================================================================= -echo -e "${YELLOW}[2/4] Setting up Codex (OpenAI)...${NC}" + # Move cursor up to redraw menu + echo -en "\033[10A\033[J" + done -if [ ! -d "$REPO_ROOT/.codex" ]; then - mkdir -p "$REPO_ROOT/.codex" -fi + SETUP_CLAUDE=${selected[0]} + SETUP_GEMINI=${selected[1]} + SETUP_CODEX=${selected[2]} + SETUP_COPILOT=${selected[3]} +} -if [ -L "$CODEX_SKILLS_TARGET" ]; then - rm "$CODEX_SKILLS_TARGET" -elif [ -d "$CODEX_SKILLS_TARGET" ]; then - mv "$CODEX_SKILLS_TARGET" "$REPO_ROOT/.codex/skills.backup.$(date +%s)" -fi +setup_claude() { + local target="$REPO_ROOT/.claude/skills" -ln -s "$SKILLS_SOURCE" "$CODEX_SKILLS_TARGET" -echo -e "${GREEN} ✓ .codex/skills -> skills/${NC}" + if [ ! -d "$REPO_ROOT/.claude" ]; then + mkdir -p "$REPO_ROOT/.claude" + fi -# ============================================================================= -# GEMINI CLI SETUP (.gemini/skills symlink - auto-discovery) -# ============================================================================= -echo -e "${YELLOW}[3/4] Setting up Gemini CLI...${NC}" + if [ -L "$target" ]; then + rm "$target" + elif [ -d "$target" ]; then + mv "$target" "$REPO_ROOT/.claude/skills.backup.$(date +%s)" + fi -if [ ! 
-d "$REPO_ROOT/.gemini" ]; then - mkdir -p "$REPO_ROOT/.gemini" -fi + ln -s "$SKILLS_SOURCE" "$target" + echo -e "${GREEN} ✓ .claude/skills -> skills/${NC}" -if [ -L "$GEMINI_SKILLS_TARGET" ]; then - rm "$GEMINI_SKILLS_TARGET" -elif [ -d "$GEMINI_SKILLS_TARGET" ]; then - mv "$GEMINI_SKILLS_TARGET" "$REPO_ROOT/.gemini/skills.backup.$(date +%s)" -fi + # Copy AGENTS.md to CLAUDE.md + copy_agents_md "CLAUDE.md" +} -ln -s "$SKILLS_SOURCE" "$GEMINI_SKILLS_TARGET" -echo -e "${GREEN} ✓ .gemini/skills -> skills/${NC}" +setup_gemini() { + local target="$REPO_ROOT/.gemini/skills" -# ============================================================================= -# COPY AGENTS.md TO AI-SPECIFIC FORMATS -# ============================================================================= -echo -e "${YELLOW}[4/4] Copying AGENTS.md to AI-specific formats...${NC}" + if [ ! -d "$REPO_ROOT/.gemini" ]; then + mkdir -p "$REPO_ROOT/.gemini" + fi + + if [ -L "$target" ]; then + rm "$target" + elif [ -d "$target" ]; then + mv "$target" "$REPO_ROOT/.gemini/skills.backup.$(date +%s)" + fi -# Find all AGENTS.md files in the repository -AGENTS_FILES=$(find "$REPO_ROOT" -name "AGENTS.md" -not -path "*/node_modules/*" -not -path "*/.git/*" 2>/dev/null) -AGENTS_COUNT=0 + ln -s "$SKILLS_SOURCE" "$target" + echo -e "${GREEN} ✓ .gemini/skills -> skills/${NC}" -for AGENTS_FILE in $AGENTS_FILES; do - AGENTS_DIR=$(dirname "$AGENTS_FILE") + # Copy AGENTS.md to GEMINI.md + copy_agents_md "GEMINI.md" +} - # Copy to CLAUDE.md (same directory) - cp "$AGENTS_FILE" "$AGENTS_DIR/CLAUDE.md" +setup_codex() { + local target="$REPO_ROOT/.codex/skills" + + if [ ! -d "$REPO_ROOT/.codex" ]; then + mkdir -p "$REPO_ROOT/.codex" + fi + + if [ -L "$target" ]; then + rm "$target" + elif [ -d "$target" ]; then + mv "$target" "$REPO_ROOT/.codex/skills.backup.$(date +%s)" + fi - # Copy to GEMINI.md (same directory) - cp "$AGENTS_FILE" "$AGENTS_DIR/GEMINI.md" + ln -s "$SKILLS_SOURCE" "$target" + echo -e "${GREEN} ✓ .codex/skills -> skills/${NC}" + echo -e "${GREEN} ✓ Codex uses AGENTS.md natively${NC}" +} - # Get relative path for display - REL_PATH="${AGENTS_DIR#"$REPO_ROOT"/}" - if [ "$AGENTS_DIR" = "$REPO_ROOT" ]; then - REL_PATH="(root)" +setup_copilot() { + if [ -f "$REPO_ROOT/AGENTS.md" ]; then + mkdir -p "$REPO_ROOT/.github" + cp "$REPO_ROOT/AGENTS.md" "$REPO_ROOT/.github/copilot-instructions.md" + echo -e "${GREEN} ✓ AGENTS.md -> .github/copilot-instructions.md${NC}" fi +} - echo -e "${GREEN} ✓ $REL_PATH/AGENTS.md -> CLAUDE.md, GEMINI.md${NC}" - AGENTS_COUNT=$((AGENTS_COUNT + 1)) +copy_agents_md() { + local target_name="$1" + local agents_files + local count=0 + + agents_files=$(find "$REPO_ROOT" -name "AGENTS.md" -not -path "*/node_modules/*" -not -path "*/.git/*" 2>/dev/null) + + for agents_file in $agents_files; do + local agents_dir + agents_dir=$(dirname "$agents_file") + cp "$agents_file" "$agents_dir/$target_name" + count=$((count + 1)) + done + + echo -e "${GREEN} ✓ Copied $count AGENTS.md -> $target_name${NC}" +} + +# ============================================================================= +# PARSE ARGUMENTS +# ============================================================================= + +while [[ $# -gt 0 ]]; do + case $1 in + --all) + SETUP_CLAUDE=true + SETUP_GEMINI=true + SETUP_CODEX=true + SETUP_COPILOT=true + shift + ;; + --claude) + SETUP_CLAUDE=true + shift + ;; + --gemini) + SETUP_GEMINI=true + shift + ;; + --codex) + SETUP_CODEX=true + shift + ;; + --copilot) + SETUP_COPILOT=true + shift + ;; + --help|-h) + show_help + 
exit 0 + ;; + *) + echo -e "${RED}Unknown option: $1${NC}" + show_help + exit 1 + ;; + esac done -# Copy root AGENTS.md to .github/copilot-instructions.md (GitHub Copilot) -if [ -f "$REPO_ROOT/AGENTS.md" ]; then - mkdir -p "$REPO_ROOT/.github" - cp "$REPO_ROOT/AGENTS.md" "$REPO_ROOT/.github/copilot-instructions.md" - echo -e "${GREEN} ✓ AGENTS.md -> .github/copilot-instructions.md (Copilot)${NC}" +# ============================================================================= +# MAIN +# ============================================================================= + +echo "🤖 Prowler AI Skills Setup" +echo "==========================" +echo "" + +# Count skills +SKILL_COUNT=$(find "$SKILLS_SOURCE" -maxdepth 2 -name "SKILL.md" | wc -l | tr -d ' ') + +if [ "$SKILL_COUNT" -eq 0 ]; then + echo -e "${RED}No skills found in $SKILLS_SOURCE${NC}" + exit 1 +fi + +echo -e "${BLUE}Found $SKILL_COUNT skills to configure${NC}" +echo "" + +# Interactive mode if no flags provided +if [ "$SETUP_CLAUDE" = false ] && [ "$SETUP_GEMINI" = false ] && [ "$SETUP_CODEX" = false ] && [ "$SETUP_COPILOT" = false ]; then + show_menu + echo "" +fi + +# Check if at least one selected +if [ "$SETUP_CLAUDE" = false ] && [ "$SETUP_GEMINI" = false ] && [ "$SETUP_CODEX" = false ] && [ "$SETUP_COPILOT" = false ]; then + echo -e "${YELLOW}No AI assistants selected. Nothing to do.${NC}" + exit 0 +fi + +# Run selected setups +STEP=1 +TOTAL=0 +[ "$SETUP_CLAUDE" = true ] && TOTAL=$((TOTAL + 1)) +[ "$SETUP_GEMINI" = true ] && TOTAL=$((TOTAL + 1)) +[ "$SETUP_CODEX" = true ] && TOTAL=$((TOTAL + 1)) +[ "$SETUP_COPILOT" = true ] && TOTAL=$((TOTAL + 1)) + +if [ "$SETUP_CLAUDE" = true ]; then + echo -e "${YELLOW}[$STEP/$TOTAL] Setting up Claude Code...${NC}" + setup_claude + STEP=$((STEP + 1)) fi -echo -e "${BLUE} Copied $AGENTS_COUNT AGENTS.md file(s)${NC}" +if [ "$SETUP_GEMINI" = true ]; then + echo -e "${YELLOW}[$STEP/$TOTAL] Setting up Gemini CLI...${NC}" + setup_gemini + STEP=$((STEP + 1)) +fi + +if [ "$SETUP_CODEX" = true ]; then + echo -e "${YELLOW}[$STEP/$TOTAL] Setting up Codex (OpenAI)...${NC}" + setup_codex + STEP=$((STEP + 1)) +fi + +if [ "$SETUP_COPILOT" = true ]; then + echo -e "${YELLOW}[$STEP/$TOTAL] Setting up GitHub Copilot...${NC}" + setup_copilot +fi # ============================================================================= # SUMMARY @@ -141,20 +295,11 @@ echo -e "${BLUE} Copied $AGENTS_COUNT AGENTS.md file(s)${NC}" echo "" echo -e "${GREEN}✅ Successfully configured $SKILL_COUNT AI skills!${NC}" echo "" -echo "Configuration created:" -echo " • Claude Code: .claude/skills/ + CLAUDE.md copies" -echo " • Codex (OpenAI): .codex/skills/ + AGENTS.md (native)" -echo " • Gemini CLI: .gemini/skills/ + GEMINI.md copies" -echo " • GitHub Copilot: .github/copilot-instructions.md" -echo "" -echo "Available skills:" -echo " Generic: typescript, react-19, nextjs-15, playwright, pytest," -echo " django-drf, zod-4, zustand-5, tailwind-4, ai-sdk-5" -echo "" -echo " Prowler: prowler, prowler-api, prowler-ui, prowler-mcp," -echo " prowler-sdk-check, prowler-test-ui, prowler-test-api," -echo " prowler-test-sdk, prowler-compliance, prowler-docs," -echo " prowler-provider, prowler-pr" +echo "Configured:" +[ "$SETUP_CLAUDE" = true ] && echo " • Claude Code: .claude/skills/ + CLAUDE.md" +[ "$SETUP_CODEX" = true ] && echo " • Codex (OpenAI): .codex/skills/ + AGENTS.md (native)" +[ "$SETUP_GEMINI" = true ] && echo " • Gemini CLI: .gemini/skills/ + GEMINI.md" +[ "$SETUP_COPILOT" = true ] && echo " • GitHub Copilot: 
.github/copilot-instructions.md" echo "" -echo -e "${BLUE}Note: Restart your AI coding assistant to load the skills.${NC}" +echo -e "${BLUE}Note: Restart your AI assistant to load the skills.${NC}" echo -e "${BLUE} AGENTS.md is the source of truth - edit it, then re-run this script.${NC}" diff --git a/skills/setup_test.sh b/skills/setup_test.sh new file mode 100755 index 0000000000..c0e80afe99 --- /dev/null +++ b/skills/setup_test.sh @@ -0,0 +1,340 @@ +#!/bin/bash +# Unit tests for setup.sh +# Run: ./skills/setup_test.sh +# +# shellcheck disable=SC2317 +# Reason: Test functions are discovered and called dynamically via declare -F + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SETUP_SCRIPT="$SCRIPT_DIR/setup.sh" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +# Test counters +TESTS_RUN=0 +TESTS_PASSED=0 +TESTS_FAILED=0 + +# Test environment +TEST_DIR="" + +# ============================================================================= +# TEST FRAMEWORK +# ============================================================================= + +setup_test_env() { + TEST_DIR=$(mktemp -d) + + # Create mock repo structure + mkdir -p "$TEST_DIR/skills/typescript" + mkdir -p "$TEST_DIR/skills/react-19" + mkdir -p "$TEST_DIR/api" + mkdir -p "$TEST_DIR/ui" + mkdir -p "$TEST_DIR/.github" + + # Create mock SKILL.md files + echo "# TypeScript Skill" > "$TEST_DIR/skills/typescript/SKILL.md" + echo "# React 19 Skill" > "$TEST_DIR/skills/react-19/SKILL.md" + + # Create mock AGENTS.md files + echo "# Root AGENTS" > "$TEST_DIR/AGENTS.md" + echo "# API AGENTS" > "$TEST_DIR/api/AGENTS.md" + echo "# UI AGENTS" > "$TEST_DIR/ui/AGENTS.md" + + # Copy setup.sh to test dir + cp "$SETUP_SCRIPT" "$TEST_DIR/skills/setup.sh" +} + +teardown_test_env() { + if [ -n "$TEST_DIR" ] && [ -d "$TEST_DIR" ]; then + rm -rf "$TEST_DIR" + fi +} + +run_setup() { + (cd "$TEST_DIR/skills" && bash setup.sh "$@" 2>&1) +} + +# Assertions return 0 on success, 1 on failure +assert_equals() { + local expected="$1" actual="$2" message="$3" + if [ "$expected" = "$actual" ]; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " Expected: $expected" + echo " Actual: $actual" + return 1 +} + +assert_contains() { + local haystack="$1" needle="$2" message="$3" + if echo "$haystack" | grep -q -F -- "$needle"; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " String not found: $needle" + return 1 +} + +assert_file_exists() { + local file="$1" message="$2" + if [ -f "$file" ]; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " File not found: $file" + return 1 +} + +assert_file_not_exists() { + local file="$1" message="$2" + if [ ! -f "$file" ]; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " File should not exist: $file" + return 1 +} + +assert_symlink_exists() { + local link="$1" message="$2" + if [ -L "$link" ]; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " Symlink not found: $link" + return 1 +} + +assert_symlink_not_exists() { + local link="$1" message="$2" + if [ ! 
-L "$link" ]; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " Symlink should not exist: $link" + return 1 +} + +assert_dir_exists() { + local dir="$1" message="$2" + if [ -d "$dir" ]; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " Directory not found: $dir" + return 1 +} + +# ============================================================================= +# TESTS: FLAG PARSING +# ============================================================================= + +test_flag_help_shows_usage() { + local output + output=$(run_setup --help) + assert_contains "$output" "Usage:" "Help should show usage" && \ + assert_contains "$output" "--all" "Help should mention --all flag" && \ + assert_contains "$output" "--claude" "Help should mention --claude flag" +} + +test_flag_unknown_reports_error() { + local output + output=$(run_setup --unknown 2>&1) || true + assert_contains "$output" "Unknown option" "Should report unknown option" +} + +test_flag_all_configures_everything() { + local output + output=$(run_setup --all) + assert_contains "$output" "Claude Code" "Should setup Claude" && \ + assert_contains "$output" "Gemini CLI" "Should setup Gemini" && \ + assert_contains "$output" "Codex" "Should setup Codex" && \ + assert_contains "$output" "Copilot" "Should setup Copilot" +} + +test_flag_single_claude() { + local output + output=$(run_setup --claude) + assert_contains "$output" "Claude Code" "Should setup Claude" && \ + assert_contains "$output" "[1/1]" "Should show 1/1 steps" +} + +test_flag_multiple_combined() { + local output + output=$(run_setup --claude --codex) + assert_contains "$output" "[1/2]" "Should show step 1/2" && \ + assert_contains "$output" "[2/2]" "Should show step 2/2" +} + +# ============================================================================= +# TESTS: SYMLINK CREATION +# ============================================================================= + +test_symlink_claude_created() { + run_setup --claude > /dev/null + assert_symlink_exists "$TEST_DIR/.claude/skills" "Claude skills symlink should exist" +} + +test_symlink_gemini_created() { + run_setup --gemini > /dev/null + assert_symlink_exists "$TEST_DIR/.gemini/skills" "Gemini skills symlink should exist" +} + +test_symlink_codex_created() { + run_setup --codex > /dev/null + assert_symlink_exists "$TEST_DIR/.codex/skills" "Codex skills symlink should exist" +} + +test_symlink_not_created_without_flag() { + run_setup --copilot > /dev/null + assert_symlink_not_exists "$TEST_DIR/.claude/skills" "Claude symlink should not exist" && \ + assert_symlink_not_exists "$TEST_DIR/.gemini/skills" "Gemini symlink should not exist" && \ + assert_symlink_not_exists "$TEST_DIR/.codex/skills" "Codex symlink should not exist" +} + +# ============================================================================= +# TESTS: AGENTS.md COPYING +# ============================================================================= + +test_copy_claude_agents_md() { + run_setup --claude > /dev/null + assert_file_exists "$TEST_DIR/CLAUDE.md" "Root CLAUDE.md should exist" && \ + assert_file_exists "$TEST_DIR/api/CLAUDE.md" "api/CLAUDE.md should exist" && \ + assert_file_exists "$TEST_DIR/ui/CLAUDE.md" "ui/CLAUDE.md should exist" +} + +test_copy_gemini_agents_md() { + run_setup --gemini > /dev/null + assert_file_exists "$TEST_DIR/GEMINI.md" "Root GEMINI.md should exist" && \ + assert_file_exists "$TEST_DIR/api/GEMINI.md" "api/GEMINI.md should exist" && \ + assert_file_exists "$TEST_DIR/ui/GEMINI.md" "ui/GEMINI.md 
should exist" +} + +test_copy_copilot_to_github() { + run_setup --copilot > /dev/null + assert_file_exists "$TEST_DIR/.github/copilot-instructions.md" "Copilot instructions should exist" +} + +test_copy_codex_no_extra_files() { + run_setup --codex > /dev/null + assert_file_not_exists "$TEST_DIR/CODEX.md" "CODEX.md should not be created" +} + +test_copy_not_created_without_flag() { + run_setup --codex > /dev/null + assert_file_not_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should not exist" && \ + assert_file_not_exists "$TEST_DIR/GEMINI.md" "GEMINI.md should not exist" +} + +test_copy_content_matches_source() { + run_setup --claude > /dev/null + local source_content target_content + source_content=$(cat "$TEST_DIR/AGENTS.md") + target_content=$(cat "$TEST_DIR/CLAUDE.md") + assert_equals "$source_content" "$target_content" "CLAUDE.md content should match AGENTS.md" +} + +# ============================================================================= +# TESTS: DIRECTORY CREATION +# ============================================================================= + +test_dir_claude_created() { + rm -rf "$TEST_DIR/.claude" + run_setup --claude > /dev/null + assert_dir_exists "$TEST_DIR/.claude" ".claude directory should be created" +} + +test_dir_gemini_created() { + rm -rf "$TEST_DIR/.gemini" + run_setup --gemini > /dev/null + assert_dir_exists "$TEST_DIR/.gemini" ".gemini directory should be created" +} + +test_dir_codex_created() { + rm -rf "$TEST_DIR/.codex" + run_setup --codex > /dev/null + assert_dir_exists "$TEST_DIR/.codex" ".codex directory should be created" +} + +# ============================================================================= +# TESTS: IDEMPOTENCY +# ============================================================================= + +test_idempotent_multiple_runs() { + run_setup --claude > /dev/null + run_setup --claude > /dev/null + assert_symlink_exists "$TEST_DIR/.claude/skills" "Symlink should still exist after second run" && \ + assert_file_exists "$TEST_DIR/CLAUDE.md" "CLAUDE.md should still exist after second run" +} + +# ============================================================================= +# TEST RUNNER (autodiscovery) +# ============================================================================= + +run_all_tests() { + local test_functions current_section="" + + # Discover all test_* functions + test_functions=$(declare -F | awk '{print $3}' | grep '^test_' | sort) + + for test_func in $test_functions; do + # Extract section from function name (e.g., test_flag_* -> "Flag") + local section + section=$(echo "$test_func" | sed 's/^test_//' | cut -d'_' -f1) + section="$(echo "${section:0:1}" | tr '[:lower:]' '[:upper:]')${section:1}" + + # Print section header if changed + if [ "$section" != "$current_section" ]; then + [ -n "$current_section" ] && echo "" + echo -e "${YELLOW}${section} tests:${NC}" + current_section="$section" + fi + + # Convert function name to readable test name + local test_name + test_name=$(echo "$test_func" | sed 's/^test_//' | tr '_' ' ') + + TESTS_RUN=$((TESTS_RUN + 1)) + echo -n " $test_name... 
" + + setup_test_env + + if $test_func; then + echo -e "${GREEN}PASS${NC}" + TESTS_PASSED=$((TESTS_PASSED + 1)) + else + TESTS_FAILED=$((TESTS_FAILED + 1)) + fi + + teardown_test_env + done +} + +# ============================================================================= +# MAIN +# ============================================================================= + +echo "" +echo "🧪 Running setup.sh unit tests" +echo "===============================" +echo "" + +run_all_tests + +echo "" +echo "===============================" +if [ $TESTS_FAILED -eq 0 ]; then + echo -e "${GREEN}✅ All $TESTS_RUN tests passed!${NC}" + exit 0 +else + echo -e "${RED}❌ $TESTS_FAILED of $TESTS_RUN tests failed${NC}" + exit 1 +fi From 73cc4ec4b7156efd01520b94f23c53850bb356b8 Mon Sep 17 00:00:00 2001 From: Alan-TheGentleman Date: Mon, 12 Jan 2026 11:26:28 +0100 Subject: [PATCH 03/19] feat(skills): add skill-sync for auto-generating AGENTS.md Auto-invoke - Create skill-sync skill with sync.sh script and 22 unit tests - Add metadata.scope and metadata.auto_invoke to 17 skills - Auto-generate Auto-invoke sections in ui/, api/, prowler/, root AGENTS.md - Update skills/README.md with Auto-invoke documentation The Auto-invoke sections solve a known issue where AI assistants ignore skills even when Trigger: descriptions match. Explicit commands force the AI to load skills before performing actions. Usage: ./skills/skill-sync/assets/sync.sh [--dry-run] [--scope ] --- AGENTS.md | 10 +- api/AGENTS.md | 12 + prowler/AGENTS.md | 13 + skills/README.md | 23 +- skills/django-drf/SKILL.md | 2 + skills/nextjs-15/SKILL.md | 2 + skills/prowler-api/SKILL.md | 2 + skills/prowler-compliance/SKILL.md | 2 + skills/prowler-docs/SKILL.md | 2 + skills/prowler-pr/SKILL.md | 2 + skills/prowler-provider/SKILL.md | 2 + skills/prowler-sdk-check/SKILL.md | 2 + skills/prowler-test-api/SKILL.md | 2 + skills/prowler-test-sdk/SKILL.md | 2 + skills/prowler-test-ui/SKILL.md | 2 + skills/prowler-ui/SKILL.md | 2 + skills/skill-creator/SKILL.md | 2 + skills/skill-sync/SKILL.md | 108 ++++++ skills/skill-sync/assets/sync.sh | 233 ++++++++++++ skills/skill-sync/assets/sync_test.sh | 524 ++++++++++++++++++++++++++ skills/tailwind-4/SKILL.md | 2 + skills/zod-4/SKILL.md | 2 + skills/zustand-5/SKILL.md | 2 + ui/AGENTS.md | 15 + 24 files changed, 960 insertions(+), 10 deletions(-) create mode 100644 skills/skill-sync/SKILL.md create mode 100755 skills/skill-sync/assets/sync.sh create mode 100755 skills/skill-sync/assets/sync_test.sh diff --git a/AGENTS.md b/AGENTS.md index ebda82660e..6dbb7a6621 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -47,14 +47,10 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Action | Skill | |--------|-------| -| Creating a PR | `prowler-pr` | -| Writing SDK tests | `prowler-test-sdk` | -| Writing API tests | `prowler-test-api` | -| Writing UI/E2E tests | `prowler-test-ui` | -| Creating a new check | `prowler-sdk-check` | -| Adding a new provider | `prowler-provider` | | Writing documentation | `prowler-docs` | -| Adding compliance framework | `prowler-compliance` | +| Creating a PR | `prowler-pr` | +| Creating new skills | `skill-creator` | +| After creating/modifying a skill | `skill-sync` | --- diff --git a/api/AGENTS.md b/api/AGENTS.md index 9ae73401c6..cd69c16340 100644 --- a/api/AGENTS.md +++ b/api/AGENTS.md @@ -6,6 +6,18 @@ > - [`django-drf`](../skills/django-drf/SKILL.md) - Generic DRF patterns > - [`pytest`](../skills/pytest/SKILL.md) - Generic pytest patterns +### Auto-invoke Skills + +When 
performing these actions, ALWAYS invoke the corresponding skill FIRST: + +| Action | Skill | +|--------|-------| +| Generic DRF patterns | `django-drf` | +| Creating/modifying models, views, serializers | `prowler-api` | +| Writing API tests | `prowler-test-api` | + +--- + ## CRITICAL RULES - NON-NEGOTIABLE ### Models diff --git a/prowler/AGENTS.md b/prowler/AGENTS.md index 86c2244edf..5f8c3f4587 100644 --- a/prowler/AGENTS.md +++ b/prowler/AGENTS.md @@ -7,6 +7,19 @@ > - [`prowler-compliance`](../skills/prowler-compliance/SKILL.md) - Compliance framework structure > - [`pytest`](../skills/pytest/SKILL.md) - Generic pytest patterns +### Auto-invoke Skills + +When performing these actions, ALWAYS invoke the corresponding skill FIRST: + +| Action | Skill | +|--------|-------| +| Adding compliance frameworks | `prowler-compliance` | +| Adding new providers | `prowler-provider` | +| Creating new checks | `prowler-sdk-check` | +| Writing SDK tests | `prowler-test-sdk` | + +--- + ## Project Overview The Prowler SDK is the core Python engine powering cloud security assessments across AWS, Azure, GCP, Kubernetes, GitHub, M365, and more. It includes 1000+ security checks and 30+ compliance frameworks. diff --git a/skills/README.md b/skills/README.md index becdffaf9b..4296b49088 100644 --- a/skills/README.md +++ b/skills/README.md @@ -83,6 +83,7 @@ Patterns tailored for Prowler development: | Skill | Description | |-------|-------------| | `skill-creator` | Create new AI agent skills | +| `skill-sync` | Sync skill metadata to AGENTS.md Auto-invoke sections | ## Directory Structure @@ -96,6 +97,20 @@ skills/ └── README.md # This file ``` +## Why Auto-invoke Sections? + +**Problem**: AI assistants (Claude, Gemini, etc.) don't reliably auto-invoke skills even when the `Trigger:` in the skill description matches the user's request. They treat skill suggestions as "background noise" and barrel ahead with their default approach. + +**Solution**: The `AGENTS.md` files in each directory contain an **Auto-invoke Skills** section that explicitly commands the AI: "When performing X action, ALWAYS invoke Y skill FIRST." This is a [known workaround](https://scottspence.com/posts/claude-code-skills-dont-auto-activate) that forces the AI to load skills. + +**Automation**: Instead of manually maintaining these sections, run `skill-sync` after creating or modifying a skill: + +```bash +./skills/skill-sync/assets/sync.sh +``` + +This reads `metadata.scope` and `metadata.auto_invoke` from each `SKILL.md` and generates the Auto-invoke tables in the corresponding `AGENTS.md` files. + ## Creating New Skills Use the `skill-creator` skill for guidance: @@ -108,9 +123,11 @@ Read skills/skill-creator/SKILL.md 1. Create directory: `skills/{skill-name}/` 2. Add `SKILL.md` with required frontmatter -3. Keep content concise (under 500 lines) -4. Reference existing docs instead of duplicating -5. Add to `AGENTS.md` skills table +3. Add `metadata.scope` and `metadata.auto_invoke` fields +4. Keep content concise (under 500 lines) +5. Reference existing docs instead of duplicating +6. Run `./skills/skill-sync/assets/sync.sh` to update AGENTS.md +7. 
Add to `AGENTS.md` skills table (if not auto-generated) ## Design Principles diff --git a/skills/django-drf/SKILL.md b/skills/django-drf/SKILL.md index 03b43fc64e..25d1a39692 100644 --- a/skills/django-drf/SKILL.md +++ b/skills/django-drf/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [api] + auto_invoke: "Generic DRF patterns" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/nextjs-15/SKILL.md b/skills/nextjs-15/SKILL.md index 4fa47fd619..b529f30fd0 100644 --- a/skills/nextjs-15/SKILL.md +++ b/skills/nextjs-15/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [ui] + auto_invoke: "App Router / Server Actions" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-api/SKILL.md b/skills/prowler-api/SKILL.md index 62f3c15156..c779b08c0f 100644 --- a/skills/prowler-api/SKILL.md +++ b/skills/prowler-api/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [api] + auto_invoke: "Creating/modifying models, views, serializers" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-compliance/SKILL.md b/skills/prowler-compliance/SKILL.md index 8b82617d6e..c06ca5f2fa 100644 --- a/skills/prowler-compliance/SKILL.md +++ b/skills/prowler-compliance/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [sdk] + auto_invoke: "Adding compliance frameworks" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-docs/SKILL.md b/skills/prowler-docs/SKILL.md index d8fd8f9cb2..41a97f5ff9 100644 --- a/skills/prowler-docs/SKILL.md +++ b/skills/prowler-docs/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root] + auto_invoke: "Writing documentation" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-pr/SKILL.md b/skills/prowler-pr/SKILL.md index 9d1e299a78..63bf83c051 100644 --- a/skills/prowler-pr/SKILL.md +++ b/skills/prowler-pr/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root] + auto_invoke: "Creating a PR" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-provider/SKILL.md b/skills/prowler-provider/SKILL.md index 493c393f1e..c41c6e5315 100644 --- a/skills/prowler-provider/SKILL.md +++ b/skills/prowler-provider/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [sdk] + auto_invoke: "Adding new providers" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-sdk-check/SKILL.md b/skills/prowler-sdk-check/SKILL.md index f032f6b590..0fdc3a822a 100644 --- a/skills/prowler-sdk-check/SKILL.md +++ b/skills/prowler-sdk-check/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [sdk] + auto_invoke: "Creating new checks" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-test-api/SKILL.md b/skills/prowler-test-api/SKILL.md index b53eddc44b..69d9613aab 100644 --- a/skills/prowler-test-api/SKILL.md +++ b/skills/prowler-test-api/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 
metadata: author: prowler-cloud version: "1.0" + scope: [api] + auto_invoke: "Writing API tests" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-test-sdk/SKILL.md b/skills/prowler-test-sdk/SKILL.md index f9e08b5811..02df667205 100644 --- a/skills/prowler-test-sdk/SKILL.md +++ b/skills/prowler-test-sdk/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [sdk] + auto_invoke: "Writing SDK tests" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-test-ui/SKILL.md b/skills/prowler-test-ui/SKILL.md index fbff0f9310..761723956a 100644 --- a/skills/prowler-test-ui/SKILL.md +++ b/skills/prowler-test-ui/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [ui] + auto_invoke: "Writing E2E/Playwright tests" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-ui/SKILL.md b/skills/prowler-ui/SKILL.md index 0c9e338e91..a400d07f0f 100644 --- a/skills/prowler-ui/SKILL.md +++ b/skills/prowler-ui/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [ui] + auto_invoke: "Creating/modifying React components" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/skill-creator/SKILL.md b/skills/skill-creator/SKILL.md index d84aa5101a..11787abe2b 100644 --- a/skills/skill-creator/SKILL.md +++ b/skills/skill-creator/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root] + auto_invoke: "Creating new skills" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/skill-sync/SKILL.md b/skills/skill-sync/SKILL.md new file mode 100644 index 0000000000..e307c7d3fd --- /dev/null +++ b/skills/skill-sync/SKILL.md @@ -0,0 +1,108 @@ +--- +name: skill-sync +description: > + Syncs skill metadata to AGENTS.md Auto-invoke sections. + Trigger: After creating/modifying a skill, run sync to update AGENTS.md files. +license: Apache-2.0 +metadata: + author: prowler-cloud + version: "1.0" + scope: [root] + auto_invoke: "After creating/modifying a skill" +allowed-tools: Read, Edit, Write, Glob, Grep, Bash +--- + +## Purpose + +Keeps AGENTS.md Auto-invoke sections in sync with skill metadata. When you create or modify a skill, run the sync script to automatically update all affected AGENTS.md files. + +## Required Skill Metadata + +Each skill that should appear in Auto-invoke sections needs these fields in `metadata`: + +```yaml +metadata: + author: prowler-cloud + version: "1.0" + scope: [ui] # Which AGENTS.md: ui, api, sdk, root + auto_invoke: "Creating/modifying components" # When to invoke (action description) +``` + +### Scope Values + +| Scope | Updates | +|-------|---------| +| `root` | `AGENTS.md` (repo root) | +| `ui` | `ui/AGENTS.md` | +| `api` | `api/AGENTS.md` | +| `sdk` | `prowler/AGENTS.md` | + +Skills can have multiple scopes: `scope: [ui, api]` + +--- + +## Usage + +### After Creating/Modifying a Skill + +```bash +./skills/skill-sync/assets/sync.sh +``` + +### What It Does + +1. Reads all `skills/*/SKILL.md` files +2. Extracts `metadata.scope` and `metadata.auto_invoke` +3. Generates Auto-invoke tables for each AGENTS.md +4. 
Updates the `### Auto-invoke Skills` section in each file + +--- + +## Example + +Given this skill metadata: + +```yaml +# skills/prowler-ui/SKILL.md +metadata: + author: prowler-cloud + version: "1.0" + scope: [ui] + auto_invoke: "Creating/modifying React components" +``` + +The sync script generates in `ui/AGENTS.md`: + +```markdown +### Auto-invoke Skills + +When performing these actions, ALWAYS invoke the corresponding skill FIRST: + +| Action | Skill | +|--------|-------| +| Creating/modifying React components | `prowler-ui` | +``` + +--- + +## Commands + +```bash +# Sync all AGENTS.md files +./skills/skill-sync/assets/sync.sh + +# Dry run (show what would change) +./skills/skill-sync/assets/sync.sh --dry-run + +# Sync specific scope only +./skills/skill-sync/assets/sync.sh --scope ui +``` + +--- + +## Checklist After Modifying Skills + +- [ ] Added `metadata.scope` to new/modified skill +- [ ] Added `metadata.auto_invoke` with action description +- [ ] Ran `./skills/skill-sync/assets/sync.sh` +- [ ] Verified AGENTS.md files updated correctly diff --git a/skills/skill-sync/assets/sync.sh b/skills/skill-sync/assets/sync.sh new file mode 100755 index 0000000000..3cee870514 --- /dev/null +++ b/skills/skill-sync/assets/sync.sh @@ -0,0 +1,233 @@ +#!/bin/bash +# Sync skill metadata to AGENTS.md Auto-invoke sections +# Usage: ./sync.sh [--dry-run] [--scope ] + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +REPO_ROOT="$(dirname "$(dirname "$(dirname "$SCRIPT_DIR")")")" +SKILLS_DIR="$REPO_ROOT/skills" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +# Options +DRY_RUN=false +FILTER_SCOPE="" + +# Parse arguments +while [[ $# -gt 0 ]]; do + case $1 in + --dry-run) + DRY_RUN=true + shift + ;; + --scope) + FILTER_SCOPE="$2" + shift 2 + ;; + --help|-h) + echo "Usage: $0 [--dry-run] [--scope ]" + echo "" + echo "Options:" + echo " --dry-run Show what would change without modifying files" + echo " --scope Only sync specific scope (root, ui, api, sdk)" + exit 0 + ;; + *) + echo -e "${RED}Unknown option: $1${NC}" + exit 1 + ;; + esac +done + +# Map scope to AGENTS.md path +get_agents_path() { + local scope="$1" + case "$scope" in + root) echo "$REPO_ROOT/AGENTS.md" ;; + ui) echo "$REPO_ROOT/ui/AGENTS.md" ;; + api) echo "$REPO_ROOT/api/AGENTS.md" ;; + sdk) echo "$REPO_ROOT/prowler/AGENTS.md" ;; + *) echo "" ;; + esac +} + +# Extract YAML frontmatter field using awk +extract_field() { + local file="$1" + local field="$2" + awk -v field="$field" ' + /^---$/ { in_frontmatter = !in_frontmatter; next } + in_frontmatter && $1 == field":" { + # Handle single line value + sub(/^[^:]+:[[:space:]]*/, "") + if ($0 != "" && $0 != ">") { + gsub(/^["'\'']|["'\'']$/, "") # Remove quotes + print + exit + } + # Handle multi-line value + getline + while (/^[[:space:]]/ && !/^---$/) { + sub(/^[[:space:]]+/, "") + printf "%s ", $0 + if (!getline) break + } + print "" + exit + } + ' "$file" | sed 's/[[:space:]]*$//' +} + +# Extract nested metadata field +extract_metadata() { + local file="$1" + local field="$2" + awk -v field="$field" ' + /^---$/ { in_frontmatter = !in_frontmatter; next } + in_frontmatter && /^metadata:/ { in_metadata = 1; next } + in_frontmatter && in_metadata && /^[a-z]/ && !/^[[:space:]]/ { in_metadata = 0 } + in_frontmatter && in_metadata && $1 == field":" { + sub(/^[^:]+:[[:space:]]*/, "") + gsub(/^["'\'']|["'\'']$/, "") + gsub(/^\[|\]$/, "") # Remove array brackets + print + exit + } + ' "$file" +} + +echo -e 
"${BLUE}Skill Sync - Updating AGENTS.md Auto-invoke sections${NC}" +echo "========================================================" +echo "" + +# Collect skills by scope +declare -A SCOPE_SKILLS # scope -> "skill1:action1|skill2:action2|..." + +for skill_file in "$SKILLS_DIR"/*/SKILL.md; do + [ -f "$skill_file" ] || continue + + skill_name=$(extract_field "$skill_file" "name") + scope_raw=$(extract_metadata "$skill_file" "scope") + auto_invoke=$(extract_metadata "$skill_file" "auto_invoke") + + # Skip if no scope or auto_invoke defined + [ -z "$scope_raw" ] || [ -z "$auto_invoke" ] && continue + + # Parse scope (can be comma-separated or space-separated) + IFS=', ' read -ra scopes <<< "$scope_raw" + + for scope in "${scopes[@]}"; do + scope=$(echo "$scope" | tr -d '[:space:]') + [ -z "$scope" ] && continue + + # Filter by scope if specified + [ -n "$FILTER_SCOPE" ] && [ "$scope" != "$FILTER_SCOPE" ] && continue + + # Append to scope's skill list + if [ -z "${SCOPE_SKILLS[$scope]}" ]; then + SCOPE_SKILLS[$scope]="$skill_name:$auto_invoke" + else + SCOPE_SKILLS[$scope]="${SCOPE_SKILLS[$scope]}|$skill_name:$auto_invoke" + fi + done +done + +# Generate Auto-invoke section for each scope +for scope in "${!SCOPE_SKILLS[@]}"; do + agents_path=$(get_agents_path "$scope") + + if [ -z "$agents_path" ] || [ ! -f "$agents_path" ]; then + echo -e "${YELLOW}Warning: No AGENTS.md found for scope '$scope'${NC}" + continue + fi + + echo -e "${BLUE}Processing: $scope -> $(basename "$(dirname "$agents_path")")/AGENTS.md${NC}" + + # Build the Auto-invoke table + auto_invoke_section="### Auto-invoke Skills + +When performing these actions, ALWAYS invoke the corresponding skill FIRST: + +| Action | Skill | +|--------|-------|" + + IFS='|' read -ra skill_entries <<< "${SCOPE_SKILLS[$scope]}" + for entry in "${skill_entries[@]}"; do + skill_name="${entry%%:*}" + action="${entry#*:}" + auto_invoke_section="$auto_invoke_section +| $action | \`$skill_name\` |" + done + + if $DRY_RUN; then + echo -e "${YELLOW}[DRY RUN] Would update $agents_path with:${NC}" + echo "$auto_invoke_section" + echo "" + else + # Check if Auto-invoke section exists + if grep -q "### Auto-invoke Skills" "$agents_path"; then + # Replace existing section (up to next --- or ## heading) + awk -v new_section="$auto_invoke_section" ' + /^### Auto-invoke Skills/ { + print new_section + skip = 1 + next + } + skip && /^(---|## )/ { + skip = 0 + print "" + } + !skip { print } + ' "$agents_path" > "$agents_path.tmp" + mv "$agents_path.tmp" "$agents_path" + echo -e "${GREEN} ✓ Updated Auto-invoke section${NC}" + else + # Insert after Skills Reference blockquote + awk -v new_section="$auto_invoke_section" ' + /^>.*SKILL\.md\)$/ && !inserted { + print + getline + if (/^$/) { + print "" + print new_section + print "" + inserted = 1 + next + } + } + { print } + ' "$agents_path" > "$agents_path.tmp" + mv "$agents_path.tmp" "$agents_path" + echo -e "${GREEN} ✓ Inserted Auto-invoke section${NC}" + fi + fi +done + +echo "" +echo -e "${GREEN}Done!${NC}" + +# Show skills without metadata +echo "" +echo -e "${BLUE}Skills missing sync metadata:${NC}" +missing=0 +for skill_file in "$SKILLS_DIR"/*/SKILL.md; do + [ -f "$skill_file" ] || continue + skill_name=$(extract_field "$skill_file" "name") + scope_raw=$(extract_metadata "$skill_file" "scope") + auto_invoke=$(extract_metadata "$skill_file" "auto_invoke") + + if [ -z "$scope_raw" ] || [ -z "$auto_invoke" ]; then + echo -e " ${YELLOW}$skill_name${NC} - missing: ${scope_raw:+}${scope_raw:-scope} 
${auto_invoke:+}${auto_invoke:-auto_invoke}" + missing=$((missing + 1)) + fi +done + +if [ $missing -eq 0 ]; then + echo -e " ${GREEN}All skills have sync metadata${NC}" +fi diff --git a/skills/skill-sync/assets/sync_test.sh b/skills/skill-sync/assets/sync_test.sh new file mode 100755 index 0000000000..b313273c4b --- /dev/null +++ b/skills/skill-sync/assets/sync_test.sh @@ -0,0 +1,524 @@ +#!/bin/bash +# Unit tests for sync.sh +# Run: ./skills/skill-sync/assets/sync_test.sh +# +# shellcheck disable=SC2317 +# Reason: Test functions are discovered and called dynamically via declare -F + +set -e + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SYNC_SCRIPT="$SCRIPT_DIR/sync.sh" + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' + +# Test counters +TESTS_RUN=0 +TESTS_PASSED=0 +TESTS_FAILED=0 + +# Test environment +TEST_DIR="" + +# ============================================================================= +# TEST FRAMEWORK +# ============================================================================= + +setup_test_env() { + TEST_DIR=$(mktemp -d) + + # Create mock repo structure + mkdir -p "$TEST_DIR/skills/mock-ui-skill" + mkdir -p "$TEST_DIR/skills/mock-api-skill" + mkdir -p "$TEST_DIR/skills/mock-sdk-skill" + mkdir -p "$TEST_DIR/skills/mock-root-skill" + mkdir -p "$TEST_DIR/skills/mock-no-metadata" + mkdir -p "$TEST_DIR/skills/skill-sync/assets" + mkdir -p "$TEST_DIR/ui" + mkdir -p "$TEST_DIR/api" + mkdir -p "$TEST_DIR/prowler" + + # Create mock SKILL.md files with metadata + cat > "$TEST_DIR/skills/mock-ui-skill/SKILL.md" << 'EOF' +--- +name: mock-ui-skill +description: > + Mock UI skill for testing. + Trigger: When testing UI. +license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [ui] + auto_invoke: "Testing UI components" +allowed-tools: Read +--- + +# Mock UI Skill +EOF + + cat > "$TEST_DIR/skills/mock-api-skill/SKILL.md" << 'EOF' +--- +name: mock-api-skill +description: > + Mock API skill for testing. + Trigger: When testing API. +license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [api] + auto_invoke: "Testing API endpoints" +allowed-tools: Read +--- + +# Mock API Skill +EOF + + cat > "$TEST_DIR/skills/mock-sdk-skill/SKILL.md" << 'EOF' +--- +name: mock-sdk-skill +description: > + Mock SDK skill for testing. + Trigger: When testing SDK. +license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [sdk] + auto_invoke: "Testing SDK checks" +allowed-tools: Read +--- + +# Mock SDK Skill +EOF + + cat > "$TEST_DIR/skills/mock-root-skill/SKILL.md" << 'EOF' +--- +name: mock-root-skill +description: > + Mock root skill for testing. + Trigger: When testing root. +license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [root] + auto_invoke: "Testing root actions" +allowed-tools: Read +--- + +# Mock Root Skill +EOF + + # Skill without sync metadata + cat > "$TEST_DIR/skills/mock-no-metadata/SKILL.md" << 'EOF' +--- +name: mock-no-metadata +description: > + Skill without sync metadata. +license: Apache-2.0 +metadata: + author: test + version: "1.0" +allowed-tools: Read +--- + +# No Metadata Skill +EOF + + # Create mock AGENTS.md files with Skills Reference section + cat > "$TEST_DIR/AGENTS.md" << 'EOF' +# Root AGENTS + +> **Skills Reference**: For detailed patterns, use these skills: +> - [`mock-root-skill`](skills/mock-root-skill/SKILL.md) + +## Project Overview + +This is the root agents file. 
+EOF + + cat > "$TEST_DIR/ui/AGENTS.md" << 'EOF' +# UI AGENTS + +> **Skills Reference**: For detailed patterns, use these skills: +> - [`mock-ui-skill`](../skills/mock-ui-skill/SKILL.md) + +## CRITICAL RULES + +UI rules here. +EOF + + cat > "$TEST_DIR/api/AGENTS.md" << 'EOF' +# API AGENTS + +> **Skills Reference**: For detailed patterns, use these skills: +> - [`mock-api-skill`](../skills/mock-api-skill/SKILL.md) + +## CRITICAL RULES + +API rules here. +EOF + + cat > "$TEST_DIR/prowler/AGENTS.md" << 'EOF' +# SDK AGENTS + +> **Skills Reference**: For detailed patterns, use these skills: +> - [`mock-sdk-skill`](../skills/mock-sdk-skill/SKILL.md) + +## Project Overview + +SDK overview here. +EOF + + # Copy sync.sh to test dir + cp "$SYNC_SCRIPT" "$TEST_DIR/skills/skill-sync/assets/sync.sh" + chmod +x "$TEST_DIR/skills/skill-sync/assets/sync.sh" +} + +teardown_test_env() { + if [ -n "$TEST_DIR" ] && [ -d "$TEST_DIR" ]; then + rm -rf "$TEST_DIR" + fi +} + +run_sync() { + (cd "$TEST_DIR/skills/skill-sync/assets" && bash sync.sh "$@" 2>&1) +} + +# Assertions +assert_equals() { + local expected="$1" actual="$2" message="$3" + if [ "$expected" = "$actual" ]; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " Expected: $expected" + echo " Actual: $actual" + return 1 +} + +assert_contains() { + local haystack="$1" needle="$2" message="$3" + if echo "$haystack" | grep -q -F -- "$needle"; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " String not found: $needle" + return 1 +} + +assert_not_contains() { + local haystack="$1" needle="$2" message="$3" + if ! echo "$haystack" | grep -q -F -- "$needle"; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " String should not be found: $needle" + return 1 +} + +assert_file_contains() { + local file="$1" needle="$2" message="$3" + if grep -q -F -- "$needle" "$file" 2>/dev/null; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " File: $file" + echo " String not found: $needle" + return 1 +} + +assert_file_not_contains() { + local file="$1" needle="$2" message="$3" + if ! 
grep -q -F -- "$needle" "$file" 2>/dev/null; then + return 0 + fi + echo -e "${RED} FAIL: $message${NC}" + echo " File: $file" + echo " String should not be found: $needle" + return 1 +} + +# ============================================================================= +# TESTS: FLAG PARSING +# ============================================================================= + +test_flag_help_shows_usage() { + local output + output=$(run_sync --help) + assert_contains "$output" "Usage:" "Help should show usage" && \ + assert_contains "$output" "--dry-run" "Help should mention --dry-run" && \ + assert_contains "$output" "--scope" "Help should mention --scope" +} + +test_flag_unknown_reports_error() { + local output + output=$(run_sync --unknown 2>&1) || true + assert_contains "$output" "Unknown option" "Should report unknown option" +} + +test_flag_dryrun_shows_changes() { + local output + output=$(run_sync --dry-run) + assert_contains "$output" "[DRY RUN]" "Should show dry run marker" && \ + assert_contains "$output" "Would update" "Should say would update" +} + +test_flag_dryrun_no_file_changes() { + run_sync --dry-run > /dev/null + assert_file_not_contains "$TEST_DIR/ui/AGENTS.md" "### Auto-invoke Skills" \ + "AGENTS.md should not be modified in dry run" +} + +test_flag_scope_filters_correctly() { + local output + output=$(run_sync --scope ui) + assert_contains "$output" "Processing: ui" "Should process ui scope" && \ + assert_not_contains "$output" "Processing: api" "Should not process api scope" +} + +# ============================================================================= +# TESTS: METADATA EXTRACTION +# ============================================================================= + +test_metadata_extracts_scope() { + local output + output=$(run_sync --dry-run) + assert_contains "$output" "Processing: ui" "Should detect ui scope" && \ + assert_contains "$output" "Processing: api" "Should detect api scope" && \ + assert_contains "$output" "Processing: sdk" "Should detect sdk scope" && \ + assert_contains "$output" "Processing: root" "Should detect root scope" +} + +test_metadata_extracts_auto_invoke() { + local output + output=$(run_sync --dry-run) + assert_contains "$output" "Testing UI components" "Should extract UI auto_invoke" && \ + assert_contains "$output" "Testing API endpoints" "Should extract API auto_invoke" && \ + assert_contains "$output" "Testing SDK checks" "Should extract SDK auto_invoke" +} + +test_metadata_missing_reports_skills() { + local output + output=$(run_sync --dry-run) + assert_contains "$output" "Skills missing sync metadata" "Should report missing metadata section" && \ + assert_contains "$output" "mock-no-metadata" "Should list skill without metadata" +} + +test_metadata_skips_without_scope_in_processing() { + local output + output=$(run_sync --dry-run) + # Should not appear in "Processing:" lines, only in "missing metadata" section + local processing_lines + processing_lines=$(echo "$output" | grep "Processing:") + assert_not_contains "$processing_lines" "mock-no-metadata" "Should not process skill without scope" +} + +# ============================================================================= +# TESTS: AUTO-INVOKE GENERATION +# ============================================================================= + +test_generate_creates_table() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "### Auto-invoke Skills" \ + "Should create Auto-invoke section" && \ + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "| Action | Skill |" \ 
+ "Should create table header" +} + +test_generate_correct_skill_in_ui() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "mock-ui-skill" \ + "UI AGENTS should contain mock-ui-skill" && \ + assert_file_not_contains "$TEST_DIR/ui/AGENTS.md" "mock-api-skill" \ + "UI AGENTS should not contain mock-api-skill" +} + +test_generate_correct_skill_in_api() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/api/AGENTS.md" "mock-api-skill" \ + "API AGENTS should contain mock-api-skill" && \ + assert_file_not_contains "$TEST_DIR/api/AGENTS.md" "mock-ui-skill" \ + "API AGENTS should not contain mock-ui-skill" +} + +test_generate_correct_skill_in_sdk() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/prowler/AGENTS.md" "mock-sdk-skill" \ + "SDK AGENTS should contain mock-sdk-skill" && \ + assert_file_not_contains "$TEST_DIR/prowler/AGENTS.md" "mock-ui-skill" \ + "SDK AGENTS should not contain mock-ui-skill" +} + +test_generate_correct_skill_in_root() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/AGENTS.md" "mock-root-skill" \ + "Root AGENTS should contain mock-root-skill" && \ + assert_file_not_contains "$TEST_DIR/AGENTS.md" "mock-ui-skill" \ + "Root AGENTS should not contain mock-ui-skill" +} + +test_generate_includes_action_text() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "Testing UI components" \ + "Should include auto_invoke action text" +} + +# ============================================================================= +# TESTS: AGENTS.MD UPDATE +# ============================================================================= + +test_update_preserves_header() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "# UI AGENTS" \ + "Should preserve original header" +} + +test_update_preserves_skills_reference() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "Skills Reference" \ + "Should preserve Skills Reference section" +} + +test_update_preserves_content_after() { + run_sync > /dev/null + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "## CRITICAL RULES" \ + "Should preserve content after Auto-invoke section" +} + +test_update_replaces_existing_section() { + # First run creates section + run_sync > /dev/null + + # Modify a skill's auto_invoke + sed -i 's/Testing UI components/Modified UI action/' "$TEST_DIR/skills/mock-ui-skill/SKILL.md" + + # Second run should replace + run_sync > /dev/null + + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "Modified UI action" \ + "Should update with new auto_invoke text" && \ + assert_file_not_contains "$TEST_DIR/ui/AGENTS.md" "Testing UI components" \ + "Should remove old auto_invoke text" +} + +# ============================================================================= +# TESTS: IDEMPOTENCY +# ============================================================================= + +test_idempotent_multiple_runs() { + run_sync > /dev/null + local first_content + first_content=$(cat "$TEST_DIR/ui/AGENTS.md") + + run_sync > /dev/null + local second_content + second_content=$(cat "$TEST_DIR/ui/AGENTS.md") + + assert_equals "$first_content" "$second_content" \ + "Multiple runs should produce identical output" +} + +test_idempotent_no_duplicate_sections() { + run_sync > /dev/null + run_sync > /dev/null + run_sync > /dev/null + + local count + count=$(grep -c "### Auto-invoke Skills" "$TEST_DIR/ui/AGENTS.md") + assert_equals "1" "$count" "Should have exactly one Auto-invoke section" +} + +# 
============================================================================= +# TESTS: MULTI-SCOPE SKILLS +# ============================================================================= + +test_multiscope_skill_appears_in_multiple() { + # Create a skill with multiple scopes + cat > "$TEST_DIR/skills/mock-ui-skill/SKILL.md" << 'EOF' +--- +name: mock-ui-skill +description: Mock skill with multiple scopes. +license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [ui, api] + auto_invoke: "Multi-scope action" +allowed-tools: Read +--- +EOF + + run_sync > /dev/null + + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "mock-ui-skill" \ + "Multi-scope skill should appear in UI" && \ + assert_file_contains "$TEST_DIR/api/AGENTS.md" "mock-ui-skill" \ + "Multi-scope skill should appear in API" +} + +# ============================================================================= +# TEST RUNNER +# ============================================================================= + +run_all_tests() { + local test_functions current_section="" + + test_functions=$(declare -F | awk '{print $3}' | grep '^test_' | sort) + + for test_func in $test_functions; do + local section + section=$(echo "$test_func" | sed 's/^test_//' | cut -d'_' -f1) + section="$(echo "${section:0:1}" | tr '[:lower:]' '[:upper:]')${section:1}" + + if [ "$section" != "$current_section" ]; then + [ -n "$current_section" ] && echo "" + echo -e "${YELLOW}${section} tests:${NC}" + current_section="$section" + fi + + local test_name + test_name=$(echo "$test_func" | sed 's/^test_//' | tr '_' ' ') + + TESTS_RUN=$((TESTS_RUN + 1)) + echo -n " $test_name... " + + setup_test_env + + if $test_func; then + echo -e "${GREEN}PASS${NC}" + TESTS_PASSED=$((TESTS_PASSED + 1)) + else + TESTS_FAILED=$((TESTS_FAILED + 1)) + fi + + teardown_test_env + done +} + +# ============================================================================= +# MAIN +# ============================================================================= + +echo "" +echo "🧪 Running sync.sh unit tests" +echo "==============================" +echo "" + +run_all_tests + +echo "" +echo "==============================" +if [ $TESTS_FAILED -eq 0 ]; then + echo -e "${GREEN}✅ All $TESTS_RUN tests passed!${NC}" + exit 0 +else + echo -e "${RED}❌ $TESTS_FAILED of $TESTS_RUN tests failed${NC}" + exit 1 +fi diff --git a/skills/tailwind-4/SKILL.md b/skills/tailwind-4/SKILL.md index 84b50261ce..44d03319e3 100644 --- a/skills/tailwind-4/SKILL.md +++ b/skills/tailwind-4/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [ui] + auto_invoke: "Working with Tailwind classes" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/zod-4/SKILL.md b/skills/zod-4/SKILL.md index 186b6950e5..13c4fc6553 100644 --- a/skills/zod-4/SKILL.md +++ b/skills/zod-4/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [ui] + auto_invoke: "Creating Zod schemas" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/zustand-5/SKILL.md b/skills/zustand-5/SKILL.md index af3fc5aad3..482a964c36 100644 --- a/skills/zustand-5/SKILL.md +++ b/skills/zustand-5/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [ui] + auto_invoke: "Using Zustand stores" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/ui/AGENTS.md b/ui/AGENTS.md index 
0e26f60f6f..1976bea43b 100644 --- a/ui/AGENTS.md +++ b/ui/AGENTS.md @@ -12,6 +12,21 @@ > - [`ai-sdk-5`](../skills/ai-sdk-5/SKILL.md) - UIMessage, sendMessage > - [`playwright`](../skills/playwright/SKILL.md) - Page Object Model, selectors +### Auto-invoke Skills + +When performing these actions, ALWAYS invoke the corresponding skill FIRST: + +| Action | Skill | +|--------|-------| +| App Router / Server Actions | `nextjs-15` | +| Writing E2E/Playwright tests | `prowler-test-ui` | +| Creating/modifying React components | `prowler-ui` | +| Working with Tailwind classes | `tailwind-4` | +| Creating Zod schemas | `zod-4` | +| Using Zustand stores | `zustand-5` | + +--- + ## CRITICAL RULES - NON-NEGOTIABLE ### React From be0c0ddb6badfebaa89aaa28175df3ca279f1622 Mon Sep 17 00:00:00 2001 From: Alan Buscaglia Date: Mon, 12 Jan 2026 20:32:11 +0100 Subject: [PATCH 04/19] fix(skills): stable ordering in auto-invoke sync --- AGENTS.md | 31 ++++++++- api/AGENTS.md | 6 +- prowler/AGENTS.md | 9 ++- skills/skill-sync/assets/sync.sh | 110 +++++++++++++++++++++++++++---- ui/AGENTS.md | 12 +++- 5 files changed, 146 insertions(+), 22 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 6dbb7a6621..f3814bdca5 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -47,10 +47,37 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Action | Skill | |--------|-------| -| Writing documentation | `prowler-docs` | +| Adding new providers | `prowler-provider` | +| Adding services to existing providers | `prowler-provider` | +| After creating/modifying a skill | `skill-sync` | +| App Router / Server Actions | `nextjs-15` | +| Building AI chat features | `ai-sdk-5` | +| Creating Zod schemas | `zod-4` | | Creating a PR | `prowler-pr` | +| Creating new checks | `prowler-sdk-check` | | Creating new skills | `skill-creator` | -| After creating/modifying a skill | `skill-sync` | +| Creating/modifying Prowler UI components | `prowler-ui` | +| Creating/modifying models, views, serializers | `prowler-api` | +| Creating/updating compliance frameworks | `prowler-compliance` | +| General Prowler development questions | `prowler` | +| Generic DRF patterns | `django-drf` | +| Mapping checks to compliance controls | `prowler-compliance` | +| Mocking AWS with moto in tests | `prowler-test-sdk` | +| Testing RLS tenant isolation | `prowler-test-api` | +| Updating existing checks and metadata | `prowler-sdk-check` | +| Using Zustand stores | `zustand-5` | +| Working on MCP server tools | `prowler-mcp` | +| Working on Prowler UI structure (actions/adapters/types/hooks) | `prowler-ui` | +| Working with Prowler UI test helpers/pages | `prowler-test-ui` | +| Working with Tailwind classes | `tailwind-4` | +| Writing Playwright E2E tests | `playwright` | +| Writing Prowler API tests | `prowler-test-api` | +| Writing Prowler SDK tests | `prowler-test-sdk` | +| Writing Prowler UI E2E tests | `prowler-test-ui` | +| Writing Python tests with pytest | `pytest` | +| Writing React components | `react-19` | +| Writing TypeScript types/interfaces | `typescript` | +| Writing documentation | `prowler-docs` | --- diff --git a/api/AGENTS.md b/api/AGENTS.md index cd69c16340..f43e0dcbb3 100644 --- a/api/AGENTS.md +++ b/api/AGENTS.md @@ -12,9 +12,11 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Action | Skill | |--------|-------| -| Generic DRF patterns | `django-drf` | | Creating/modifying models, views, serializers | `prowler-api` | -| Writing API tests | `prowler-test-api` | +| Generic DRF patterns | 
`django-drf` | +| Testing RLS tenant isolation | `prowler-test-api` | +| Writing Prowler API tests | `prowler-test-api` | +| Writing Python tests with pytest | `pytest` | --- diff --git a/prowler/AGENTS.md b/prowler/AGENTS.md index 5f8c3f4587..85217c9c8e 100644 --- a/prowler/AGENTS.md +++ b/prowler/AGENTS.md @@ -13,10 +13,15 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Action | Skill | |--------|-------| -| Adding compliance frameworks | `prowler-compliance` | | Adding new providers | `prowler-provider` | +| Adding services to existing providers | `prowler-provider` | | Creating new checks | `prowler-sdk-check` | -| Writing SDK tests | `prowler-test-sdk` | +| Creating/updating compliance frameworks | `prowler-compliance` | +| Mapping checks to compliance controls | `prowler-compliance` | +| Mocking AWS with moto in tests | `prowler-test-sdk` | +| Updating existing checks and metadata | `prowler-sdk-check` | +| Writing Prowler SDK tests | `prowler-test-sdk` | +| Writing Python tests with pytest | `pytest` | --- diff --git a/skills/skill-sync/assets/sync.sh b/skills/skill-sync/assets/sync.sh index 3cee870514..f2e14a3dc4 100755 --- a/skills/skill-sync/assets/sync.sh +++ b/skills/skill-sync/assets/sync.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Sync skill metadata to AGENTS.md Auto-invoke sections # Usage: ./sync.sh [--dry-run] [--scope ] @@ -85,18 +85,71 @@ extract_field() { } # Extract nested metadata field +# +# Supports either: +# auto_invoke: "Single Action" +# or: +# auto_invoke: +# - "Action A" +# - "Action B" +# +# For list values, this returns a pipe-delimited string: "Action A|Action B" extract_metadata() { local file="$1" local field="$2" + awk -v field="$field" ' + function trim(s) { + sub(/^[[:space:]]+/, "", s) + sub(/[[:space:]]+$/, "", s) + return s + } + /^---$/ { in_frontmatter = !in_frontmatter; next } + in_frontmatter && /^metadata:/ { in_metadata = 1; next } in_frontmatter && in_metadata && /^[a-z]/ && !/^[[:space:]]/ { in_metadata = 0 } + in_frontmatter && in_metadata && $1 == field":" { + # Remove "field:" prefix sub(/^[^:]+:[[:space:]]*/, "") - gsub(/^["'\'']|["'\'']$/, "") - gsub(/^\[|\]$/, "") # Remove array brackets - print + + # Single-line scalar: auto_invoke: "Action" + if ($0 != "") { + v = $0 + gsub(/^["'\'']|["'\'']$/, "", v) + gsub(/^\[|\]$/, "", v) # legacy: allow inline [a, b] + print trim(v) + exit + } + + # Multi-line list: + # auto_invoke: + # - "Action A" + # - "Action B" + out = "" + while (getline) { + # Stop when leaving metadata block + if (!in_frontmatter) break + if (!in_metadata) break + if ($0 ~ /^[a-z]/ && $0 !~ /^[[:space:]]/) break + + # On multi-line list, only accept "- item" lines. Anything else ends the list. + line = $0 + if (line ~ /^[[:space:]]*-[[:space:]]*/) { + sub(/^[[:space:]]*-[[:space:]]*/, "", line) + line = trim(line) + gsub(/^["'\'']|["'\'']$/, "", line) + if (line != "") { + if (out == "") out = line + else out = out "|" line + } + } else { + break + } + } + + if (out != "") print out exit } ' "$file" @@ -109,12 +162,20 @@ echo "" # Collect skills by scope declare -A SCOPE_SKILLS # scope -> "skill1:action1|skill2:action2|..." -for skill_file in "$SKILLS_DIR"/*/SKILL.md; do +# Deterministic iteration order (stable diffs) +# Note: macOS ships BSD find; avoid GNU-only flags. 
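+# Illustrative sketch of what the loop below consumes (values taken from prowler-test-sdk):
+#   metadata:
+#     scope: [root, sdk]
+#     auto_invoke:
+#       - "Writing Prowler SDK tests"
+#       - "Mocking AWS with moto in tests"
+# extract_metadata "$skill_file" "auto_invoke" prints
+#   "Writing Prowler SDK tests|Mocking AWS with moto in tests"
+# and the loop protects the pipes as ';;' before storing one "skill:actions"
+# entry per scope in SCOPE_SKILLS.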
+while IFS= read -r skill_file; do [ -f "$skill_file" ] || continue skill_name=$(extract_field "$skill_file" "name") scope_raw=$(extract_metadata "$skill_file" "scope") - auto_invoke=$(extract_metadata "$skill_file" "auto_invoke") + + auto_invoke_raw=$(extract_metadata "$skill_file" "auto_invoke") + # extract_metadata() returns: + # - single action: "Action" + # - multiple actions: "Action A|Action B" (pipe-delimited) + # But SCOPE_SKILLS also uses '|' to separate entries, so we protect it. + auto_invoke=${auto_invoke_raw//|/;;} # Skip if no scope or auto_invoke defined [ -z "$scope_raw" ] || [ -z "$auto_invoke" ] && continue @@ -136,10 +197,16 @@ for skill_file in "$SKILLS_DIR"/*/SKILL.md; do SCOPE_SKILLS[$scope]="${SCOPE_SKILLS[$scope]}|$skill_name:$auto_invoke" fi done -done +done < <(find "$SKILLS_DIR" -mindepth 2 -maxdepth 2 -name SKILL.md -print | sort) # Generate Auto-invoke section for each scope -for scope in "${!SCOPE_SKILLS[@]}"; do +# Deterministic scope order (stable diffs) +scopes_sorted=() +while IFS= read -r scope; do + scopes_sorted+=("$scope") +done < <(printf "%s\n" "${!SCOPE_SKILLS[@]}" | sort) + +for scope in "${scopes_sorted[@]}"; do agents_path=$(get_agents_path "$scope") if [ -z "$agents_path" ] || [ ! -f "$agents_path" ]; then @@ -157,13 +224,29 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Action | Skill | |--------|-------|" + # Expand into sortable rows: "actionskill" + rows=() + IFS='|' read -ra skill_entries <<< "${SCOPE_SKILLS[$scope]}" for entry in "${skill_entries[@]}"; do skill_name="${entry%%:*}" - action="${entry#*:}" + actions_raw="${entry#*:}" + + actions_raw=${actions_raw//;;/|} + IFS='|' read -ra actions <<< "$actions_raw" + for action in "${actions[@]}"; do + action="$(echo "$action" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//')" + [ -z "$action" ] && continue + rows+=("$action $skill_name") + done + done + + # Deterministic row order: Action then Skill + while IFS=$'\t' read -r action skill_name; do + [ -z "$action" ] && continue auto_invoke_section="$auto_invoke_section | $action | \`$skill_name\` |" - done + done < <(printf "%s\n" "${rows[@]}" | LC_ALL=C sort -t $'\t' -k1,1 -k2,2) if $DRY_RUN; then echo -e "${YELLOW}[DRY RUN] Would update $agents_path with:${NC}" @@ -216,17 +299,18 @@ echo -e "${GREEN}Done!${NC}" echo "" echo -e "${BLUE}Skills missing sync metadata:${NC}" missing=0 -for skill_file in "$SKILLS_DIR"/*/SKILL.md; do +while IFS= read -r skill_file; do [ -f "$skill_file" ] || continue skill_name=$(extract_field "$skill_file" "name") scope_raw=$(extract_metadata "$skill_file" "scope") - auto_invoke=$(extract_metadata "$skill_file" "auto_invoke") + auto_invoke_raw=$(extract_metadata "$skill_file" "auto_invoke") + auto_invoke=${auto_invoke_raw//|/;;} if [ -z "$scope_raw" ] || [ -z "$auto_invoke" ]; then echo -e " ${YELLOW}$skill_name${NC} - missing: ${scope_raw:+}${scope_raw:-scope} ${auto_invoke:+}${auto_invoke:-auto_invoke}" missing=$((missing + 1)) fi -done +done < <(find "$SKILLS_DIR" -mindepth 2 -maxdepth 2 -name SKILL.md -print | sort) if [ $missing -eq 0 ]; then echo -e " ${GREEN}All skills have sync metadata${NC}" diff --git a/ui/AGENTS.md b/ui/AGENTS.md index 1976bea43b..e8b2377944 100644 --- a/ui/AGENTS.md +++ b/ui/AGENTS.md @@ -19,11 +19,17 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Action | Skill | |--------|-------| | App Router / Server Actions | `nextjs-15` | -| Writing E2E/Playwright tests | `prowler-test-ui` | -| Creating/modifying React components | 
`prowler-ui` | -| Working with Tailwind classes | `tailwind-4` | +| Building AI chat features | `ai-sdk-5` | | Creating Zod schemas | `zod-4` | +| Creating/modifying Prowler UI components | `prowler-ui` | | Using Zustand stores | `zustand-5` | +| Working on Prowler UI structure (actions/adapters/types/hooks) | `prowler-ui` | +| Working with Prowler UI test helpers/pages | `prowler-test-ui` | +| Working with Tailwind classes | `tailwind-4` | +| Writing Playwright E2E tests | `playwright` | +| Writing Prowler UI E2E tests | `prowler-test-ui` | +| Writing React components | `react-19` | +| Writing TypeScript types/interfaces | `typescript` | --- From 7f84d777bcace8654f7f25b96a59e813d431e782 Mon Sep 17 00:00:00 2001 From: Alan Buscaglia Date: Mon, 12 Jan 2026 20:36:58 +0100 Subject: [PATCH 05/19] test(skills): cover multi-action auto-invoke sync --- skills/skill-sync/assets/sync_test.sh | 84 ++++++++++++++++++++++++++- 1 file changed, 82 insertions(+), 2 deletions(-) diff --git a/skills/skill-sync/assets/sync_test.sh b/skills/skill-sync/assets/sync_test.sh index b313273c4b..198056e539 100755 --- a/skills/skill-sync/assets/sync_test.sh +++ b/skills/skill-sync/assets/sync_test.sh @@ -370,6 +370,85 @@ test_generate_includes_action_text() { "Should include auto_invoke action text" } +test_generate_splits_multi_action_auto_invoke_list() { + # Change UI skill to use list auto_invoke (two actions) + cat > "$TEST_DIR/skills/mock-ui-skill/SKILL.md" << 'EOF' +--- +name: mock-ui-skill +description: Mock UI skill with multi-action auto_invoke list. +license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [ui] + auto_invoke: + - "Action B" + - "Action A" +allowed-tools: Read +--- +EOF + + run_sync > /dev/null + + # Both actions should produce rows + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "| Action A | \`mock-ui-skill\` |" \ + "Should create row for Action A" && \ + assert_file_contains "$TEST_DIR/ui/AGENTS.md" "| Action B | \`mock-ui-skill\` |" \ + "Should create row for Action B" +} + +test_generate_orders_rows_by_action_then_skill() { + # Two skills, intentionally out-of-order actions, same scope + cat > "$TEST_DIR/skills/mock-ui-skill/SKILL.md" << 'EOF' +--- +name: mock-ui-skill +description: Mock UI skill. +license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [ui] + auto_invoke: + - "Z action" + - "A action" +allowed-tools: Read +--- +EOF + + mkdir -p "$TEST_DIR/skills/mock-ui-skill-2" + cat > "$TEST_DIR/skills/mock-ui-skill-2/SKILL.md" << 'EOF' +--- +name: mock-ui-skill-2 +description: Second UI skill. 
+license: Apache-2.0 +metadata: + author: test + version: "1.0" + scope: [ui] + auto_invoke: "A action" +allowed-tools: Read +--- +EOF + + run_sync > /dev/null + + # Verify order within the table is: "A action" rows first, then "Z action" + local table_segment + table_segment=$(awk ' + /^\| Action \| Skill \|/ { in_table=1 } + in_table && /^---$/ { next } + in_table && /^\|/ { print } + in_table && !/^\|/ { exit } + ' "$TEST_DIR/ui/AGENTS.md") + + local first_a_index first_z_index + first_a_index=$(echo "$table_segment" | awk '/\| A action \|/ { print NR; exit }') + first_z_index=$(echo "$table_segment" | awk '/\| Z action \|/ { print NR; exit }') + + # Both must exist and A must come before Z + [ -n "$first_a_index" ] && [ -n "$first_z_index" ] && [ "$first_a_index" -lt "$first_z_index" ] +} + # ============================================================================= # TESTS: AGENTS.MD UPDATE # ============================================================================= @@ -396,8 +475,9 @@ test_update_replaces_existing_section() { # First run creates section run_sync > /dev/null - # Modify a skill's auto_invoke - sed -i 's/Testing UI components/Modified UI action/' "$TEST_DIR/skills/mock-ui-skill/SKILL.md" + # Modify a skill's auto_invoke (portable: BSD/GNU sed) + # macOS/BSD sed needs -i '' (separate arg). GNU sed accepts it too. + sed -i '' 's/Testing UI components/Modified UI action/' "$TEST_DIR/skills/mock-ui-skill/SKILL.md" # Second run should replace run_sync > /dev/null From 21174ad5935037ac50d3d6305085a976dfce65c9 Mon Sep 17 00:00:00 2001 From: Alan Buscaglia Date: Mon, 12 Jan 2026 20:42:33 +0100 Subject: [PATCH 06/19] feat(skills): expand scopes and auto-invoke metadata --- skills/ai-sdk-5/SKILL.md | 4 +++- skills/django-drf/SKILL.md | 4 ++-- skills/nextjs-15/SKILL.md | 4 ++-- skills/playwright/SKILL.md | 4 +++- skills/prowler-api/SKILL.md | 6 +++--- skills/prowler-compliance/SKILL.md | 8 +++++--- skills/prowler-mcp/SKILL.md | 5 ++++- skills/prowler-provider/SKILL.md | 8 +++++--- skills/prowler-sdk-check/SKILL.md | 8 +++++--- skills/prowler-test-api/SKILL.md | 10 ++++++---- skills/prowler-test-sdk/SKILL.md | 8 +++++--- skills/prowler-test-ui/SKILL.md | 8 +++++--- skills/prowler-ui/SKILL.md | 8 +++++--- skills/prowler/SKILL.md | 2 ++ skills/pytest/SKILL.md | 4 +++- skills/react-19/SKILL.md | 4 +++- skills/skill-sync/SKILL.md | 13 +++++++++++-- skills/tailwind-4/SKILL.md | 4 ++-- skills/typescript/SKILL.md | 4 +++- skills/zod-4/SKILL.md | 4 ++-- skills/zustand-5/SKILL.md | 4 ++-- 21 files changed, 81 insertions(+), 43 deletions(-) diff --git a/skills/ai-sdk-5/SKILL.md b/skills/ai-sdk-5/SKILL.md index 3b1c979f73..e365546576 100644 --- a/skills/ai-sdk-5/SKILL.md +++ b/skills/ai-sdk-5/SKILL.md @@ -2,11 +2,13 @@ name: ai-sdk-5 description: > Vercel AI SDK 5 patterns. - Trigger: When building AI chat features - breaking changes from v4. + Trigger: When building AI features with AI SDK v5 (chat, streaming, tools/function calling, UIMessage parts), including migration from v4. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root, ui] + auto_invoke: "Building AI chat features" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/django-drf/SKILL.md b/skills/django-drf/SKILL.md index 25d1a39692..df740a3e4d 100644 --- a/skills/django-drf/SKILL.md +++ b/skills/django-drf/SKILL.md @@ -2,12 +2,12 @@ name: django-drf description: > Django REST Framework patterns. 
- Trigger: When building REST APIs with Django - ViewSets, Serializers, Filters. + Trigger: When implementing generic DRF APIs (ViewSets, serializers, routers, permissions, filtersets). For Prowler API specifics (RLS/JSON:API), also use prowler-api. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [api] + scope: [root, api] auto_invoke: "Generic DRF patterns" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/nextjs-15/SKILL.md b/skills/nextjs-15/SKILL.md index b529f30fd0..793c1db2e1 100644 --- a/skills/nextjs-15/SKILL.md +++ b/skills/nextjs-15/SKILL.md @@ -2,12 +2,12 @@ name: nextjs-15 description: > Next.js 15 App Router patterns. - Trigger: When working with Next.js - routing, Server Actions, data fetching. + Trigger: When working in Next.js App Router (app/), Server Components vs Client Components, Server Actions, Route Handlers, caching/revalidation, and streaming/Suspense. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [ui] + scope: [root, ui] auto_invoke: "App Router / Server Actions" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/playwright/SKILL.md b/skills/playwright/SKILL.md index 34e71aadd1..d9009d6f4a 100644 --- a/skills/playwright/SKILL.md +++ b/skills/playwright/SKILL.md @@ -2,11 +2,13 @@ name: playwright description: > Playwright E2E testing patterns. - Trigger: When writing E2E tests - Page Objects, selectors, MCP workflow. + Trigger: When writing Playwright E2E tests (Page Object Model, selectors, MCP exploration workflow). For Prowler-specific UI conventions under ui/tests, also use prowler-test-ui. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root, ui] + auto_invoke: "Writing Playwright E2E tests" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-api/SKILL.md b/skills/prowler-api/SKILL.md index c779b08c0f..a0202bd783 100644 --- a/skills/prowler-api/SKILL.md +++ b/skills/prowler-api/SKILL.md @@ -1,13 +1,13 @@ --- name: prowler-api description: > - Prowler API patterns: RLS, RBAC, providers, Celery tasks. - Trigger: When working on api/ - models, serializers, views, filters, tasks. + Prowler API patterns: JSON:API, RLS, RBAC, providers, Celery tasks. + Trigger: When working in api/ on models/serializers/viewsets/filters/tasks involving tenant isolation (RLS), RBAC, JSON:API, or provider lifecycle. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [api] + scope: [root, api] auto_invoke: "Creating/modifying models, views, serializers" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-compliance/SKILL.md b/skills/prowler-compliance/SKILL.md index c06ca5f2fa..8b98045cdb 100644 --- a/skills/prowler-compliance/SKILL.md +++ b/skills/prowler-compliance/SKILL.md @@ -2,13 +2,15 @@ name: prowler-compliance description: > Creates and manages Prowler compliance frameworks. - Trigger: When working with compliance frameworks (CIS, NIST, PCI-DSS, SOC2, GDPR). + Trigger: When creating or updating compliance frameworks (adding requirements, mapping checks to controls) under prowler/compliance/{provider}/. 
license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [sdk] - auto_invoke: "Adding compliance frameworks" + scope: [root, sdk] + auto_invoke: + - "Creating/updating compliance frameworks" + - "Mapping checks to compliance controls" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-mcp/SKILL.md b/skills/prowler-mcp/SKILL.md index 4bf3eec7a0..b56a692e04 100644 --- a/skills/prowler-mcp/SKILL.md +++ b/skills/prowler-mcp/SKILL.md @@ -2,11 +2,14 @@ name: prowler-mcp description: > Creates MCP tools for Prowler MCP Server. Covers BaseTool pattern, model design, - and API client usage. Use when working on mcp_server/ directory. + and API client usage. + Trigger: When working in mcp_server/ on tools (BaseTool), models (MinimalSerializerMixin/from_api_response), or API client patterns. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root] + auto_invoke: "Working on MCP server tools" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-provider/SKILL.md b/skills/prowler-provider/SKILL.md index c41c6e5315..3c8a0f5433 100644 --- a/skills/prowler-provider/SKILL.md +++ b/skills/prowler-provider/SKILL.md @@ -2,13 +2,15 @@ name: prowler-provider description: > Creates new Prowler cloud providers or adds services to existing providers. - Trigger: When adding a new cloud provider or service to Prowler SDK. + Trigger: When extending Prowler SDK provider architecture (adding a new provider or a new service to an existing provider). license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [sdk] - auto_invoke: "Adding new providers" + scope: [root, sdk] + auto_invoke: + - "Adding new providers" + - "Adding services to existing providers" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-sdk-check/SKILL.md b/skills/prowler-sdk-check/SKILL.md index 0fdc3a822a..c9319f705c 100644 --- a/skills/prowler-sdk-check/SKILL.md +++ b/skills/prowler-sdk-check/SKILL.md @@ -2,13 +2,15 @@ name: prowler-sdk-check description: > Creates Prowler security checks following SDK architecture patterns. - Trigger: When user asks to create a new security check for any provider (AWS, Azure, GCP, K8s, GitHub, etc.) + Trigger: When creating or updating a Prowler SDK security check (implementation + metadata) for any provider (AWS, Azure, GCP, K8s, GitHub, etc.). license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [sdk] - auto_invoke: "Creating new checks" + scope: [root, sdk] + auto_invoke: + - "Creating new checks" + - "Updating existing checks and metadata" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-test-api/SKILL.md b/skills/prowler-test-api/SKILL.md index 69d9613aab..edda0b9c6d 100644 --- a/skills/prowler-test-api/SKILL.md +++ b/skills/prowler-test-api/SKILL.md @@ -1,14 +1,16 @@ --- name: prowler-test-api description: > - Testing patterns for Prowler API: ViewSets, Celery tasks, RLS isolation, RBAC. - Trigger: When writing tests for api/ - viewsets, serializers, tasks, models. + Testing patterns for Prowler API: JSON:API, Celery tasks, RLS isolation, RBAC. + Trigger: When writing tests for api/ (JSON:API requests/assertions, cross-tenant isolation, RBAC, Celery tasks, viewsets/serializers). 
license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [api] - auto_invoke: "Writing API tests" + scope: [root, api] + auto_invoke: + - "Writing Prowler API tests" + - "Testing RLS tenant isolation" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-test-sdk/SKILL.md b/skills/prowler-test-sdk/SKILL.md index 02df667205..c729bb6896 100644 --- a/skills/prowler-test-sdk/SKILL.md +++ b/skills/prowler-test-sdk/SKILL.md @@ -2,13 +2,15 @@ name: prowler-test-sdk description: > Testing patterns for Prowler SDK (Python). - Trigger: When writing tests for checks, services, or providers. + Trigger: When writing tests for the Prowler SDK (checks/services/providers), including provider-specific mocking rules (moto for AWS only). license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [sdk] - auto_invoke: "Writing SDK tests" + scope: [root, sdk] + auto_invoke: + - "Writing Prowler SDK tests" + - "Mocking AWS with moto in tests" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-test-ui/SKILL.md b/skills/prowler-test-ui/SKILL.md index 761723956a..67dab1194f 100644 --- a/skills/prowler-test-ui/SKILL.md +++ b/skills/prowler-test-ui/SKILL.md @@ -2,13 +2,15 @@ name: prowler-test-ui description: > E2E testing patterns for Prowler UI (Playwright). - Trigger: When writing E2E tests for the Next.js frontend. + Trigger: When writing Playwright E2E tests under ui/tests in the Prowler UI (Prowler-specific base page/helpers, tags, flows). license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [ui] - auto_invoke: "Writing E2E/Playwright tests" + scope: [root, ui] + auto_invoke: + - "Writing Prowler UI E2E tests" + - "Working with Prowler UI test helpers/pages" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler-ui/SKILL.md b/skills/prowler-ui/SKILL.md index a400d07f0f..b1e68f8170 100644 --- a/skills/prowler-ui/SKILL.md +++ b/skills/prowler-ui/SKILL.md @@ -2,13 +2,15 @@ name: prowler-ui description: > Prowler UI-specific patterns. For generic patterns, see: typescript, react-19, nextjs-15, tailwind-4. - Trigger: When working on ui/ directory - components, pages, actions, hooks. + Trigger: When working inside ui/ on Prowler-specific conventions (shadcn vs HeroUI legacy, folder placement, actions/adapters, shared types/hooks/lib). license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [ui] - auto_invoke: "Creating/modifying React components" + scope: [root, ui] + auto_invoke: + - "Creating/modifying Prowler UI components" + - "Working on Prowler UI structure (actions/adapters/types/hooks)" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler/SKILL.md b/skills/prowler/SKILL.md index bb520fe42b..4ea521260b 100644 --- a/skills/prowler/SKILL.md +++ b/skills/prowler/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root] + auto_invoke: "General Prowler development questions" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/pytest/SKILL.md b/skills/pytest/SKILL.md index bb2328cc41..35f4e04b40 100644 --- a/skills/pytest/SKILL.md +++ b/skills/pytest/SKILL.md @@ -2,11 +2,13 @@ name: pytest description: > Pytest testing patterns for Python. - Trigger: When writing Python tests - fixtures, mocking, markers. 
+ Trigger: When writing or refactoring pytest tests (fixtures, mocking, parametrize, markers). For Prowler-specific API/SDK testing conventions, also use prowler-test-api or prowler-test-sdk. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root, sdk, api] + auto_invoke: "Writing Python tests with pytest" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/react-19/SKILL.md b/skills/react-19/SKILL.md index 53ab6b0aea..519ae9f15e 100644 --- a/skills/react-19/SKILL.md +++ b/skills/react-19/SKILL.md @@ -2,11 +2,13 @@ name: react-19 description: > React 19 patterns with React Compiler. - Trigger: When writing React components - no useMemo/useCallback needed. + Trigger: When writing React 19 components/hooks in .tsx (React Compiler rules, hook patterns, refs as props). If using Next.js App Router/Server Actions, also use nextjs-15. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root, ui] + auto_invoke: "Writing React components" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/skill-sync/SKILL.md b/skills/skill-sync/SKILL.md index e307c7d3fd..085d9656eb 100644 --- a/skills/skill-sync/SKILL.md +++ b/skills/skill-sync/SKILL.md @@ -18,14 +18,23 @@ Keeps AGENTS.md Auto-invoke sections in sync with skill metadata. When you creat ## Required Skill Metadata -Each skill that should appear in Auto-invoke sections needs these fields in `metadata`: +Each skill that should appear in Auto-invoke sections needs these fields in `metadata`. + +`auto_invoke` can be either a single string **or** a list of actions: ```yaml metadata: author: prowler-cloud version: "1.0" scope: [ui] # Which AGENTS.md: ui, api, sdk, root - auto_invoke: "Creating/modifying components" # When to invoke (action description) + + # Option A: single action + auto_invoke: "Creating/modifying components" + + # Option B: multiple actions + # auto_invoke: + # - "Creating/modifying components" + # - "Refactoring component folder placement" ``` ### Scope Values diff --git a/skills/tailwind-4/SKILL.md b/skills/tailwind-4/SKILL.md index 44d03319e3..51f57576af 100644 --- a/skills/tailwind-4/SKILL.md +++ b/skills/tailwind-4/SKILL.md @@ -2,12 +2,12 @@ name: tailwind-4 description: > Tailwind CSS 4 patterns and best practices. - Trigger: When styling with Tailwind - cn(), theme variables, no var() in className. + Trigger: When styling with Tailwind (className, variants, cn()), especially when dynamic styling or CSS variables are involved (no var() in className). license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [ui] + scope: [root, ui] auto_invoke: "Working with Tailwind classes" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/typescript/SKILL.md b/skills/typescript/SKILL.md index 6e06effdc6..046465a75b 100644 --- a/skills/typescript/SKILL.md +++ b/skills/typescript/SKILL.md @@ -2,11 +2,13 @@ name: typescript description: > TypeScript strict patterns and best practices. - Trigger: When writing TypeScript code - types, interfaces, generics. + Trigger: When implementing or refactoring TypeScript in .ts/.tsx (types, interfaces, generics, const maps, type guards, removing any, tightening unknown). 
license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root, ui] + auto_invoke: "Writing TypeScript types/interfaces" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/zod-4/SKILL.md b/skills/zod-4/SKILL.md index 13c4fc6553..730a840d64 100644 --- a/skills/zod-4/SKILL.md +++ b/skills/zod-4/SKILL.md @@ -2,12 +2,12 @@ name: zod-4 description: > Zod 4 schema validation patterns. - Trigger: When using Zod for validation - breaking changes from v3. + Trigger: When creating or updating Zod v4 schemas for validation/parsing (forms, request payloads, adapters), including v3 -> v4 migration patterns. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [ui] + scope: [root, ui] auto_invoke: "Creating Zod schemas" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/zustand-5/SKILL.md b/skills/zustand-5/SKILL.md index 482a964c36..88defdcbd0 100644 --- a/skills/zustand-5/SKILL.md +++ b/skills/zustand-5/SKILL.md @@ -2,12 +2,12 @@ name: zustand-5 description: > Zustand 5 state management patterns. - Trigger: When managing React state with Zustand. + Trigger: When implementing client-side state with Zustand (stores, selectors, persist middleware, slices). license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" - scope: [ui] + scope: [root, ui] auto_invoke: "Using Zustand stores" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- From f14a5c83b0eeb176ceee615e97ea2e1a18dcdcf2 Mon Sep 17 00:00:00 2001 From: Alan Buscaglia Date: Mon, 12 Jan 2026 21:03:36 +0100 Subject: [PATCH 07/19] feat(skills): add prowler-ci skill for PR CI gates --- AGENTS.md | 14 +++++++++- skills/prowler-ci/SKILL.md | 52 ++++++++++++++++++++++++++++++++++++++ skills/prowler-pr/SKILL.md | 9 +++++-- skills/prowler/SKILL.md | 2 +- skills/skill-sync/SKILL.md | 7 +++-- 5 files changed, 78 insertions(+), 6 deletions(-) create mode 100644 skills/prowler-ci/SKILL.md diff --git a/AGENTS.md b/AGENTS.md index f3814bdca5..1367a5dc5d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -37,6 +37,7 @@ Use these skills for detailed patterns on-demand: | `prowler-test-ui` | E2E testing (Playwright) | [SKILL.md](skills/prowler-test-ui/SKILL.md) | | `prowler-compliance` | Compliance framework structure | [SKILL.md](skills/prowler-compliance/SKILL.md) | | `prowler-provider` | Add new cloud providers | [SKILL.md](skills/prowler-provider/SKILL.md) | +| `prowler-ci` | CI checks and PR gates (GitHub Actions) | [SKILL.md](skills/prowler-ci/SKILL.md) | | `prowler-pr` | Pull request conventions | [SKILL.md](skills/prowler-pr/SKILL.md) | | `prowler-docs` | Documentation style guide | [SKILL.md](skills/prowler-docs/SKILL.md) | | `skill-creator` | Create new AI agent skills | [SKILL.md](skills/skill-creator/SKILL.md) | @@ -52,18 +53,29 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | After creating/modifying a skill | `skill-sync` | | App Router / Server Actions | `nextjs-15` | | Building AI chat features | `ai-sdk-5` | +| Create a PR with gh pr create | `prowler-pr` | | Creating Zod schemas | `zod-4` | -| Creating a PR | `prowler-pr` | | Creating new checks | `prowler-sdk-check` | | Creating new skills | `skill-creator` | | Creating/modifying Prowler UI components | `prowler-ui` | | Creating/modifying models, views, serializers | `prowler-api` | | Creating/updating compliance frameworks | `prowler-compliance` | +| Debug why a GitHub Actions job is failing | 
`prowler-ci` | +| Fill .github/pull_request_template.md (Context/Description/Steps to review/Checklist) | `prowler-pr` | | General Prowler development questions | `prowler` | | Generic DRF patterns | `django-drf` | +| Inspect PR CI checks and gates (.github/workflows/*) | `prowler-ci` | +| Inspect PR CI workflows (.github/workflows/*): conventional-commit, pr-check-changelog, pr-conflict-checker, labeler | `prowler-pr` | | Mapping checks to compliance controls | `prowler-compliance` | | Mocking AWS with moto in tests | `prowler-test-sdk` | +| Regenerate AGENTS.md Auto-invoke tables (sync.sh) | `skill-sync` | +| Review PR requirements: template, title conventions, changelog gate | `prowler-pr` | | Testing RLS tenant isolation | `prowler-test-api` | +| Troubleshoot why a skill is missing from AGENTS.md auto-invoke | `skill-sync` | +| Understand CODEOWNERS/labeler-based automation | `prowler-ci` | +| Understand PR title conventional-commit validation | `prowler-ci` | +| Understand changelog gate and no-changelog label behavior | `prowler-ci` | +| Understand review ownership with CODEOWNERS | `prowler-pr` | | Updating existing checks and metadata | `prowler-sdk-check` | | Using Zustand stores | `zustand-5` | | Working on MCP server tools | `prowler-mcp` | diff --git a/skills/prowler-ci/SKILL.md b/skills/prowler-ci/SKILL.md new file mode 100644 index 0000000000..1bed87d74e --- /dev/null +++ b/skills/prowler-ci/SKILL.md @@ -0,0 +1,52 @@ +--- +name: prowler-ci +description: > + Helps with Prowler repository CI and PR gates (GitHub Actions workflows). + Trigger: When investigating CI checks failing on a PR, PR title validation, changelog gate/no-changelog label, + conflict marker checks, secret scanning, CODEOWNERS/labeler automation, or anything under .github/workflows. +license: Apache-2.0 +metadata: + author: prowler-cloud + version: "1.0" + scope: [root] + auto_invoke: + - "Inspect PR CI checks and gates (.github/workflows/*)" + - "Debug why a GitHub Actions job is failing" + - "Understand changelog gate and no-changelog label behavior" + - "Understand PR title conventional-commit validation" + - "Understand CODEOWNERS/labeler-based automation" +allowed-tools: Read, Edit, Write, Glob, Grep, Bash +--- + +## What this skill covers + +Use this skill whenever you are: + +- Reading or changing GitHub Actions workflows under `.github/workflows/` +- Explaining why a PR fails checks (title, changelog, conflict markers, secret scanning) +- Figuring out which workflows run for UI/API/SDK changes and why +- Diagnosing path-filtering behavior (why a workflow did/didn't run) + +## Quick map (where to look) + +- PR template: `.github/pull_request_template.md` +- PR title validation: `.github/workflows/conventional-commit.yml` +- Changelog gate: `.github/workflows/pr-check-changelog.yml` +- Conflict markers check: `.github/workflows/pr-conflict-checker.yml` +- Secret scanning: `.github/workflows/find-secrets.yml` +- Auto labels: `.github/workflows/labeler.yml` and `.github/labeler.yml` +- Review ownership: `.github/CODEOWNERS` + +## Debug checklist (PR failing checks) + +1. Identify which workflow/job is failing (name + file under `.github/workflows/`). +2. Check path filters: is the workflow supposed to run for your changed files? +3. If it's a title check: verify PR title matches Conventional Commits. +4. If it's changelog: verify the right `CHANGELOG.md` is updated OR apply `no-changelog` label. +5. If it's conflict checker: remove `<<<<<<<`, `=======`, `>>>>>>>` markers. +6. 
If it's secrets: remove credentials and rotate anything leaked. + +## Notes + +- Keep `prowler-pr` focused on *creating* PRs and filling the template. +- Use `prowler-ci` for *CI policies and gates* that apply to PRs. diff --git a/skills/prowler-pr/SKILL.md b/skills/prowler-pr/SKILL.md index 63bf83c051..c2d87716ad 100644 --- a/skills/prowler-pr/SKILL.md +++ b/skills/prowler-pr/SKILL.md @@ -2,13 +2,18 @@ name: prowler-pr description: > Creates Pull Requests for Prowler following the project template and conventions. - Trigger: When user asks to create a PR, submit changes, or open a pull request. + Trigger: When working on pull request requirements or creation (PR template sections, PR title Conventional Commits check, changelog gate/no-changelog label), or when inspecting PR-related GitHub workflows like conventional-commit.yml, pr-check-changelog.yml, pr-conflict-checker.yml, labeler.yml, or CODEOWNERS. license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" scope: [root] - auto_invoke: "Creating a PR" + auto_invoke: + - "Create a PR with gh pr create" + - "Review PR requirements: template, title conventions, changelog gate" + - "Fill .github/pull_request_template.md (Context/Description/Steps to review/Checklist)" + - "Inspect PR CI workflows (.github/workflows/*): conventional-commit, pr-check-changelog, pr-conflict-checker, labeler" + - "Understand review ownership with CODEOWNERS" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/prowler/SKILL.md b/skills/prowler/SKILL.md index 4ea521260b..a9294708fb 100644 --- a/skills/prowler/SKILL.md +++ b/skills/prowler/SKILL.md @@ -2,7 +2,7 @@ name: prowler description: > Main entry point for Prowler development - quick reference for all components. - Trigger: General Prowler development questions, project overview, component navigation. + Trigger: General Prowler development questions, project overview, component navigation (NOT PR CI gates or GitHub Actions workflows). license: Apache-2.0 metadata: author: prowler-cloud diff --git a/skills/skill-sync/SKILL.md b/skills/skill-sync/SKILL.md index 085d9656eb..4b136dd5b2 100644 --- a/skills/skill-sync/SKILL.md +++ b/skills/skill-sync/SKILL.md @@ -2,13 +2,16 @@ name: skill-sync description: > Syncs skill metadata to AGENTS.md Auto-invoke sections. - Trigger: After creating/modifying a skill, run sync to update AGENTS.md files. + Trigger: When updating skill metadata (metadata.scope/metadata.auto_invoke), regenerating Auto-invoke tables, or running ./skills/skill-sync/assets/sync.sh (including --dry-run/--scope). 
license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" scope: [root] - auto_invoke: "After creating/modifying a skill" + auto_invoke: + - "After creating/modifying a skill" + - "Regenerate AGENTS.md Auto-invoke tables (sync.sh)" + - "Troubleshoot why a skill is missing from AGENTS.md auto-invoke" allowed-tools: Read, Edit, Write, Glob, Grep, Bash --- From 83a192c02f4a43b712f44912e4e2fee6aa58856f Mon Sep 17 00:00:00 2001 From: pedrooot Date: Tue, 13 Jan 2026 10:24:09 +0100 Subject: [PATCH 08/19] feat(ai): improve compliance skills --- skills/prowler-compliance/SKILL.md | 486 ++++++++++++++++-- .../assets/cis_framework.json | 104 +++- .../assets/ens_framework.json | 128 +++++ .../assets/generic_framework.json | 103 ++++ .../assets/iso27001_framework.json | 91 ++++ .../assets/mitre_attack_framework.json | 142 +++++ .../assets/prowler_threatscore_framework.json | 189 +++++++ .../references/compliance-docs.md | 140 ++++- 8 files changed, 1299 insertions(+), 84 deletions(-) create mode 100644 skills/prowler-compliance/assets/ens_framework.json create mode 100644 skills/prowler-compliance/assets/generic_framework.json create mode 100644 skills/prowler-compliance/assets/iso27001_framework.json create mode 100644 skills/prowler-compliance/assets/mitre_attack_framework.json create mode 100644 skills/prowler-compliance/assets/prowler_threatscore_framework.json diff --git a/skills/prowler-compliance/SKILL.md b/skills/prowler-compliance/SKILL.md index 8b98045cdb..0a1a14541d 100644 --- a/skills/prowler-compliance/SKILL.md +++ b/skills/prowler-compliance/SKILL.md @@ -2,11 +2,11 @@ name: prowler-compliance description: > Creates and manages Prowler compliance frameworks. - Trigger: When creating or updating compliance frameworks (adding requirements, mapping checks to controls) under prowler/compliance/{provider}/. + Trigger: When working with compliance frameworks (CIS, NIST, PCI-DSS, SOC2, GDPR, ISO27001, ENS, MITRE ATT&CK). 
license: Apache-2.0 metadata: author: prowler-cloud - version: "1.0" + version: "1.1" scope: [root, sdk] auto_invoke: - "Creating/updating compliance frameworks" @@ -20,98 +20,472 @@ Use this skill when: - Creating a new compliance framework for any provider - Adding requirements to existing frameworks - Mapping checks to compliance controls +- Understanding compliance framework structures and attributes -## Compliance Framework Structure +## Compliance Framework Location -Frameworks are JSON files in: `prowler/compliance/{provider}/{framework}.json` +Frameworks are JSON files located in: `prowler/compliance/{provider}/{framework_name}_{provider}.json` + +**Supported Providers:** +- `aws` - Amazon Web Services +- `azure` - Microsoft Azure +- `gcp` - Google Cloud Platform +- `kubernetes` - Kubernetes +- `github` - GitHub +- `m365` - Microsoft 365 +- `alibabacloud` - Alibaba Cloud +- `oraclecloud` - Oracle Cloud +- `oci` - Oracle Cloud Infrastructure +- `nhn` - NHN Cloud +- `mongodbatlas` - MongoDB Atlas +- `iac` - Infrastructure as Code +- `llm` - Large Language Models + +## Base Framework Structure + +All compliance frameworks share this base structure: ```json { - "Framework": "CIS", - "Name": "CIS Amazon Web Services Foundations Benchmark v2.0.0", - "Version": "2.0", - "Provider": "AWS", - "Description": "The CIS Amazon Web Services Foundations Benchmark provides prescriptive guidance...", + "Framework": "FRAMEWORK_NAME", + "Name": "Full Framework Name with Version", + "Version": "X.X", + "Provider": "PROVIDER", + "Description": "Framework description...", "Requirements": [ { - "Id": "1.1", - "Name": "Requirement name", - "Description": "Detailed description of the requirement", - "Attributes": [ - { - "Section": "1. Identity and Access Management", - "Profile": "Level 1", - "AssessmentStatus": "Automated", - "Description": "Attribute description" - } - ], + "Id": "requirement_id", + "Description": "Requirement description", + "Name": "Optional requirement name", + "Attributes": [...], "Checks": ["check_name_1", "check_name_2"] } ] } ``` -## Supported Frameworks +## Framework-Specific Attribute Structures + +Each framework type has its own attribute model. 
Below are the exact structures used by Prowler: + +### CIS (Center for Internet Security) + +**Framework ID format:** `cis_{version}_{provider}` (e.g., `cis_5.0_aws`) + +```json +{ + "Id": "1.1", + "Description": "Maintain current contact details", + "Checks": ["account_maintain_current_contact_details"], + "Attributes": [ + { + "Section": "1 Identity and Access Management", + "SubSection": "Optional subsection", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "Detailed attribute description", + "RationaleStatement": "Why this control matters", + "ImpactStatement": "Impact of implementing this control", + "RemediationProcedure": "Steps to fix the issue", + "AuditProcedure": "Steps to verify compliance", + "AdditionalInformation": "Extra notes", + "DefaultValue": "Default configuration value", + "References": "https://docs.example.com/reference" + } + ] +} +``` + +**Profile values:** `Level 1`, `Level 2`, `E3 Level 1`, `E3 Level 2`, `E5 Level 1`, `E5 Level 2` +**AssessmentStatus values:** `Automated`, `Manual` + +--- + +### ISO 27001 + +**Framework ID format:** `iso27001_{year}_{provider}` (e.g., `iso27001_2022_aws`) + +```json +{ + "Id": "A.5.1", + "Description": "Policies for information security should be defined...", + "Name": "Policies for information security", + "Checks": ["securityhub_enabled"], + "Attributes": [ + { + "Category": "A.5 Organizational controls", + "Objetive_ID": "A.5.1", + "Objetive_Name": "Policies for information security", + "Check_Summary": "Summary of what is being checked" + } + ] +} +``` + +**Note:** `Objetive_ID` and `Objetive_Name` use this exact spelling (not "Objective"). + +--- + +### ENS (Esquema Nacional de Seguridad - Spain) + +**Framework ID format:** `ens_rd2022_{provider}` (e.g., `ens_rd2022_aws`) + +```json +{ + "Id": "op.acc.1.aws.iam.2", + "Description": "Proveedor de identidad centralizado", + "Checks": ["iam_check_saml_providers_sts"], + "Attributes": [ + { + "IdGrupoControl": "op.acc.1", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Detailed control description in Spanish", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": ["trazabilidad", "autenticidad"], + "ModoEjecucion": "automatico", + "Dependencias": [] + } + ] +} +``` + +**Nivel values:** `opcional`, `bajo`, `medio`, `alto` +**Tipo values:** `refuerzo`, `requisito`, `recomendacion`, `medida` +**Dimensiones values:** `confidencialidad`, `integridad`, `trazabilidad`, `autenticidad`, `disponibilidad` + +--- + +### MITRE ATT&CK + +**Framework ID format:** `mitre_attack_{provider}` (e.g., `mitre_attack_aws`) + +MITRE uses a different requirement structure: + +```json +{ + "Name": "Exploit Public-Facing Application", + "Id": "T1190", + "Tactics": ["Initial Access"], + "SubTechniques": [], + "Platforms": ["Containers", "IaaS", "Linux", "Network", "Windows", "macOS"], + "Description": "Adversaries may attempt to exploit a weakness...", + "TechniqueURL": "https://attack.mitre.org/techniques/T1190/", + "Checks": ["guardduty_is_enabled", "inspector2_is_enabled"], + "Attributes": [ + { + "AWSService": "Amazon GuardDuty", + "Category": "Detect", + "Value": "Minimal", + "Comment": "Explanation of how this service helps..." 
+ } + ] +} +``` + +**For Azure:** Use `AzureService` instead of `AWSService` +**For GCP:** Use `GCPService` instead of `AWSService` +**Category values:** `Detect`, `Protect`, `Respond` +**Value values:** `Minimal`, `Partial`, `Significant` + +--- + +### NIST 800-53 + +**Framework ID format:** `nist_800_53_revision_{version}_{provider}` (e.g., `nist_800_53_revision_5_aws`) + +```json +{ + "Id": "ac_2_1", + "Name": "AC-2(1) Automated System Account Management", + "Description": "Support the management of system accounts...", + "Checks": ["iam_password_policy_minimum_length_14"], + "Attributes": [ + { + "ItemId": "ac_2_1", + "Section": "Access Control (AC)", + "SubSection": "Account Management (AC-2)", + "SubGroup": "AC-2(3) Disable Accounts", + "Service": "iam" + } + ] +} +``` + +--- + +### Generic Compliance (Fallback) + +For frameworks without specific attribute models: + +```json +{ + "Id": "requirement_id", + "Description": "Requirement description", + "Name": "Optional name", + "Checks": ["check_name"], + "Attributes": [ + { + "ItemId": "item_id", + "Section": "Section name", + "SubSection": "Subsection name", + "SubGroup": "Subgroup name", + "Service": "service_name", + "Type": "type" + } + ] +} +``` + +--- + +### AWS Well-Architected Framework + +**Framework ID format:** `aws_well_architected_framework_{pillar}_pillar_aws` + +```json +{ + "Id": "SEC01-BP01", + "Description": "Establish common guardrails...", + "Name": "Establish common guardrails", + "Checks": ["account_part_of_organizations"], + "Attributes": [ + { + "Name": "Establish common guardrails", + "WellArchitectedQuestionId": "securely-operate", + "WellArchitectedPracticeId": "sec_securely_operate_multi_accounts", + "Section": "Security", + "SubSection": "Security foundations", + "LevelOfRisk": "High", + "AssessmentMethod": "Automated", + "Description": "Detailed description", + "ImplementationGuidanceUrl": "https://docs.aws.amazon.com/..." + } + ] +} +``` + +--- + +### KISA ISMS-P (Korea) -**Industry standards:** -- CIS (Center for Internet Security) -- NIST 800-53, NIST CSF -- CISA +**Framework ID format:** `kisa_isms_p_{year}_{provider}` (e.g., `kisa_isms_p_2023_aws`) -**Regulatory compliance:** -- PCI-DSS -- HIPAA -- GDPR -- FedRAMP -- SOC2 +```json +{ + "Id": "1.1.1", + "Description": "Requirement description", + "Name": "Requirement name", + "Checks": ["check_name"], + "Attributes": [ + { + "Domain": "1. 
Management System", + "Subdomain": "1.1 Management System Establishment", + "Section": "1.1.1 Section Name", + "AuditChecklist": ["Checklist item 1", "Checklist item 2"], + "RelatedRegulations": ["Regulation 1"], + "AuditEvidence": ["Evidence type 1"], + "NonComplianceCases": ["Non-compliance example"] + } + ] +} +``` -**Cloud-specific:** -- AWS Well-Architected Framework (Security Pillar) -- AWS Foundational Technical Review (FTR) -- Azure Security Benchmark -- GCP Security Best Practices +--- -## Framework Requirement Mapping +### C5 (Germany Cloud Computing Compliance Criteria Catalogue) -Each requirement maps to one or more checks: +**Framework ID format:** `c5_{provider}` (e.g., `c5_aws`) ```json { - "Id": "2.1.1", - "Name": "Ensure MFA is enabled for all IAM users", - "Description": "Multi-Factor Authentication adds an extra layer of protection...", - "Checks": [ - "iam_user_mfa_enabled", - "iam_root_mfa_enabled", - "iam_user_hardware_mfa_enabled" + "Id": "BCM-01", + "Description": "Requirement description", + "Name": "Requirement name", + "Checks": ["check_name"], + "Attributes": [ + { + "Section": "BCM Business Continuity Management", + "SubSection": "BCM-01", + "Type": "Basic Criteria", + "AboutCriteria": "Description of criteria", + "ComplementaryCriteria": "Additional criteria" + } ] } ``` +--- + +### CCC (Cloud Computing Compliance) + +**Framework ID format:** `ccc_{provider}` (e.g., `ccc_aws`) + +```json +{ + "Id": "CCC.C01", + "Description": "Requirement description", + "Name": "Requirement name", + "Checks": ["check_name"], + "Attributes": [ + { + "FamilyName": "Cryptography & Key Management", + "FamilyDescription": "Family description", + "Section": "CCC.C01", + "SubSection": "Key Management", + "SubSectionObjective": "Objective description", + "Applicability": ["IaaS", "PaaS", "SaaS"], + "Recommendation": "Recommended action", + "SectionThreatMappings": [{"threat": "T1190"}], + "SectionGuidelineMappings": [{"guideline": "NIST"}] + } + ] +} +``` + +--- + +### Prowler ThreatScore + +**Framework ID format:** `prowler_threatscore_{provider}` (e.g., `prowler_threatscore_aws`) + +Prowler ThreatScore is a custom security scoring framework developed by Prowler that evaluates AWS account security based on **four main pillars**: + +| Pillar | Description | +|--------|-------------| +| **1. IAM** | Identity and Access Management controls (authentication, authorization, credentials) | +| **2. Attack Surface** | Network exposure, public resources, security group rules | +| **3. Logging and Monitoring** | Audit logging, threat detection, forensic readiness | +| **4. Encryption** | Data at rest and in transit encryption | + +**Scoring System:** +- **LevelOfRisk** (1-5): Severity of the security issue + - `5` = Critical (e.g., root MFA, public S3 buckets) + - `4` = High (e.g., user MFA, public EC2) + - `3` = Medium (e.g., password policies, encryption) + - `2` = Low + - `1` = Informational +- **Weight**: Impact multiplier for score calculation + - `1000` = Critical controls (root security, public exposure) + - `100` = High-impact controls (user authentication, monitoring) + - `10` = Standard controls (password policies, encryption) + - `1` = Low-impact controls (best practices) + +```json +{ + "Id": "1.1.1", + "Description": "Ensure MFA is enabled for the 'root' user account", + "Checks": ["iam_root_mfa_enabled"], + "Attributes": [ + { + "Title": "MFA enabled for 'root'", + "Section": "1. 
IAM", + "SubSection": "1.1 Authentication", + "AttributeDescription": "The root user account holds the highest level of privileges within an AWS account. Enabling MFA enhances security by adding an additional layer of protection.", + "AdditionalInformation": "Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.", + "LevelOfRisk": 5, + "Weight": 1000 + } + ] +} +``` + +**Available for providers:** AWS, Kubernetes, M365 + +--- + +## Available Compliance Frameworks + +### AWS (41 frameworks) +| Framework | File Name | +|-----------|-----------| +| CIS 1.4, 1.5, 2.0, 3.0, 4.0, 5.0 | `cis_{version}_aws.json` | +| ISO 27001:2013, 2022 | `iso27001_{year}_aws.json` | +| NIST 800-53 Rev 4, 5 | `nist_800_53_revision_{version}_aws.json` | +| NIST 800-171 Rev 2 | `nist_800_171_revision_2_aws.json` | +| NIST CSF 1.1, 2.0 | `nist_csf_{version}_aws.json` | +| PCI DSS 3.2.1, 4.0 | `pci_{version}_aws.json` | +| HIPAA | `hipaa_aws.json` | +| GDPR | `gdpr_aws.json` | +| SOC 2 | `soc2_aws.json` | +| FedRAMP Low/Moderate | `fedramp_{level}_revision_4_aws.json` | +| ENS RD2022 | `ens_rd2022_aws.json` | +| MITRE ATT&CK | `mitre_attack_aws.json` | +| C5 Germany | `c5_aws.json` | +| CISA | `cisa_aws.json` | +| FFIEC | `ffiec_aws.json` | +| RBI Cyber Security | `rbi_cyber_security_framework_aws.json` | +| AWS Well-Architected | `aws_well_architected_framework_{pillar}_pillar_aws.json` | +| AWS FTR | `aws_foundational_technical_review_aws.json` | +| GxP 21 CFR Part 11, EU Annex 11 | `gxp_{standard}_aws.json` | +| KISA ISMS-P 2023 | `kisa_isms_p_2023_aws.json` | +| NIS2 | `nis2_aws.json` | + +### Azure (15+ frameworks) +| Framework | File Name | +|-----------|-----------| +| CIS 2.0, 2.1, 3.0, 4.0 | `cis_{version}_azure.json` | +| ISO 27001:2022 | `iso27001_2022_azure.json` | +| ENS RD2022 | `ens_rd2022_azure.json` | +| MITRE ATT&CK | `mitre_attack_azure.json` | +| PCI DSS 4.0 | `pci_4.0_azure.json` | +| NIST CSF 2.0 | `nist_csf_2.0_azure.json` | + +### GCP (15+ frameworks) +| Framework | File Name | +|-----------|-----------| +| CIS 2.0, 3.0, 4.0 | `cis_{version}_gcp.json` | +| ISO 27001:2022 | `iso27001_2022_gcp.json` | +| HIPAA | `hipaa_gcp.json` | +| MITRE ATT&CK | `mitre_attack_gcp.json` | +| PCI DSS 4.0 | `pci_4.0_gcp.json` | +| NIST CSF 2.0 | `nist_csf_2.0_gcp.json` | + +### Kubernetes (6 frameworks) +| Framework | File Name | +|-----------|-----------| +| CIS 1.8, 1.10, 1.11 | `cis_{version}_kubernetes.json` | +| ISO 27001:2022 | `iso27001_2022_kubernetes.json` | +| PCI DSS 4.0 | `pci_4.0_kubernetes.json` | + +### Other Providers +- **GitHub:** `cis_1.0_github.json` +- **M365:** `cis_4.0_m365.json`, `iso27001_2022_m365.json` +- **NHN:** `iso27001_2022_nhn.json` + ## Best Practices -1. **Requirement IDs**: Follow the original framework numbering (e.g., "1.1", "2.3.4") -2. **Check Mapping**: Map to existing checks when possible, create new checks only if needed -3. **Completeness**: Include all framework requirements, even if no check exists (document as manual) -4. **Version Control**: Include framework version in the name and file +1. **Requirement IDs**: Follow the original framework numbering exactly (e.g., "1.1", "A.5.1", "T1190", "ac_2_1") +2. **Check Mapping**: Map to existing checks when possible. Use `Checks: []` for manual-only requirements +3. **Completeness**: Include all framework requirements, even those without automated checks +4. 
**Version Control**: Include framework version in `Name` and `Version` fields +5. **File Naming**: Use format `{framework}_{version}_{provider}.json` +6. **Validation**: Prowler validates JSON against Pydantic models at startup - invalid JSON will cause errors ## Commands ```bash # List available frameworks for a provider -poetry run python prowler-cli.py {provider} --list-compliance +prowler {provider} --list-compliance # Run scan with specific compliance framework -poetry run python prowler-cli.py {provider} --compliance {framework} +prowler aws --compliance cis_5.0_aws # Run scan with multiple frameworks -poetry run python prowler-cli.py {provider} --compliance cis_aws_benchmark_v2 pci_dss_3.2.1 +prowler aws --compliance cis_5.0_aws pci_4.0_aws -# Output compliance report -poetry run python prowler-cli.py {provider} --compliance {framework} -M csv json html +# Output compliance report in multiple formats +prowler aws --compliance cis_5.0_aws -M csv json html ``` +## Code References + +- **Compliance Models:** `prowler/lib/check/compliance_models.py` +- **Compliance Processing:** `prowler/lib/check/compliance.py` +- **Compliance Output:** `prowler/lib/outputs/compliance/` + ## Resources -- **Templates**: See [assets/](assets/) for complete CIS framework JSON template -- **Documentation**: See [references/compliance-docs.md](references/compliance-docs.md) for official Prowler Developer Guide links +- **Templates:** See [assets/](assets/) for framework JSON templates +- **Documentation:** See [references/compliance-docs.md](references/compliance-docs.md) for additional resources diff --git a/skills/prowler-compliance/assets/cis_framework.json b/skills/prowler-compliance/assets/cis_framework.json index 817c0ca6aa..c764f07506 100644 --- a/skills/prowler-compliance/assets/cis_framework.json +++ b/skills/prowler-compliance/assets/cis_framework.json @@ -3,7 +3,7 @@ "Name": "CIS Amazon Web Services Foundations Benchmark v5.0.0", "Version": "5.0", "Provider": "AWS", - "Description": "The CIS Amazon Web Services Foundations Benchmark provides prescriptive guidance for configuring security options for a subset of Amazon Web Services.", + "Description": "The CIS Amazon Web Services Foundations Benchmark provides prescriptive guidance for configuring security options for a subset of Amazon Web Services with an emphasis on foundational, testable, and architecture agnostic settings.", "Requirements": [ { "Id": "1.1", @@ -17,13 +17,35 @@ "Profile": "Level 1", "AssessmentStatus": "Manual", "Description": "Ensure contact email and telephone details for AWS accounts are current and map to more than one individual in your organization.", - "RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed.", + "RationaleStatement": "If an AWS account is observed to be behaving in a prohibited or suspicious manner, AWS will attempt to contact the account owner by email and phone using the contact details listed. If this is unsuccessful and the account behavior is not corrected then AWS may suspend the account.", "ImpactStatement": "", "RemediationProcedure": "This activity can only be performed via the AWS Console. Navigate to Account Settings and update contact information.", "AuditProcedure": "This activity can only be performed via the AWS Console. 
Navigate to Account Settings and verify contact information is current.", "AdditionalInformation": "", - "References": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html", - "DefaultValue": "" + "DefaultValue": "", + "References": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact.html" + } + ] + }, + { + "Id": "1.2", + "Description": "Ensure security contact information is registered", + "Checks": [ + "account_security_contact_information_is_registered" + ], + "Attributes": [ + { + "Section": "1 Identity and Access Management", + "Profile": "Level 1", + "AssessmentStatus": "Automated", + "Description": "AWS provides customers with the option to specify the contact information for the account's security team. It is recommended that this information be provided.", + "RationaleStatement": "Specifying security-specific contact information will help ensure that security advisories sent by AWS reach the team in your organization that is best equipped to respond to them.", + "ImpactStatement": "", + "RemediationProcedure": "Navigate to AWS Console > Account > Alternate Contacts and add security contact information.", + "AuditProcedure": "Run: aws account get-alternate-contact --alternate-contact-type SECURITY", + "AdditionalInformation": "", + "DefaultValue": "By default, no security contact is registered.", + "References": "https://docs.aws.amazon.com/accounts/latest/reference/manage-acct-update-contact-alternate.html" } ] }, @@ -38,37 +60,81 @@ "Section": "1 Identity and Access Management", "Profile": "Level 1", "AssessmentStatus": "Automated", - "Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account.", - "RationaleStatement": "Deleting access keys associated with the 'root' user account limits vectors by which the account can be compromised.", + "Description": "The 'root' user account is the most privileged user in an AWS account. AWS Access Keys provide programmatic access to a given AWS account. It is recommended that all access keys associated with the 'root' user account be deleted.", + "RationaleStatement": "Deleting access keys associated with the 'root' user account limits vectors by which the account can be compromised. 
Additionally, deleting the root access keys encourages the creation and use of role based accounts that are least privileged.", "ImpactStatement": "", "RemediationProcedure": "Navigate to IAM console, select root user, Security credentials tab, and delete any access keys.", "AuditProcedure": "Run: aws iam get-account-summary | grep 'AccountAccessKeysPresent'", "AdditionalInformation": "IAM User account root for us-gov cloud regions is not enabled by default.", - "References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html", - "DefaultValue": "" + "DefaultValue": "By default, no root access keys exist.", + "References": "https://docs.aws.amazon.com/general/latest/gr/aws-access-keys-best-practices.html" } ] }, { - "Id": "1.11", - "Description": "Ensure credentials unused for 45 days or more are disabled", + "Id": "1.4", + "Description": "Ensure MFA is enabled for the 'root' user account", "Checks": [ - "iam_user_accesskey_unused", - "iam_user_console_access_unused" + "iam_root_mfa_enabled" ], "Attributes": [ { "Section": "1 Identity and Access Management", "Profile": "Level 1", "AssessmentStatus": "Automated", - "Description": "AWS IAM users can access AWS resources using different types of credentials. It is recommended that all credentials unused for 45 days or more be deactivated or removed.", - "RationaleStatement": "Disabling or removing unnecessary credentials reduces the window of opportunity for compromised accounts.", - "ImpactStatement": "Users with deactivated credentials will lose access until re-enabled.", - "RemediationProcedure": "Use IAM console or CLI to deactivate unused access keys and remove unused passwords.", - "AuditProcedure": "Generate credential report and review password_last_used and access_key_last_used fields.", + "Description": "The 'root' user account is the most privileged user in an AWS account. Multi-factor Authentication (MFA) adds an extra layer of protection on top of a username and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their username and password as well as for an authentication code from their AWS MFA device.", + "RationaleStatement": "Enabling MFA provides increased security for console access as it requires the authenticating principal to possess a device that emits a time-sensitive key and have knowledge of a credential.", + "ImpactStatement": "", + "RemediationProcedure": "Using IAM console, navigate to Dashboard and choose Activate MFA on your root account.", + "AuditProcedure": "Run: aws iam get-account-summary | grep 'AccountMFAEnabled'. Ensure the value is 1.", + "AdditionalInformation": "", + "DefaultValue": "MFA is not enabled by default.", + "References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_root-user.html#id_root-user_manage_mfa" + } + ] + }, + { + "Id": "1.5", + "Description": "Ensure hardware MFA is enabled for the 'root' user account", + "Checks": [ + "iam_root_hardware_mfa_enabled" + ], + "Attributes": [ + { + "Section": "1 Identity and Access Management", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "The 'root' user account is the most privileged user in an AWS account. MFA adds an extra layer of protection on top of a user name and password. With MFA enabled, when a user signs in to an AWS website, they will be prompted for their user name and password as well as for an authentication code from their AWS MFA device. 
For Level 2, it is recommended that the root user account be protected with a hardware MFA.", + "RationaleStatement": "A hardware MFA has a smaller attack surface than a virtual MFA. For example, a hardware MFA does not suffer from the attack surface introduced by the mobile smartphone on which a virtual MFA resides.", + "ImpactStatement": "Using a hardware MFA device instead of a virtual MFA may result in additional hardware costs.", + "RemediationProcedure": "Using IAM console, navigate to Dashboard, select root user, and configure hardware MFA device.", + "AuditProcedure": "Run: aws iam list-virtual-mfa-devices and verify the root account is not using a virtual MFA.", + "AdditionalInformation": "For recommendations on protecting hardware MFA devices, refer to https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_lost-or-broken.html", + "DefaultValue": "MFA is not enabled by default.", + "References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_mfa_enable_physical.html" + } + ] + }, + { + "Id": "2.1.1", + "Description": "Ensure S3 Bucket Policy is set to deny HTTP requests", + "Checks": [ + "s3_bucket_secure_transport_policy" + ], + "Attributes": [ + { + "Section": "2 Storage", + "SubSection": "2.1 Simple Storage Service (S3)", + "Profile": "Level 2", + "AssessmentStatus": "Automated", + "Description": "At the Amazon S3 bucket level, you can configure permissions through a bucket policy making the objects accessible only through HTTPS.", + "RationaleStatement": "By default, Amazon S3 allows both HTTP and HTTPS requests. To achieve only allowing access to Amazon S3 objects through HTTPS you also have to explicitly deny access to HTTP requests. Bucket policies that allow HTTPS requests without explicitly denying HTTP requests will not comply with this recommendation.", + "ImpactStatement": "Enabling this setting will result in rejection of requests that do not use HTTPS for S3 bucket operations.", + "RemediationProcedure": "Add a bucket policy with condition aws:SecureTransport: false that denies all s3 actions.", + "AuditProcedure": "Review bucket policies for Deny statements with aws:SecureTransport: false condition.", "AdditionalInformation": "", - "References": "https://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_getting-report.html", - "DefaultValue": "" + "DefaultValue": "By default, S3 buckets allow both HTTP and HTTPS requests.", + "References": "https://aws.amazon.com/blogs/security/how-to-use-bucket-policies-and-apply-defense-in-depth-to-help-secure-your-amazon-s3-data/" } ] } diff --git a/skills/prowler-compliance/assets/ens_framework.json b/skills/prowler-compliance/assets/ens_framework.json new file mode 100644 index 0000000000..c357c8c78d --- /dev/null +++ b/skills/prowler-compliance/assets/ens_framework.json @@ -0,0 +1,128 @@ +{ + "Framework": "ENS", + "Name": "ENS RD 311/2022 - Categoria Alta", + "Version": "RD2022", + "Provider": "AWS", + "Description": "The accreditation scheme of the ENS (Esquema Nacional de Seguridad - National Security Scheme of Spain) has been developed by the Ministry of Finance and Public Administrations and the CCN (National Cryptological Center). 
This includes the basic principles and minimum requirements necessary for the adequate protection of information.", + "Requirements": [ + { + "Id": "op.acc.1.aws.iam.2", + "Description": "Proveedor de identidad centralizado", + "Attributes": [ + { + "IdGrupoControl": "op.acc.1", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Es muy recomendable la utilizacion de un proveedor de identidades que permita administrar las identidades en un lugar centralizado, en vez de utilizar IAM para ello.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "trazabilidad", + "autenticidad" + ], + "ModoEjecucion": "automatico", + "Dependencias": [] + } + ], + "Checks": [ + "iam_check_saml_providers_sts" + ] + }, + { + "Id": "op.acc.2.aws.iam.4", + "Description": "Requisitos de acceso", + "Attributes": [ + { + "IdGrupoControl": "op.acc.2", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "Se debera delegar en cuentas administradoras la administracion de la organizacion, dejando la cuenta maestra sin uso y con las medidas de seguridad pertinentes.", + "Nivel": "alto", + "Tipo": "requisito", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ], + "ModoEjecucion": "automatico", + "Dependencias": [] + } + ], + "Checks": [ + "iam_avoid_root_usage" + ] + }, + { + "Id": "op.acc.3.r1.aws.iam.1", + "Description": "Segregacion rigurosa", + "Attributes": [ + { + "IdGrupoControl": "op.acc.3.r1", + "Marco": "operacional", + "Categoria": "control de acceso", + "DescripcionControl": "En caso de ser de aplicacion, la segregacion debera tener en cuenta la separacion de las funciones de configuracion y mantenimiento y de auditoria de cualquier otra.", + "Nivel": "alto", + "Tipo": "refuerzo", + "Dimensiones": [ + "confidencialidad", + "integridad", + "trazabilidad", + "autenticidad" + ], + "ModoEjecucion": "automatico", + "Dependencias": [] + } + ], + "Checks": [ + "iam_support_role_created" + ] + }, + { + "Id": "op.exp.8.aws.cloudwatch.1", + "Description": "Registro de la actividad", + "Attributes": [ + { + "IdGrupoControl": "op.exp.8", + "Marco": "operacional", + "Categoria": "explotacion", + "DescripcionControl": "Se registraran las actividades de los usuarios en el sistema, de forma que se pueda identificar que acciones ha realizado cada usuario.", + "Nivel": "medio", + "Tipo": "requisito", + "Dimensiones": [ + "trazabilidad" + ], + "ModoEjecucion": "automatico", + "Dependencias": [] + } + ], + "Checks": [ + "cloudtrail_multi_region_enabled", + "cloudwatch_log_group_retention_policy_specific_days_enabled" + ] + }, + { + "Id": "mp.info.3.aws.s3.1", + "Description": "Cifrado de la informacion", + "Attributes": [ + { + "IdGrupoControl": "mp.info.3", + "Marco": "medidas de proteccion", + "Categoria": "proteccion de la informacion", + "DescripcionControl": "La informacion con un nivel de clasificacion CONFIDENCIAL o superior debera ser cifrada.", + "Nivel": "bajo", + "Tipo": "medida", + "Dimensiones": [ + "confidencialidad" + ], + "ModoEjecucion": "automatico", + "Dependencias": [] + } + ], + "Checks": [ + "s3_bucket_default_encryption", + "s3_bucket_kms_encryption" + ] + } + ] +} diff --git a/skills/prowler-compliance/assets/generic_framework.json b/skills/prowler-compliance/assets/generic_framework.json new file mode 100644 index 0000000000..61a75fa445 --- /dev/null +++ b/skills/prowler-compliance/assets/generic_framework.json @@ -0,0 +1,103 @@ +{ + "Framework": "CUSTOM-FRAMEWORK", + "Name": 
"Custom Security Framework Example v1.0", + "Version": "1.0", + "Provider": "AWS", + "Description": "This is a template for creating custom compliance frameworks using the generic attribute model. Use this when creating frameworks that don't match existing attribute types (CIS, ISO, ENS, MITRE, etc.).", + "Requirements": [ + { + "Id": "SEC-001", + "Description": "Ensure all storage resources are encrypted at rest", + "Name": "Storage Encryption", + "Attributes": [ + { + "ItemId": "SEC-001", + "Section": "Data Protection", + "SubSection": "Encryption", + "SubGroup": "Storage", + "Service": "s3", + "Type": "Automated" + } + ], + "Checks": [ + "s3_bucket_default_encryption", + "rds_instance_storage_encrypted", + "ec2_ebs_volume_encryption" + ] + }, + { + "Id": "SEC-002", + "Description": "Ensure all network traffic is encrypted in transit", + "Name": "Network Encryption", + "Attributes": [ + { + "ItemId": "SEC-002", + "Section": "Data Protection", + "SubSection": "Encryption", + "SubGroup": "Network", + "Service": "multiple", + "Type": "Automated" + } + ], + "Checks": [ + "s3_bucket_secure_transport_policy", + "elb_ssl_listeners", + "cloudfront_distributions_https_enabled" + ] + }, + { + "Id": "IAM-001", + "Description": "Ensure MFA is enabled for all privileged accounts", + "Name": "Multi-Factor Authentication", + "Attributes": [ + { + "ItemId": "IAM-001", + "Section": "Identity and Access Management", + "SubSection": "Authentication", + "SubGroup": "MFA", + "Service": "iam", + "Type": "Automated" + } + ], + "Checks": [ + "iam_root_mfa_enabled", + "iam_user_mfa_enabled_console_access" + ] + }, + { + "Id": "LOG-001", + "Description": "Ensure logging is enabled for all critical services", + "Name": "Centralized Logging", + "Attributes": [ + { + "ItemId": "LOG-001", + "Section": "Logging and Monitoring", + "SubSection": "Audit Logs", + "SubGroup": "CloudTrail", + "Service": "cloudtrail", + "Type": "Automated" + } + ], + "Checks": [ + "cloudtrail_multi_region_enabled", + "cloudtrail_s3_dataevents_read_enabled", + "cloudtrail_s3_dataevents_write_enabled" + ] + }, + { + "Id": "MANUAL-001", + "Description": "Ensure security policies are reviewed annually", + "Name": "Policy Review", + "Attributes": [ + { + "ItemId": "MANUAL-001", + "Section": "Governance", + "SubSection": "Policy Management", + "Service": "manual", + "Type": "Manual" + } + ], + "Checks": [] + } + ] +} diff --git a/skills/prowler-compliance/assets/iso27001_framework.json b/skills/prowler-compliance/assets/iso27001_framework.json new file mode 100644 index 0000000000..1459b5836f --- /dev/null +++ b/skills/prowler-compliance/assets/iso27001_framework.json @@ -0,0 +1,91 @@ +{ + "Framework": "ISO27001", + "Name": "ISO/IEC 27001 Information Security Management Standard 2022", + "Version": "2022", + "Provider": "AWS", + "Description": "ISO (the International Organization for Standardization) and IEC (the International Electrotechnical Commission) form the specialized system for worldwide standardization. 
This framework maps AWS security controls to ISO 27001:2022 requirements.", + "Requirements": [ + { + "Id": "A.5.1", + "Description": "Information security policy and topic-specific policies should be defined, approved by management, published, communicated to and acknowledged by relevant personnel and relevant interested parties, and reviewed at planned intervals and if significant changes occur.", + "Name": "Policies for information security", + "Attributes": [ + { + "Category": "A.5 Organizational controls", + "Objetive_ID": "A.5.1", + "Objetive_Name": "Policies for information security", + "Check_Summary": "Verify that information security policies are defined and implemented through security monitoring services." + } + ], + "Checks": [ + "securityhub_enabled", + "wellarchitected_workload_no_high_or_medium_risks" + ] + }, + { + "Id": "A.5.2", + "Description": "Information security roles and responsibilities should be defined and allocated according to the organisation needs.", + "Name": "Roles and Responsibilities", + "Attributes": [ + { + "Category": "A.5 Organizational controls", + "Objetive_ID": "A.5.2", + "Objetive_Name": "Roles and Responsibilities", + "Check_Summary": "Verify that IAM roles and responsibilities are properly defined." + } + ], + "Checks": [] + }, + { + "Id": "A.5.3", + "Description": "Conflicting duties and conflicting areas of responsibility should be segregated.", + "Name": "Segregation of Duties", + "Attributes": [ + { + "Category": "A.5 Organizational controls", + "Objetive_ID": "A.5.3", + "Objetive_Name": "Segregation of Duties", + "Check_Summary": "Verify that duties are segregated through separate IAM roles." + } + ], + "Checks": [ + "iam_securityaudit_role_created" + ] + }, + { + "Id": "A.8.1", + "Description": "User end point devices should be protected.", + "Name": "User End Point Devices", + "Attributes": [ + { + "Category": "A.8 Technological controls", + "Objetive_ID": "A.8.1", + "Objetive_Name": "User End Point Devices", + "Check_Summary": "Verify that endpoint protection and monitoring are enabled." + } + ], + "Checks": [ + "guardduty_is_enabled", + "ssm_managed_compliant_patching" + ] + }, + { + "Id": "A.8.24", + "Description": "Rules for the effective use of cryptography, including cryptographic key management, should be defined and implemented.", + "Name": "Use of Cryptography", + "Attributes": [ + { + "Category": "A.8 Technological controls", + "Objetive_ID": "A.8.24", + "Objetive_Name": "Use of Cryptography", + "Check_Summary": "Verify that encryption is enabled for data at rest and in transit." + } + ], + "Checks": [ + "s3_bucket_default_encryption", + "rds_instance_storage_encrypted", + "ec2_ebs_volume_encryption" + ] + } + ] +} diff --git a/skills/prowler-compliance/assets/mitre_attack_framework.json b/skills/prowler-compliance/assets/mitre_attack_framework.json new file mode 100644 index 0000000000..8eefa7d2a9 --- /dev/null +++ b/skills/prowler-compliance/assets/mitre_attack_framework.json @@ -0,0 +1,142 @@ +{ + "Framework": "MITRE-ATTACK", + "Name": "MITRE ATT&CK compliance framework", + "Version": "", + "Provider": "AWS", + "Description": "MITRE ATT&CK is a globally-accessible knowledge base of adversary tactics and techniques based on real-world observations. 
The ATT&CK knowledge base is used as a foundation for the development of specific threat models and methodologies in the private sector, in government, and in the cybersecurity product and service community.", + "Requirements": [ + { + "Name": "Exploit Public-Facing Application", + "Id": "T1190", + "Tactics": [ + "Initial Access" + ], + "SubTechniques": [], + "Platforms": [ + "Containers", + "IaaS", + "Linux", + "Network", + "Windows", + "macOS" + ], + "Description": "Adversaries may attempt to exploit a weakness in an Internet-facing host or system to initially access a network. The weakness in the system can be a software bug, a temporary glitch, or a misconfiguration.", + "TechniqueURL": "https://attack.mitre.org/techniques/T1190/", + "Checks": [ + "guardduty_is_enabled", + "inspector2_is_enabled", + "securityhub_enabled", + "elbv2_waf_acl_attached", + "awslambda_function_not_publicly_accessible", + "ec2_instance_public_ip" + ], + "Attributes": [ + { + "AWSService": "Amazon GuardDuty", + "Category": "Detect", + "Value": "Minimal", + "Comment": "GuardDuty can detect when vulnerable publicly facing resources are leveraged to capture data not intended to be viewable." + }, + { + "AWSService": "AWS Web Application Firewall", + "Category": "Protect", + "Value": "Significant", + "Comment": "AWS WAF protects public-facing applications against vulnerabilities including OWASP Top 10 via managed rule sets." + }, + { + "AWSService": "Amazon Inspector", + "Category": "Protect", + "Value": "Partial", + "Comment": "Amazon Inspector can detect known vulnerabilities on various Windows and Linux endpoints." + } + ] + }, + { + "Name": "Valid Accounts", + "Id": "T1078", + "Tactics": [ + "Defense Evasion", + "Persistence", + "Privilege Escalation", + "Initial Access" + ], + "SubTechniques": [ + "T1078.001", + "T1078.002", + "T1078.003", + "T1078.004" + ], + "Platforms": [ + "Azure AD", + "Containers", + "Google Workspace", + "IaaS", + "Linux", + "Network", + "Office 365", + "SaaS", + "Windows", + "macOS" + ], + "Description": "Adversaries may obtain and abuse credentials of existing accounts as a means of gaining Initial Access, Persistence, Privilege Escalation, or Defense Evasion.", + "TechniqueURL": "https://attack.mitre.org/techniques/T1078/", + "Checks": [ + "iam_root_mfa_enabled", + "iam_user_mfa_enabled_console_access", + "iam_no_root_access_key", + "iam_rotate_access_key_90_days", + "iam_user_accesskey_unused", + "cloudtrail_multi_region_enabled" + ], + "Attributes": [ + { + "AWSService": "AWS IAM", + "Category": "Protect", + "Value": "Significant", + "Comment": "IAM MFA and access key rotation help prevent unauthorized access with valid credentials." + }, + { + "AWSService": "AWS CloudTrail", + "Category": "Detect", + "Value": "Significant", + "Comment": "CloudTrail logs all API calls, enabling detection of unauthorized account usage." + } + ] + }, + { + "Name": "Data from Cloud Storage", + "Id": "T1530", + "Tactics": [ + "Collection" + ], + "SubTechniques": [], + "Platforms": [ + "IaaS", + "SaaS" + ], + "Description": "Adversaries may access data from improperly secured cloud storage. 
Many cloud service providers offer solutions for online data object storage.", + "TechniqueURL": "https://attack.mitre.org/techniques/T1530/", + "Checks": [ + "s3_bucket_public_access", + "s3_bucket_policy_public_write_access", + "s3_bucket_acl_prohibited", + "s3_bucket_default_encryption", + "macie_is_enabled" + ], + "Attributes": [ + { + "AWSService": "Amazon S3", + "Category": "Protect", + "Value": "Significant", + "Comment": "S3 bucket policies and ACLs can prevent public access to sensitive data." + }, + { + "AWSService": "Amazon Macie", + "Category": "Detect", + "Value": "Significant", + "Comment": "Macie can detect and alert on sensitive data exposure in S3 buckets." + } + ] + } + ] +} diff --git a/skills/prowler-compliance/assets/prowler_threatscore_framework.json b/skills/prowler-compliance/assets/prowler_threatscore_framework.json new file mode 100644 index 0000000000..1a9aa4ac6f --- /dev/null +++ b/skills/prowler-compliance/assets/prowler_threatscore_framework.json @@ -0,0 +1,189 @@ +{ + "Framework": "ProwlerThreatScore", + "Name": "Prowler ThreatScore Compliance Framework for AWS", + "Version": "1.0", + "Provider": "AWS", + "Description": "Prowler ThreatScore Compliance Framework for AWS ensures that the AWS account is compliant taking into account four main pillars: Identity and Access Management, Attack Surface, Logging and Monitoring, and Encryption. Each check has a LevelOfRisk (1-5) and Weight that contribute to calculating the overall threat score.", + "Requirements": [ + { + "Id": "1.1.1", + "Description": "Ensure MFA is enabled for the 'root' user account", + "Checks": [ + "iam_root_mfa_enabled" + ], + "Attributes": [ + { + "Title": "MFA enabled for 'root'", + "Section": "1. IAM", + "SubSection": "1.1 Authentication", + "AttributeDescription": "The root user account holds the highest level of privileges within an AWS account. Enabling Multi-Factor Authentication (MFA) enhances security by adding an additional layer of protection beyond just a username and password.", + "AdditionalInformation": "Enabling MFA enhances console security by requiring the authenticating user to both possess a time-sensitive key-generating device and have knowledge of their credentials.", + "LevelOfRisk": 5, + "Weight": 1000 + } + ] + }, + { + "Id": "1.1.2", + "Description": "Ensure hardware MFA is enabled for the 'root' user account", + "Checks": [ + "iam_root_hardware_mfa_enabled" + ], + "Attributes": [ + { + "Title": "Hardware MFA enabled for 'root'", + "Section": "1. IAM", + "SubSection": "1.1 Authentication", + "AttributeDescription": "The root user account in AWS has the highest level of privileges. A hardware MFA has a smaller attack surface compared to a virtual MFA.", + "AdditionalInformation": "Unlike a virtual MFA, which relies on a mobile device that may be vulnerable to malware, a hardware MFA operates independently, reducing exposure to potential security threats.", + "LevelOfRisk": 5, + "Weight": 1000 + } + ] + }, + { + "Id": "1.1.13", + "Description": "Ensure no root account access key exists", + "Checks": [ + "iam_no_root_access_key" + ], + "Attributes": [ + { + "Title": "No root access key", + "Section": "1. IAM", + "SubSection": "1.1 Authentication", + "AttributeDescription": "The root account in AWS has unrestricted administrative privileges. 
It is recommended that no access keys be associated with the root account.", + "AdditionalInformation": "Eliminating root access keys reduces the risk of unauthorized access and enforces the use of role-based IAM accounts with least privilege.", + "LevelOfRisk": 5, + "Weight": 1000 + } + ] + }, + { + "Id": "2.1.1", + "Description": "Ensure EC2 instances do not have public IP addresses", + "Checks": [ + "ec2_instance_public_ip" + ], + "Attributes": [ + { + "Title": "EC2 without public IP", + "Section": "2. Attack Surface", + "SubSection": "2.1 Network Exposure", + "AttributeDescription": "EC2 instances with public IP addresses are directly accessible from the internet, increasing the attack surface.", + "AdditionalInformation": "Use private subnets and NAT gateways or VPC endpoints for internet access when needed.", + "LevelOfRisk": 4, + "Weight": 100 + } + ] + }, + { + "Id": "2.2.1", + "Description": "Ensure S3 buckets are not publicly accessible", + "Checks": [ + "s3_bucket_public_access" + ], + "Attributes": [ + { + "Title": "S3 bucket not public", + "Section": "2. Attack Surface", + "SubSection": "2.2 Storage Exposure", + "AttributeDescription": "Publicly accessible S3 buckets can lead to data breaches and unauthorized access to sensitive information.", + "AdditionalInformation": "Enable S3 Block Public Access settings at the account and bucket level.", + "LevelOfRisk": 5, + "Weight": 1000 + } + ] + }, + { + "Id": "3.1.1", + "Description": "Ensure CloudTrail is enabled in all regions", + "Checks": [ + "cloudtrail_multi_region_enabled" + ], + "Attributes": [ + { + "Title": "CloudTrail multi-region enabled", + "Section": "3. Logging and Monitoring", + "SubSection": "3.1 Audit Logging", + "AttributeDescription": "CloudTrail provides a record of API calls made in your AWS account. Multi-region trails ensure all activity is captured.", + "AdditionalInformation": "Without comprehensive logging, security incidents may go undetected and forensic analysis becomes impossible.", + "LevelOfRisk": 5, + "Weight": 1000 + } + ] + }, + { + "Id": "3.2.1", + "Description": "Ensure GuardDuty is enabled", + "Checks": [ + "guardduty_is_enabled" + ], + "Attributes": [ + { + "Title": "GuardDuty enabled", + "Section": "3. Logging and Monitoring", + "SubSection": "3.2 Threat Detection", + "AttributeDescription": "Amazon GuardDuty is a threat detection service that continuously monitors for malicious activity and unauthorized behavior.", + "AdditionalInformation": "GuardDuty analyzes CloudTrail, VPC Flow Logs, and DNS logs to identify threats.", + "LevelOfRisk": 4, + "Weight": 100 + } + ] + }, + { + "Id": "4.1.1", + "Description": "Ensure S3 buckets have default encryption enabled", + "Checks": [ + "s3_bucket_default_encryption" + ], + "Attributes": [ + { + "Title": "S3 default encryption", + "Section": "4. Encryption", + "SubSection": "4.1 Data at Rest", + "AttributeDescription": "Enabling default encryption on S3 buckets ensures all objects are encrypted when stored.", + "AdditionalInformation": "Use SSE-S3, SSE-KMS, or SSE-C depending on your key management requirements.", + "LevelOfRisk": 3, + "Weight": 10 + } + ] + }, + { + "Id": "4.1.2", + "Description": "Ensure EBS volumes are encrypted", + "Checks": [ + "ec2_ebs_volume_encryption" + ], + "Attributes": [ + { + "Title": "EBS volume encryption", + "Section": "4. 
Encryption", + "SubSection": "4.1 Data at Rest", + "AttributeDescription": "EBS volume encryption protects data at rest on EC2 instance storage.", + "AdditionalInformation": "Enable default EBS encryption at the account level to ensure all new volumes are encrypted.", + "LevelOfRisk": 3, + "Weight": 10 + } + ] + }, + { + "Id": "4.2.1", + "Description": "Ensure data in transit is encrypted using TLS", + "Checks": [ + "s3_bucket_secure_transport_policy" + ], + "Attributes": [ + { + "Title": "S3 secure transport", + "Section": "4. Encryption", + "SubSection": "4.2 Data in Transit", + "AttributeDescription": "Requiring HTTPS for S3 bucket access ensures data is encrypted during transmission.", + "AdditionalInformation": "Use bucket policies to deny requests that do not use TLS.", + "LevelOfRisk": 3, + "Weight": 10 + } + ] + } + ] +} diff --git a/skills/prowler-compliance/references/compliance-docs.md b/skills/prowler-compliance/references/compliance-docs.md index 6d53252cfe..62d619953f 100644 --- a/skills/prowler-compliance/references/compliance-docs.md +++ b/skills/prowler-compliance/references/compliance-docs.md @@ -1,15 +1,137 @@ # Compliance Framework Documentation -## Local Documentation +## Code References -For detailed compliance framework patterns, see: +Key files for understanding and modifying compliance frameworks: -- `docs/developer-guide/security-compliance-framework.mdx` - Complete guide for creating compliance frameworks (CIS, NIST, PCI-DSS, SOC2, GDPR) +| File | Purpose | +|------|---------| +| `prowler/lib/check/compliance_models.py` | Pydantic models defining attribute structures for each framework type | +| `prowler/lib/check/compliance.py` | Core compliance processing logic | +| `prowler/lib/check/utils.py` | Utility functions including `list_compliance_modules()` | +| `prowler/lib/outputs/compliance/` | Framework-specific output generators | +| `prowler/compliance/{provider}/` | JSON compliance framework definitions | -## Contents +## Attribute Model Classes -The documentation covers: -- Framework JSON structure -- Framework metadata (name, version, provider) -- Requirements array with IDs, descriptions, and attributes -- Check mappings for each requirement +Each framework type has a specific Pydantic model in `compliance_models.py`: + +| Framework | Model Class | +|-----------|-------------| +| CIS | `CIS_Requirement_Attribute` | +| ISO 27001 | `ISO27001_2013_Requirement_Attribute` | +| ENS | `ENS_Requirement_Attribute` | +| MITRE ATT&CK | `Mitre_Requirement` (uses different structure) | +| AWS Well-Architected | `AWS_Well_Architected_Requirement_Attribute` | +| KISA ISMS-P | `KISA_ISMSP_Requirement_Attribute` | +| Prowler ThreatScore | `Prowler_ThreatScore_Requirement_Attribute` | +| CCC | `CCC_Requirement_Attribute` | +| C5 Germany | `C5Germany_Requirement_Attribute` | +| Generic/Fallback | `Generic_Compliance_Requirement_Attribute` | + +## How Compliance Frameworks are Loaded + +1. `Compliance.get_bulk(provider)` is called at startup +2. Scans `prowler/compliance/{provider}/` for `.json` files +3. Each file is parsed using `load_compliance_framework()` +4. Pydantic validates against `Compliance` model +5. Framework is stored in dictionary with filename (without `.json`) as key + +## How Checks Map to Compliance + +1. After loading, `update_checks_metadata_with_compliance()` is called +2. For each check, it finds all compliance requirements that reference it +3. Compliance info is attached to `CheckMetadata.Compliance` list +4. 
During output, `get_check_compliance()` retrieves mappings per finding + +## File Naming Convention + +``` +{framework}_{version}_{provider}.json +``` + +Examples: +- `cis_5.0_aws.json` +- `iso27001_2022_azure.json` +- `mitre_attack_gcp.json` +- `ens_rd2022_aws.json` +- `nist_800_53_revision_5_aws.json` + +## Validation + +Prowler validates compliance JSON at startup. Invalid files cause: +- `ValidationError` logged with details +- Application exit with error code + +Common validation errors: +- Missing required fields (`Id`, `Description`, `Checks`, `Attributes`) +- Invalid enum values (e.g., `Profile` must be "Level 1" or "Level 2" for CIS) +- Type mismatches (e.g., `Checks` must be array of strings) + +## Adding a New Framework + +1. Create JSON file in `prowler/compliance/{provider}/` +2. Use appropriate attribute model (see table above) +3. Map existing checks to requirements via `Checks` array +4. Use empty `Checks: []` for manual-only requirements +5. Test with `prowler {provider} --list-compliance` to verify loading +6. Run `prowler {provider} --compliance {framework_name}` to test execution + +## Templates + +See `assets/` directory for example templates: +- `cis_framework.json` - CIS Benchmark template +- `iso27001_framework.json` - ISO 27001 template +- `ens_framework.json` - ENS (Spain) template +- `mitre_attack_framework.json` - MITRE ATT&CK template +- `prowler_threatscore_framework.json` - Prowler ThreatScore template +- `generic_framework.json` - Generic/custom framework template + +## Prowler ThreatScore Details + +Prowler ThreatScore is a custom security scoring framework that calculates an overall security posture score based on: + +### Four Pillars +1. **IAM (Identity and Access Management)** + - SubSections: Authentication, Authorization, Credentials Management + +2. **Attack Surface** + - SubSections: Network Exposure, Storage Exposure, Service Exposure + +3. **Logging and Monitoring** + - SubSections: Audit Logging, Threat Detection, Alerting + +4. 
**Encryption** + - SubSections: Data at Rest, Data in Transit + +### Scoring Algorithm +The ThreatScore uses `LevelOfRisk` and `Weight` to calculate severity: + +| LevelOfRisk | Weight | Example Controls | +|-------------|--------|------------------| +| 5 (Critical) | 1000 | Root MFA, No root access keys, Public S3 buckets | +| 4 (High) | 100 | User MFA, Public EC2, GuardDuty enabled | +| 3 (Medium) | 10 | Password policies, EBS encryption, CloudTrail | +| 2 (Low) | 1-10 | Best practice recommendations | +| 1 (Info) | 1 | Informational controls | + +### ID Numbering Convention +- `1.x.x` - IAM controls +- `2.x.x` - Attack Surface controls +- `3.x.x` - Logging and Monitoring controls +- `4.x.x` - Encryption controls + +## External Resources + +### Official Framework Documentation +- [CIS Benchmarks](https://www.cisecurity.org/cis-benchmarks) +- [ISO 27001:2022](https://www.iso.org/standard/27001) +- [NIST 800-53](https://csrc.nist.gov/publications/detail/sp/800-53/rev-5/final) +- [NIST CSF](https://www.nist.gov/cyberframework) +- [PCI DSS](https://www.pcisecuritystandards.org/) +- [MITRE ATT&CK](https://attack.mitre.org/) +- [ENS (Spain)](https://www.ccn-cert.cni.es/es/ens.html) + +### Prowler Documentation +- [Prowler Docs - Compliance](https://docs.prowler.com/projects/prowler-open-source/en/latest/) +- [Prowler GitHub](https://github.com/prowler-cloud/prowler) From c2a938c8a5bf9c8c7ae25f636f9142a8c38816f4 Mon Sep 17 00:00:00 2001 From: pedrooot Date: Mon, 12 Jan 2026 11:07:49 +0100 Subject: [PATCH 09/19] chore(changelog): update with latest changes --- prowler/CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/prowler/CHANGELOG.md b/prowler/CHANGELOG.md index b202455fa0..c22bb739e7 100644 --- a/prowler/CHANGELOG.md +++ b/prowler/CHANGELOG.md @@ -16,6 +16,7 @@ All notable changes to the **Prowler SDK** are documented in this file. 
- `compute_instance_group_load_balancer_attached` check for GCP provider [(#9695)](https://github.com/prowler-cloud/prowler/pull/9695) - `compute_instance_single_network_interface` check for GCP provider [(#9702)](https://github.com/prowler-cloud/prowler/pull/9702) - `compute_image_not_publicly_shared` check for GCP provider [(#9718)](https://github.com/prowler-cloud/prowler/pull/9718) +- Improve prowler-compliance skill with complete framework attribute structures and templates [(#9772)](https://github.com/prowler-cloud/prowler/pull/9772) ### Changed - Update AWS Step Functions service metadata to new format [(#9432)](https://github.com/prowler-cloud/prowler/pull/9432) From 117a0f69e8458bade91af0320ad04703c6fcc518 Mon Sep 17 00:00:00 2001 From: pedrooot Date: Tue, 13 Jan 2026 10:28:06 +0100 Subject: [PATCH 10/19] chore(skills): update with compliance changes --- skills/prowler-compliance/SKILL.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/skills/prowler-compliance/SKILL.md b/skills/prowler-compliance/SKILL.md index 0a1a14541d..31088c4f74 100644 --- a/skills/prowler-compliance/SKILL.md +++ b/skills/prowler-compliance/SKILL.md @@ -7,10 +7,6 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.1" - scope: [root, sdk] - auto_invoke: - - "Creating/updating compliance frameworks" - - "Mapping checks to compliance controls" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- From 9eff350921ac92b17e9c079198bc0be15cbb8e4c Mon Sep 17 00:00:00 2001 From: pedrooot Date: Tue, 13 Jan 2026 10:30:14 +0100 Subject: [PATCH 11/19] chore(skills): update with compliance changes --- skills/prowler-compliance/SKILL.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/skills/prowler-compliance/SKILL.md b/skills/prowler-compliance/SKILL.md index 31088c4f74..0a1a14541d 100644 --- a/skills/prowler-compliance/SKILL.md +++ b/skills/prowler-compliance/SKILL.md @@ -7,6 +7,10 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.1" + scope: [root, sdk] + auto_invoke: + - "Creating/updating compliance frameworks" + - "Mapping checks to compliance controls" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- From 63d6f45550d98913fc265693abdd914585b33c08 Mon Sep 17 00:00:00 2001 From: "Andoni A." <14891798+andoniaf@users.noreply.github.com> Date: Tue, 13 Jan 2026 09:22:37 +0100 Subject: [PATCH 12/19] feat(skills): add prowler-compliance-review skill Add new Agent Skill for reviewing PRs that add or modify compliance frameworks. This skill provides: - Review checklist with pass/fail criteria - Validation script to verify check existence and JSON validity - Decision tree for review recommendations - Dashboard file pattern reference The skill complements prowler-compliance (creates frameworks) by focusing on the review/validation process. 
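As an illustration, a reviewer can run the bundled validator against a framework JSON from the repository root (the Azure CIS path below is only an example taken from the script's docstring):

```bash
# Validate a compliance framework JSON before approving the PR
python3 skills/prowler-compliance-review/assets/validate_compliance.py \
  prowler/compliance/azure/cis_5.0_azure.json
```

The script exits non-zero when it finds invalid JSON, duplicate requirement IDs, or checks that do not exist in the codebase, so it can also be invoked from automation if desired.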
--- AGENTS.md | 1 + skills/prowler-compliance-review/SKILL.md | 181 +++++++++++++++ .../assets/validate_compliance.py | 218 ++++++++++++++++++ .../references/review-checklist.md | 57 +++++ 4 files changed, 457 insertions(+) create mode 100644 skills/prowler-compliance-review/SKILL.md create mode 100644 skills/prowler-compliance-review/assets/validate_compliance.py create mode 100644 skills/prowler-compliance-review/references/review-checklist.md diff --git a/AGENTS.md b/AGENTS.md index 1367a5dc5d..dbd4767041 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -36,6 +36,7 @@ Use these skills for detailed patterns on-demand: | `prowler-test-api` | API testing (pytest-django + RLS) | [SKILL.md](skills/prowler-test-api/SKILL.md) | | `prowler-test-ui` | E2E testing (Playwright) | [SKILL.md](skills/prowler-test-ui/SKILL.md) | | `prowler-compliance` | Compliance framework structure | [SKILL.md](skills/prowler-compliance/SKILL.md) | +| `prowler-compliance-review` | Review compliance framework PRs | [SKILL.md](skills/prowler-compliance-review/SKILL.md) | | `prowler-provider` | Add new cloud providers | [SKILL.md](skills/prowler-provider/SKILL.md) | | `prowler-ci` | CI checks and PR gates (GitHub Actions) | [SKILL.md](skills/prowler-ci/SKILL.md) | | `prowler-pr` | Pull request conventions | [SKILL.md](skills/prowler-pr/SKILL.md) | diff --git a/skills/prowler-compliance-review/SKILL.md b/skills/prowler-compliance-review/SKILL.md new file mode 100644 index 0000000000..1d1c6a8922 --- /dev/null +++ b/skills/prowler-compliance-review/SKILL.md @@ -0,0 +1,181 @@ +--- +name: prowler-compliance-review +description: > + Reviews Pull Requests that add or modify compliance frameworks. + Trigger: When reviewing PRs with compliance framework changes, CIS/NIST/PCI-DSS additions, or compliance JSON files. +license: Apache-2.0 +metadata: + author: prowler-cloud + version: "1.0" +allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task +--- + +## When to Use + +- Reviewing PRs that add new compliance frameworks +- Reviewing PRs that modify existing compliance frameworks +- Validating compliance framework JSON structure before merge + +--- + +## Review Checklist (Critical) + +| Check | Command/Method | Pass Criteria | +|-------|----------------|---------------| +| JSON Valid | `python3 -m json.tool file.json` | No syntax errors | +| All Checks Exist | Run validation script | 0 missing checks | +| No Duplicate IDs | Run validation script | 0 duplicate requirement IDs | +| CHANGELOG Entry | Manual review | Present under correct version | +| Dashboard File | Compare with existing | Follows established pattern | +| Framework Metadata | Manual review | All required fields populated | + +--- + +## Validation Commands + +```bash +# 1. Validate JSON syntax +python3 -m json.tool prowler/compliance/{provider}/{framework}.json > /dev/null \ + && echo "Valid JSON" || echo "INVALID JSON" + +# 2. Run full validation script +python3 skills/prowler-compliance-review/assets/validate_compliance.py \ + prowler/compliance/{provider}/{framework}.json + +# 3. Compare dashboard with existing (find similar framework) +diff dashboard/compliance/{new_framework}.py \ + dashboard/compliance/{existing_framework}.py +``` + +--- + +## Decision Tree + +``` +JSON Valid? +├── No → FAIL: Fix JSON syntax errors +└── Yes ↓ + All Checks Exist in Codebase? + ├── Missing checks → FAIL: Add missing checks or remove from framework + └── All exist ↓ + Duplicate Requirement IDs? 
+ ├── Yes → FAIL: Fix duplicate IDs + └── No ↓ + CHANGELOG Entry Present? + ├── No → REQUEST CHANGES: Add CHANGELOG entry + └── Yes ↓ + Dashboard File Follows Pattern? + ├── No → REQUEST CHANGES: Fix dashboard pattern + └── Yes ↓ + Framework Metadata Complete? + ├── No → REQUEST CHANGES: Add missing metadata + └── Yes → APPROVE +``` + +--- + +## Framework Structure Reference + +Compliance frameworks are JSON files in: `prowler/compliance/{provider}/{framework}.json` + +```json +{ + "Framework": "CIS", + "Name": "CIS Provider Benchmark vX.Y.Z", + "Version": "X.Y", + "Provider": "AWS|Azure|GCP|...", + "Description": "Framework description...", + "Requirements": [ + { + "Id": "1.1", + "Description": "Requirement description", + "Checks": ["check_name_1", "check_name_2"], + "Attributes": [ + { + "Section": "1 Section Name", + "SubSection": "1.1 Subsection (optional)", + "Profile": "Level 1|Level 2", + "AssessmentStatus": "Automated|Manual", + "Description": "...", + "RationaleStatement": "...", + "ImpactStatement": "...", + "RemediationProcedure": "...", + "AuditProcedure": "...", + "AdditionalInformation": "...", + "References": "...", + "DefaultValue": "..." + } + ] + } + ] +} +``` + +--- + +## Common Issues + +| Issue | How to Detect | Resolution | +|-------|---------------|------------| +| Missing checks | Validation script reports missing | Add check implementation or remove from Checks array | +| Duplicate IDs | Validation script reports duplicates | Ensure each requirement has unique ID | +| Empty Checks for Automated | AssessmentStatus is Automated but Checks is empty | Add checks or change to Manual | +| Wrong file location | Framework not in `prowler/compliance/{provider}/` | Move to correct directory | +| Missing dashboard file | No corresponding `dashboard/compliance/{framework}.py` | Create dashboard file following pattern | +| CHANGELOG missing | Not under correct version section | Add entry to prowler/CHANGELOG.md | + +--- + +## Dashboard File Pattern + +Dashboard files must be in `dashboard/compliance/` and follow this exact pattern: + +```python +import warnings + +from dashboard.common_methods import get_section_containers_cis + +warnings.filterwarnings("ignore") + + +def get_table(data): + + aux = data[ + [ + "REQUIREMENTS_ID", + "REQUIREMENTS_DESCRIPTION", + "REQUIREMENTS_ATTRIBUTES_SECTION", + "CHECKID", + "STATUS", + "REGION", + "ACCOUNTID", + "RESOURCEID", + ] + ].copy() + + return get_section_containers_cis( + aux, "REQUIREMENTS_ID", "REQUIREMENTS_ATTRIBUTES_SECTION" + ) +``` + +--- + +## Agent Integration + +When using `prowler-pr-reviewer` agent for compliance PRs: + +1. Agent detects compliance file changes in PR +2. Loads this skill for review patterns +3. Creates worktree to checkout PR branch +4. Runs `validate_compliance.py` from assets/ +5. Checks CHANGELOG and dashboard files +6. Generates structured review report +7. 
Returns recommendation (APPROVE / REQUEST CHANGES) + +--- + +## Resources + +- **Validation Script**: See [assets/validate_compliance.py](assets/validate_compliance.py) +- **Related Skills**: See [prowler-compliance](../prowler-compliance/SKILL.md) for creating frameworks +- **Documentation**: See [references/review-checklist.md](references/review-checklist.md) diff --git a/skills/prowler-compliance-review/assets/validate_compliance.py b/skills/prowler-compliance-review/assets/validate_compliance.py new file mode 100644 index 0000000000..92a5809b0f --- /dev/null +++ b/skills/prowler-compliance-review/assets/validate_compliance.py @@ -0,0 +1,218 @@ +#!/usr/bin/env python3 +""" +Prowler Compliance Framework Validator + +Validates compliance framework JSON files for: +- JSON syntax validity +- Check existence in codebase +- Duplicate requirement IDs +- Required field completeness +- Assessment status consistency + +Usage: + python validate_compliance.py + +Example: + python validate_compliance.py prowler/compliance/azure/cis_5.0_azure.json +""" + +import json +import os +import sys +from pathlib import Path + + +def find_project_root(): + """Find the Prowler project root directory.""" + current = Path(__file__).resolve() + for parent in current.parents: + if (parent / "prowler" / "providers").exists(): + return parent + return None + + +def get_existing_checks(project_root: Path, provider: str) -> set: + """Find all existing checks for a provider in the codebase.""" + checks = set() + services_path = project_root / "prowler" / "providers" / provider.lower() / "services" + + if not services_path.exists(): + return checks + + for service_dir in services_path.iterdir(): + if service_dir.is_dir() and not service_dir.name.startswith("__"): + for check_dir in service_dir.iterdir(): + if check_dir.is_dir() and not check_dir.name.startswith("__"): + check_file = check_dir / f"{check_dir.name}.py" + if check_file.exists(): + checks.add(check_dir.name) + + return checks + + +def validate_compliance_framework(json_path: str) -> dict: + """Validate a compliance framework JSON file.""" + results = { + "valid": True, + "errors": [], + "warnings": [], + "stats": {} + } + + # 1. Check file exists + if not os.path.exists(json_path): + results["valid"] = False + results["errors"].append(f"File not found: {json_path}") + return results + + # 2. Validate JSON syntax + try: + with open(json_path, "r") as f: + data = json.load(f) + except json.JSONDecodeError as e: + results["valid"] = False + results["errors"].append(f"Invalid JSON syntax: {e}") + return results + + # 3. Check required top-level fields + required_fields = ["Framework", "Name", "Version", "Provider", "Description", "Requirements"] + for field in required_fields: + if field not in data: + results["valid"] = False + results["errors"].append(f"Missing required field: {field}") + + if not results["valid"]: + return results + + # 4. Extract provider + provider = data.get("Provider", "").lower() + + # 5. Find project root and existing checks + project_root = find_project_root() + if project_root: + existing_checks = get_existing_checks(project_root, provider) + else: + existing_checks = set() + results["warnings"].append("Could not find project root - skipping check existence validation") + + # 6. 
Validate requirements + requirements = data.get("Requirements", []) + all_checks = set() + requirement_ids = [] + automated_count = 0 + manual_count = 0 + empty_automated = [] + + for req in requirements: + req_id = req.get("Id", "UNKNOWN") + requirement_ids.append(req_id) + + # Collect checks + checks = req.get("Checks", []) + all_checks.update(checks) + + # Check assessment status + attributes = req.get("Attributes", [{}]) + if attributes: + status = attributes[0].get("AssessmentStatus", "Unknown") + if status == "Automated": + automated_count += 1 + if not checks: + empty_automated.append(req_id) + elif status == "Manual": + manual_count += 1 + + # 7. Check for duplicate IDs + seen_ids = set() + duplicates = [] + for req_id in requirement_ids: + if req_id in seen_ids: + duplicates.append(req_id) + seen_ids.add(req_id) + + if duplicates: + results["valid"] = False + results["errors"].append(f"Duplicate requirement IDs: {duplicates}") + + # 8. Check for missing checks + if existing_checks: + missing_checks = all_checks - existing_checks + if missing_checks: + results["valid"] = False + results["errors"].append(f"Missing checks in codebase ({len(missing_checks)}): {sorted(missing_checks)}") + + # 9. Warn about empty automated + if empty_automated: + results["warnings"].append(f"Automated requirements with no checks: {empty_automated}") + + # 10. Compile statistics + results["stats"] = { + "framework": data.get("Framework"), + "name": data.get("Name"), + "version": data.get("Version"), + "provider": data.get("Provider"), + "total_requirements": len(requirements), + "automated_requirements": automated_count, + "manual_requirements": manual_count, + "unique_checks_referenced": len(all_checks), + "checks_found_in_codebase": len(all_checks - (all_checks - existing_checks)) if existing_checks else "N/A", + "missing_checks": len(all_checks - existing_checks) if existing_checks else "N/A" + } + + return results + + +def print_report(results: dict): + """Print a formatted validation report.""" + print("\n" + "=" * 60) + print("PROWLER COMPLIANCE FRAMEWORK VALIDATION REPORT") + print("=" * 60) + + stats = results.get("stats", {}) + if stats: + print(f"\nFramework: {stats.get('name', 'N/A')}") + print(f"Provider: {stats.get('provider', 'N/A')}") + print(f"Version: {stats.get('version', 'N/A')}") + print("-" * 40) + print(f"Total Requirements: {stats.get('total_requirements', 0)}") + print(f" - Automated: {stats.get('automated_requirements', 0)}") + print(f" - Manual: {stats.get('manual_requirements', 0)}") + print(f"Unique Checks: {stats.get('unique_checks_referenced', 0)}") + print(f"Checks in Codebase: {stats.get('checks_found_in_codebase', 'N/A')}") + print(f"Missing Checks: {stats.get('missing_checks', 'N/A')}") + + print("\n" + "-" * 40) + + if results["errors"]: + print("\nERRORS:") + for error in results["errors"]: + print(f" [X] {error}") + + if results["warnings"]: + print("\nWARNINGS:") + for warning in results["warnings"]: + print(f" [!] 
{warning}") + + print("\n" + "-" * 40) + if results["valid"]: + print("RESULT: PASS - Framework is valid") + else: + print("RESULT: FAIL - Framework has errors") + print("=" * 60 + "\n") + + +def main(): + if len(sys.argv) < 2: + print("Usage: python validate_compliance.py ") + print("Example: python validate_compliance.py prowler/compliance/azure/cis_5.0_azure.json") + sys.exit(1) + + json_path = sys.argv[1] + results = validate_compliance_framework(json_path) + print_report(results) + + sys.exit(0 if results["valid"] else 1) + + +if __name__ == "__main__": + main() diff --git a/skills/prowler-compliance-review/references/review-checklist.md b/skills/prowler-compliance-review/references/review-checklist.md new file mode 100644 index 0000000000..d8673d8c03 --- /dev/null +++ b/skills/prowler-compliance-review/references/review-checklist.md @@ -0,0 +1,57 @@ +# Compliance PR Review References + +## Related Skills + +- [prowler-compliance](../../prowler-compliance/SKILL.md) - Creating compliance frameworks +- [prowler-pr](../../prowler-pr/SKILL.md) - PR conventions and checklist + +## Documentation + +- [Prowler Developer Guide](https://docs.prowler.com/developer-guide/introduction) +- [Compliance Framework Structure](https://docs.prowler.com/developer-guide/compliance) + +## File Locations + +| File Type | Location | +|-----------|----------| +| Compliance JSON | `prowler/compliance/{provider}/{framework}.json` | +| Dashboard | `dashboard/compliance/{framework}_{provider}.py` | +| CHANGELOG | `prowler/CHANGELOG.md` | +| Checks | `prowler/providers/{provider}/services/{service}/{check}/` | + +## Validation Script + +Run the validation script from the project root: + +```bash +python3 skills/prowler-compliance-review/assets/validate_compliance.py \ + prowler/compliance/{provider}/{framework}.json +``` + +## PR Review Summary Template + +When completing a compliance framework review, use this summary format: + +```markdown +## Compliance Framework Review Summary + +| Check | Result | +|-------|--------| +| JSON Valid | PASS/FAIL | +| All Checks Exist | PASS/FAIL (N missing) | +| No Duplicate IDs | PASS/FAIL | +| CHANGELOG Entry | PASS/FAIL | +| Dashboard File | PASS/FAIL | + +### Statistics +- Total Requirements: N +- Automated: N +- Manual: N +- Unique Checks: N + +### Recommendation +APPROVE / REQUEST CHANGES / FAIL + +### Issues Found +1. ... +``` From ef74be54dab8d4a7f9ef440afaa692e7c9a60602 Mon Sep 17 00:00:00 2001 From: "Andoni A." <14891798+andoniaf@users.noreply.github.com> Date: Tue, 13 Jan 2026 09:29:17 +0100 Subject: [PATCH 13/19] fix(skills): align prowler-compliance-review with repo patterns - Rename "Validation Commands" to "Commands" - Replace "Agent Integration" with "Testing the Compliance Framework" --- skills/prowler-compliance-review/SKILL.md | 26 ++++++++++++++--------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/skills/prowler-compliance-review/SKILL.md b/skills/prowler-compliance-review/SKILL.md index 1d1c6a8922..eab77efd35 100644 --- a/skills/prowler-compliance-review/SKILL.md +++ b/skills/prowler-compliance-review/SKILL.md @@ -31,7 +31,7 @@ allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- -## Validation Commands +## Commands ```bash # 1. Validate JSON syntax @@ -160,17 +160,23 @@ def get_table(data): --- -## Agent Integration +## Testing the Compliance Framework -When using `prowler-pr-reviewer` agent for compliance PRs: +After validation passes, test the framework with Prowler: -1. 
Agent detects compliance file changes in PR -2. Loads this skill for review patterns -3. Creates worktree to checkout PR branch -4. Runs `validate_compliance.py` from assets/ -5. Checks CHANGELOG and dashboard files -6. Generates structured review report -7. Returns recommendation (APPROVE / REQUEST CHANGES) +```bash +# Verify framework is detected +prowler {provider} --list-compliance | grep {framework} + +# Run a quick test with a single check from the framework +prowler {provider} --compliance {framework} --check {check_name} + +# Run full compliance scan (dry-run with limited checks) +prowler {provider} --compliance {framework} --checks-limit 5 + +# Generate compliance report in multiple formats +prowler {provider} --compliance {framework} -M csv json html +``` --- From 5ffdf26c0fd75fcd56590ceaa791b1162943433f Mon Sep 17 00:00:00 2001 From: "Andoni A." <14891798+andoniaf@users.noreply.github.com> Date: Tue, 13 Jan 2026 09:31:54 +0100 Subject: [PATCH 14/19] fix(skills): use consistent command style in prowler-compliance-review --- skills/prowler-compliance-review/SKILL.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/skills/prowler-compliance-review/SKILL.md b/skills/prowler-compliance-review/SKILL.md index eab77efd35..47a270b410 100644 --- a/skills/prowler-compliance-review/SKILL.md +++ b/skills/prowler-compliance-review/SKILL.md @@ -166,16 +166,16 @@ After validation passes, test the framework with Prowler: ```bash # Verify framework is detected -prowler {provider} --list-compliance | grep {framework} +poetry run python prowler-cli.py {provider} --list-compliance | grep {framework} # Run a quick test with a single check from the framework -prowler {provider} --compliance {framework} --check {check_name} +poetry run python prowler-cli.py {provider} --compliance {framework} --check {check_name} # Run full compliance scan (dry-run with limited checks) -prowler {provider} --compliance {framework} --checks-limit 5 +poetry run python prowler-cli.py {provider} --compliance {framework} --checks-limit 5 # Generate compliance report in multiple formats -prowler {provider} --compliance {framework} -M csv json html +poetry run python prowler-cli.py {provider} --compliance {framework} -M csv json html ``` --- From 747bbb856e5f388a608027faa525d6c49ef56ee4 Mon Sep 17 00:00:00 2001 From: "Andoni A." 
<14891798+andoniaf@users.noreply.github.com> Date: Tue, 13 Jan 2026 10:41:31 +0100 Subject: [PATCH 15/19] fix(skills): fix sync.sh macOS compatibility and add missing metadata - Fix awk multi-line string handling in sync.sh by using temp file - Add scope and auto_invoke metadata to prowler-compliance-review skill - Regenerate AGENTS.md Auto-invoke sections --- AGENTS.md | 1 + prowler/AGENTS.md | 1 + skills/prowler-compliance-review/SKILL.md | 2 ++ skills/skill-sync/assets/sync.sh | 16 ++++++++++++---- 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index dbd4767041..62ebb587f1 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -71,6 +71,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Mocking AWS with moto in tests | `prowler-test-sdk` | | Regenerate AGENTS.md Auto-invoke tables (sync.sh) | `skill-sync` | | Review PR requirements: template, title conventions, changelog gate | `prowler-pr` | +| Reviewing compliance framework PRs | `prowler-compliance-review` | | Testing RLS tenant isolation | `prowler-test-api` | | Troubleshoot why a skill is missing from AGENTS.md auto-invoke | `skill-sync` | | Understand CODEOWNERS/labeler-based automation | `prowler-ci` | diff --git a/prowler/AGENTS.md b/prowler/AGENTS.md index 85217c9c8e..3b43e1c5d0 100644 --- a/prowler/AGENTS.md +++ b/prowler/AGENTS.md @@ -19,6 +19,7 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: | Creating/updating compliance frameworks | `prowler-compliance` | | Mapping checks to compliance controls | `prowler-compliance` | | Mocking AWS with moto in tests | `prowler-test-sdk` | +| Reviewing compliance framework PRs | `prowler-compliance-review` | | Updating existing checks and metadata | `prowler-sdk-check` | | Writing Prowler SDK tests | `prowler-test-sdk` | | Writing Python tests with pytest | `pytest` | diff --git a/skills/prowler-compliance-review/SKILL.md b/skills/prowler-compliance-review/SKILL.md index 47a270b410..a494858eba 100644 --- a/skills/prowler-compliance-review/SKILL.md +++ b/skills/prowler-compliance-review/SKILL.md @@ -7,6 +7,8 @@ license: Apache-2.0 metadata: author: prowler-cloud version: "1.0" + scope: [root, sdk] + auto_invoke: "Reviewing compliance framework PRs" allowed-tools: Read, Edit, Write, Glob, Grep, Bash, WebFetch, WebSearch, Task --- diff --git a/skills/skill-sync/assets/sync.sh b/skills/skill-sync/assets/sync.sh index f2e14a3dc4..99997d52d6 100755 --- a/skills/skill-sync/assets/sync.sh +++ b/skills/skill-sync/assets/sync.sh @@ -253,12 +253,17 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: echo "$auto_invoke_section" echo "" else + # Write new section to temp file (avoids awk multi-line string issues on macOS) + section_file=$(mktemp) + echo "$auto_invoke_section" > "$section_file" + # Check if Auto-invoke section exists if grep -q "### Auto-invoke Skills" "$agents_path"; then # Replace existing section (up to next --- or ## heading) - awk -v new_section="$auto_invoke_section" ' + awk ' /^### Auto-invoke Skills/ { - print new_section + while ((getline line < "'"$section_file"'") > 0) print line + close("'"$section_file"'") skip = 1 next } @@ -272,13 +277,14 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: echo -e "${GREEN} ✓ Updated Auto-invoke section${NC}" else # Insert after Skills Reference blockquote - awk -v new_section="$auto_invoke_section" ' + awk ' /^>.*SKILL\.md\)$/ && !inserted { print getline if (/^$/) { print "" - print new_section + 
while ((getline line < "'"$section_file"'") > 0) print line + close("'"$section_file"'") print "" inserted = 1 next @@ -289,6 +295,8 @@ When performing these actions, ALWAYS invoke the corresponding skill FIRST: mv "$agents_path.tmp" "$agents_path" echo -e "${GREEN} ✓ Inserted Auto-invoke section${NC}" fi + + rm -f "$section_file" fi done From 0cab0dbeb13de4660b576aa61b5a7d21bd77190e Mon Sep 17 00:00:00 2001 From: "Andoni A." <14891798+andoniaf@users.noreply.github.com> Date: Tue, 13 Jan 2026 10:45:14 +0100 Subject: [PATCH 16/19] docs(skills): update Mermaid diagram with new skills Add prowler-compliance-review, prowler-ci, and skill-sync to the AI Skills architecture diagram and Skills Included table. --- docs/developer-guide/ai-skills.mdx | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/docs/developer-guide/ai-skills.mdx b/docs/developer-guide/ai-skills.mdx index d595c3ab11..6a0787dac8 100644 --- a/docs/developer-guide/ai-skills.mdx +++ b/docs/developer-guide/ai-skills.mdx @@ -128,8 +128,10 @@ flowchart TB P5["prowler-mcp"] P6["prowler-provider"] P7["prowler-compliance"] - P8["prowler-docs"] - P9["prowler-pr"] + P8["prowler-compliance-review"] + P9["prowler-docs"] + P10["prowler-pr"] + P11["prowler-ci"] end subgraph TESTING["Testing Skills"] @@ -140,6 +142,7 @@ flowchart TB subgraph META["Meta Skills"] M1["skill-creator"] + M2["skill-sync"] end end @@ -189,9 +192,9 @@ flowchart TB | Type | Skills | |------|--------| | **Generic** | typescript, react-19, nextjs-15, tailwind-4, pytest, playwright, django-drf, zod-4, zustand-5, ai-sdk-5 | -| **Prowler** | prowler, prowler-sdk-check, prowler-api, prowler-ui, prowler-mcp, prowler-provider, prowler-compliance, prowler-docs, prowler-pr | +| **Prowler** | prowler, prowler-sdk-check, prowler-api, prowler-ui, prowler-mcp, prowler-provider, prowler-compliance, prowler-compliance-review, prowler-docs, prowler-pr, prowler-ci | | **Testing** | prowler-test-sdk, prowler-test-api, prowler-test-ui | -| **Meta** | skill-creator | +| **Meta** | skill-creator, skill-sync | ## Skill Structure From 3ff14ab275edc02ae409160d0ce2a45d8c7e49fe Mon Sep 17 00:00:00 2001 From: pedrooot Date: Tue, 13 Jan 2026 11:00:47 +0100 Subject: [PATCH 17/19] chore(merge): fix format --- .../assets/validate_compliance.py | 44 +++++++++++++------ 1 file changed, 30 insertions(+), 14 deletions(-) diff --git a/skills/prowler-compliance-review/assets/validate_compliance.py b/skills/prowler-compliance-review/assets/validate_compliance.py index 92a5809b0f..5a8bcfcaa5 100644 --- a/skills/prowler-compliance-review/assets/validate_compliance.py +++ b/skills/prowler-compliance-review/assets/validate_compliance.py @@ -34,7 +34,9 @@ def find_project_root(): def get_existing_checks(project_root: Path, provider: str) -> set: """Find all existing checks for a provider in the codebase.""" checks = set() - services_path = project_root / "prowler" / "providers" / provider.lower() / "services" + services_path = ( + project_root / "prowler" / "providers" / provider.lower() / "services" + ) if not services_path.exists(): return checks @@ -52,12 +54,7 @@ def get_existing_checks(project_root: Path, provider: str) -> set: def validate_compliance_framework(json_path: str) -> dict: """Validate a compliance framework JSON file.""" - results = { - "valid": True, - "errors": [], - "warnings": [], - "stats": {} - } + results = {"valid": True, "errors": [], "warnings": [], "stats": {}} # 1. 
Check file exists if not os.path.exists(json_path): @@ -75,7 +72,14 @@ def validate_compliance_framework(json_path: str) -> dict: return results # 3. Check required top-level fields - required_fields = ["Framework", "Name", "Version", "Provider", "Description", "Requirements"] + required_fields = [ + "Framework", + "Name", + "Version", + "Provider", + "Description", + "Requirements", + ] for field in required_fields: if field not in data: results["valid"] = False @@ -93,7 +97,9 @@ def validate_compliance_framework(json_path: str) -> dict: existing_checks = get_existing_checks(project_root, provider) else: existing_checks = set() - results["warnings"].append("Could not find project root - skipping check existence validation") + results["warnings"].append( + "Could not find project root - skipping check existence validation" + ) # 6. Validate requirements requirements = data.get("Requirements", []) @@ -139,11 +145,15 @@ def validate_compliance_framework(json_path: str) -> dict: missing_checks = all_checks - existing_checks if missing_checks: results["valid"] = False - results["errors"].append(f"Missing checks in codebase ({len(missing_checks)}): {sorted(missing_checks)}") + results["errors"].append( + f"Missing checks in codebase ({len(missing_checks)}): {sorted(missing_checks)}" + ) # 9. Warn about empty automated if empty_automated: - results["warnings"].append(f"Automated requirements with no checks: {empty_automated}") + results["warnings"].append( + f"Automated requirements with no checks: {empty_automated}" + ) # 10. Compile statistics results["stats"] = { @@ -155,8 +165,12 @@ def validate_compliance_framework(json_path: str) -> dict: "automated_requirements": automated_count, "manual_requirements": manual_count, "unique_checks_referenced": len(all_checks), - "checks_found_in_codebase": len(all_checks - (all_checks - existing_checks)) if existing_checks else "N/A", - "missing_checks": len(all_checks - existing_checks) if existing_checks else "N/A" + "checks_found_in_codebase": len(all_checks - (all_checks - existing_checks)) + if existing_checks + else "N/A", + "missing_checks": len(all_checks - existing_checks) + if existing_checks + else "N/A", } return results @@ -204,7 +218,9 @@ def print_report(results: dict): def main(): if len(sys.argv) < 2: print("Usage: python validate_compliance.py ") - print("Example: python validate_compliance.py prowler/compliance/azure/cis_5.0_azure.json") + print( + "Example: python validate_compliance.py prowler/compliance/azure/cis_5.0_azure.json" + ) sys.exit(1) json_path = sys.argv[1] From 3628eeb82ac30f7bbafd62d8682cf9c02cc45701 Mon Sep 17 00:00:00 2001 From: "Andoni A." <14891798+andoniaf@users.noreply.github.com> Date: Tue, 13 Jan 2026 11:02:01 +0100 Subject: [PATCH 18/19] ci(sdk): skip CI for skills and agent docs changes Add skills/** and **/AGENTS.md to files_ignore in SDK workflows to skip unnecessary CI runs for documentation-only changes. 
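These workflows gate every later step on `steps.check-changes.outputs.any_changed == 'true'` (visible in the hunks that follow), so a push whose changed files all fall under `files_ignore` short-circuits the SDK suites entirely. The sketch below is a minimal local approximation of that decision, handy for sanity-checking a pattern list before editing the workflows; the `glob_to_regex`/`would_trigger_sdk_ci` helpers and the trimmed `IGNORED` list are illustrative only and do not reproduce the changed-files action's real matcher.

```python
import re

# Representative subset of the files_ignore lists in the diffs below (illustrative only).
IGNORED = ["skills/**", "**/AGENTS.md", "docs/**", "ui/**", "README.md"]


def glob_to_regex(pattern: str) -> re.Pattern:
    """Translate a files_ignore-style glob: '**' may span directories, '*' stays inside one."""
    out, i = [], 0
    while i < len(pattern):
        if pattern.startswith("**/", i):
            out.append(r"(?:.*/)?")  # optional directory prefix, e.g. '**/AGENTS.md'
            i += 3
        elif pattern.startswith("**", i):
            out.append(r".*")        # trailing '**', e.g. 'skills/**'
            i += 2
        elif pattern[i] == "*":
            out.append(r"[^/]*")     # a single '*' never crosses a path separator
            i += 1
        else:
            out.append(re.escape(pattern[i]))
            i += 1
    return re.compile("^" + "".join(out) + "$")


def would_trigger_sdk_ci(changed_files: list[str]) -> bool:
    """True when at least one changed file is NOT covered by the ignore list."""
    regexes = [glob_to_regex(p) for p in IGNORED]
    return any(not any(r.match(path) for r in regexes) for path in changed_files)


# Doc-only change set: every file is ignored, so the SDK jobs would be skipped.
print(would_trigger_sdk_ci(["skills/prowler-pr/SKILL.md", "api/AGENTS.md"]))       # False
# Touching provider code still triggers the full suite.
print(would_trigger_sdk_ci(["prowler/providers/aws/services/s3/s3_service.py"]))   # True
```

Both example change sets mirror the intent of this patch: skills and AGENTS.md edits stay out of the SDK pipelines, while provider code keeps full CI coverage.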
--- .github/workflows/sdk-code-quality.yml | 2 ++ .github/workflows/sdk-container-checks.yml | 2 ++ .github/workflows/sdk-security.yml | 2 ++ .github/workflows/sdk-tests.yml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/.github/workflows/sdk-code-quality.yml b/.github/workflows/sdk-code-quality.yml index 068b482f1c..85ef158f3b 100644 --- a/.github/workflows/sdk-code-quality.yml +++ b/.github/workflows/sdk-code-quality.yml @@ -47,6 +47,7 @@ jobs: ui/** dashboard/** mcp_server/** + skills/** README.md mkdocs.yml .backportrc.json @@ -55,6 +56,7 @@ jobs: examples/** .gitignore contrib/** + **/AGENTS.md - name: Install Poetry if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/sdk-container-checks.yml b/.github/workflows/sdk-container-checks.yml index b85f0a6ebe..7a0323c216 100644 --- a/.github/workflows/sdk-container-checks.yml +++ b/.github/workflows/sdk-container-checks.yml @@ -78,6 +78,7 @@ jobs: ui/** dashboard/** mcp_server/** + skills/** README.md mkdocs.yml .backportrc.json @@ -86,6 +87,7 @@ jobs: examples/** .gitignore contrib/** + **/AGENTS.md - name: Set up Docker Buildx if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/sdk-security.yml b/.github/workflows/sdk-security.yml index 8a102d34ac..01f94d842a 100644 --- a/.github/workflows/sdk-security.yml +++ b/.github/workflows/sdk-security.yml @@ -42,6 +42,7 @@ jobs: ui/** dashboard/** mcp_server/** + skills/** README.md mkdocs.yml .backportrc.json @@ -50,6 +51,7 @@ jobs: examples/** .gitignore contrib/** + **/AGENTS.md - name: Install Poetry if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/sdk-tests.yml b/.github/workflows/sdk-tests.yml index d55af082a8..b49ffd39f5 100644 --- a/.github/workflows/sdk-tests.yml +++ b/.github/workflows/sdk-tests.yml @@ -47,6 +47,7 @@ jobs: ui/** dashboard/** mcp_server/** + skills/** README.md mkdocs.yml .backportrc.json @@ -55,6 +56,7 @@ jobs: examples/** .gitignore contrib/** + **/AGENTS.md - name: Install Poetry if: steps.check-changes.outputs.any_changed == 'true' From 622397c6d34d1adcbcd828f735322b5e3f508f00 Mon Sep 17 00:00:00 2001 From: "Andoni A." <14891798+andoniaf@users.noreply.github.com> Date: Tue, 13 Jan 2026 11:11:03 +0100 Subject: [PATCH 19/19] ci(api,ui): skip CI for AGENTS.md changes Add AGENTS.md to files_ignore in API and UI workflows to skip unnecessary CI runs for agent documentation changes. 
--- .github/workflows/api-code-quality.yml | 1 + .github/workflows/api-container-checks.yml | 1 + .github/workflows/api-security.yml | 1 + .github/workflows/api-tests.yml | 1 + .github/workflows/ui-container-checks.yml | 1 + .github/workflows/ui-tests.yml | 1 + 6 files changed, 6 insertions(+) diff --git a/.github/workflows/api-code-quality.yml b/.github/workflows/api-code-quality.yml index 0b6131538d..c5cc02a298 100644 --- a/.github/workflows/api-code-quality.yml +++ b/.github/workflows/api-code-quality.yml @@ -46,6 +46,7 @@ jobs: api/docs/** api/README.md api/CHANGELOG.md + api/AGENTS.md - name: Setup Python with Poetry if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/api-container-checks.yml b/.github/workflows/api-container-checks.yml index d63f75894d..e1bca8091c 100644 --- a/.github/workflows/api-container-checks.yml +++ b/.github/workflows/api-container-checks.yml @@ -74,6 +74,7 @@ jobs: api/docs/** api/README.md api/CHANGELOG.md + api/AGENTS.md - name: Set up Docker Buildx if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/api-security.yml b/.github/workflows/api-security.yml index d764f82236..285262ce7f 100644 --- a/.github/workflows/api-security.yml +++ b/.github/workflows/api-security.yml @@ -46,6 +46,7 @@ jobs: api/docs/** api/README.md api/CHANGELOG.md + api/AGENTS.md - name: Setup Python with Poetry if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/api-tests.yml b/.github/workflows/api-tests.yml index 44b4141dc2..ec235fd33b 100644 --- a/.github/workflows/api-tests.yml +++ b/.github/workflows/api-tests.yml @@ -86,6 +86,7 @@ jobs: api/docs/** api/README.md api/CHANGELOG.md + api/AGENTS.md - name: Setup Python with Poetry if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/ui-container-checks.yml b/.github/workflows/ui-container-checks.yml index 8329132450..4c027f1cb2 100644 --- a/.github/workflows/ui-container-checks.yml +++ b/.github/workflows/ui-container-checks.yml @@ -73,6 +73,7 @@ jobs: files_ignore: | ui/CHANGELOG.md ui/README.md + ui/AGENTS.md - name: Set up Docker Buildx if: steps.check-changes.outputs.any_changed == 'true' diff --git a/.github/workflows/ui-tests.yml b/.github/workflows/ui-tests.yml index c8382f6711..e657e56960 100644 --- a/.github/workflows/ui-tests.yml +++ b/.github/workflows/ui-tests.yml @@ -42,6 +42,7 @@ jobs: files_ignore: | ui/CHANGELOG.md ui/README.md + ui/AGENTS.md - name: Setup Node.js ${{ env.NODE_VERSION }} if: steps.check-changes.outputs.any_changed == 'true'
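As a final sanity check after this series, the validator introduced in `skills/prowler-compliance-review/assets/validate_compliance.py` can be exercised without a real framework PR. The sketch below, assumed to run from the repository root on the PR branch, loads the script, writes a throwaway framework file carrying only the required top-level fields plus one Manual requirement, and prints the result dict; the fixture values (framework name, requirement id) are invented for illustration, while the function and field names come from the script itself.

```python
import importlib.util
import json
import tempfile
from pathlib import Path

# Load the validator added by this PR without installing anything.
SCRIPT = Path("skills/prowler-compliance-review/assets/validate_compliance.py")
spec = importlib.util.spec_from_file_location("validate_compliance", SCRIPT)
validator = importlib.util.module_from_spec(spec)
spec.loader.exec_module(validator)

# Throwaway framework: only the required top-level fields plus one Manual requirement,
# so validation does not depend on any check existing in the codebase.
framework = {
    "Framework": "Example-Framework",          # illustrative, not a real framework
    "Name": "Example Framework (smoke test)",
    "Version": "1.0",
    "Provider": "AWS",
    "Description": "Minimal fixture used only to exercise the validator.",
    "Requirements": [
        {"Id": "1.1", "Checks": [], "Attributes": [{"AssessmentStatus": "Manual"}]},
    ],
}

with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
    json.dump(framework, tmp)

results = validator.validate_compliance_framework(tmp.name)
print("valid:", results["valid"])          # expected: True
print("errors:", results["errors"])        # expected: []
print("warnings:", results["warnings"])    # may warn if run outside a Prowler checkout
print("stats:", results["stats"])

Path(tmp.name).unlink()  # clean up the fixture
```

Because the lone requirement is Manual with no checks, the check-existence pass has nothing to resolve, so this smoke test stays green whether or not `find_project_root()` locates a full Prowler checkout; outside one, the validator only records a warning.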