diff --git a/.env.example b/.env.example new file mode 100644 index 0000000..750bb3c --- /dev/null +++ b/.env.example @@ -0,0 +1,39 @@ +# Server Configuration +PORT=8080 +HOST=0.0.0.0 +ENVIRONMENT=development # development, staging, production +READ_TIMEOUT=15s +WRITE_TIMEOUT=15s +IDLE_TIMEOUT=60s +SHUTDOWN_TIMEOUT=30s + +# Database Configuration +DB_TYPE=file # file or postgres +DB_FILE_PATH=posts.json + +# PostgreSQL Configuration (when DB_TYPE=postgres) +DB_HOST=localhost +DB_PORT=5432 +DB_USER=postgres +DB_PASSWORD=postgres +DB_NAME=postanalyzer +DB_SSL_MODE=disable +DB_MAX_CONNS=25 +DB_MIN_CONNS=5 + +# Security Configuration +RATE_LIMIT_REQUESTS=100 +RATE_LIMIT_WINDOW=1m +MAX_BODY_SIZE=1048576 # 1MB in bytes +ALLOWED_ORIGINS=* # Comma-separated list or * for all +TRUSTED_PROXIES= # Comma-separated list of trusted proxy IPs + +# Logging Configuration +LOG_LEVEL=info # debug, info, warn, error +LOG_FORMAT=json # json or text +LOG_OUTPUT=stdout # stdout or file path +LOG_TIME_FORMAT=2006-01-02T15:04:05Z07:00 + +# External API Configuration +JSONPLACEHOLDER_URL=https://jsonplaceholder.typicode.com/posts +HTTP_TIMEOUT=30s diff --git a/.github/workflows/ci-cd.yml b/.github/workflows/ci-cd.yml new file mode 100644 index 0000000..2429f68 --- /dev/null +++ b/.github/workflows/ci-cd.yml @@ -0,0 +1,199 @@ +name: CI/CD Pipeline + +on: + push: + branches: [ main, master, claude/* ] + pull_request: + branches: [ main, master ] + +env: + GO_VERSION: '1.21' + DOCKER_IMAGE: post-analyzer + +jobs: + # Linting and code quality + lint: + name: Lint Code + runs-on: ubuntu-latest + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Run golangci-lint + uses: golangci/golangci-lint-action@v4 + with: + version: latest + args: --timeout=5m + + # Unit and integration tests + test: + name: Run Tests + runs-on: ubuntu-latest + services: + postgres: + image: postgres:16-alpine 
+ env: + POSTGRES_DB: testdb + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + ports: + - 5432:5432 + options: >- + --health-cmd pg_isready + --health-interval 10s + --health-timeout 5s + --health-retries 5 + + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Cache Go modules + uses: actions/cache@v4 + with: + path: | + ~/.cache/go-build + ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + - name: Download dependencies + run: go mod download + + - name: Run tests + run: go test -v -race -coverprofile=coverage.txt -covermode=atomic ./... + env: + DB_HOST: localhost + DB_PORT: 5432 + DB_USER: postgres + DB_PASSWORD: postgres + DB_NAME: testdb + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + file: ./coverage.txt + fail_ci_if_error: false + + # Build and verify + build: + name: Build Application + runs-on: ubuntu-latest + needs: [lint, test] + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: ${{ env.GO_VERSION }} + + - name: Build application + run: | + go build -v -ldflags="-w -s" -o post-analyzer main_new.go + + - name: Upload build artifact + uses: actions/upload-artifact@v4 + with: + name: post-analyzer + path: post-analyzer + retention-days: 1 + + # Docker build and push + docker: + name: Build and Push Docker Image + runs-on: ubuntu-latest + needs: [build] + if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master') + steps: + - name: Checkout code + uses: actions/checkout@v4 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 + + - name: Log in to Docker Hub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + if: 
github.event_name == 'push'
+
+      - name: Extract metadata
+        id: meta
+        uses: docker/metadata-action@v5
+        with:
+          images: ${{ secrets.DOCKER_USERNAME }}/${{ env.DOCKER_IMAGE }}
+          tags: |
+            type=ref,event=branch
+            type=ref,event=pr
+            type=semver,pattern={{version}}
+            type=semver,pattern={{major}}.{{minor}}
+            type=sha,prefix={{branch}}-
+            type=raw,value=latest,enable={{is_default_branch}}
+
+      - name: Build and push Docker image
+        uses: docker/build-push-action@v5
+        with:
+          context: .
+          push: ${{ github.event_name == 'push' }}
+          tags: ${{ steps.meta.outputs.tags }}
+          labels: ${{ steps.meta.outputs.labels }}
+          cache-from: type=gha
+          cache-to: type=gha,mode=max
+
+  # Security scanning
+  security:
+    name: Security Scan
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Run Trivy vulnerability scanner
+        # Pinned to a release tag instead of @master: third-party actions on a
+        # mutable branch can change (or be compromised) without notice.
+        uses: aquasecurity/trivy-action@0.24.0
+        with:
+          scan-type: 'fs'
+          scan-ref: '.'
+          format: 'sarif'
+          output: 'trivy-results.sarif'
+
+      - name: Upload Trivy results to GitHub Security
+        uses: github/codeql-action/upload-sarif@v3
+        with:
+          sarif_file: 'trivy-results.sarif'
+
+  # Deployment (optional - customize for your deployment target)
+  deploy:
+    name: Deploy Application
+    runs-on: ubuntu-latest
+    needs: [docker]
+    # Match the docker job's branch condition: both main and master are treated
+    # as default branches there, so deploy on either to avoid a silent skip.
+    if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/master')
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Deploy to production
+        run: |
+          echo "Add your deployment steps here"
+          echo "Examples: kubectl apply, helm upgrade, SSH to server, etc."
+ # Uncomment and customize based on your deployment method: + # - name: Deploy to Kubernetes + # run: | + # kubectl set image deployment/post-analyzer post-analyzer=${{ secrets.DOCKER_USERNAME }}/${{ env.DOCKER_IMAGE }}:latest + # + # - name: Deploy to Render + # run: | + # curl -X POST ${{ secrets.RENDER_DEPLOY_HOOK }} diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..a075cea --- /dev/null +++ b/.gitignore @@ -0,0 +1,58 @@ +# Binaries for programs and plugins +*.exe +*.exe~ +*.dll +*.so +*.dylib + +# Test binary, built with `go test -c` +*.test + +# Output of the go coverage tool +*.out +coverage.txt +coverage.html + +# Dependency directories +vendor/ + +# Go workspace file +go.work + +# Environment variables +.env +.env.local +.env.*.local + +# IDE files +.idea/ +.vscode/ +*.swp +*.swo +*~ + +# OS files +.DS_Store +Thumbs.db + +# Application specific +posts.json +*.log +logs/ + +# Build output +bin/ +dist/ +build/ + +# Database +*.db +*.sqlite +*.sqlite3 + +# Docker volumes +data/ + +# Temporary files +tmp/ +temp/ diff --git a/API_DOCUMENTATION.md b/API_DOCUMENTATION.md new file mode 100644 index 0000000..9d2db48 --- /dev/null +++ b/API_DOCUMENTATION.md @@ -0,0 +1,688 @@ +# Post Analyzer API Documentation + +**Version:** 2.0.0 +**Base URL:** `http://localhost:8080/api/v1` + +## Overview + +The Post Analyzer API provides a comprehensive RESTful interface for managing and analyzing posts. The API features include: + +- ✅ Full CRUD operations for posts +- ✅ Advanced filtering and pagination +- ✅ Bulk operations +- ✅ Character frequency analytics +- ✅ Data export (JSON/CSV) +- ✅ Request validation and error handling +- ✅ Rate limiting and security + +## Authentication + +Currently, the API is open and does not require authentication. Authentication will be added in a future release. 
+ +## Rate Limiting + +- **Default Limit:** 100 requests per minute per IP +- **Headers:** Rate limit status is available in response headers +- **429 Response:** Returns when limit is exceeded + +## Base Response Format + +### Success Response +```json +{ + "data": { /* response data */ }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:30:00Z" + } +} +``` + +### Error Response +```json +{ + "error": { + "code": "ERROR_CODE", + "message": "Human readable error message", + "fields": { + "fieldName": "Field-specific error" + } + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:30:00Z" + } +} +``` + +## Endpoints + +### 1. List Posts + +Retrieve a paginated list of posts with optional filtering and sorting. + +**Endpoint:** `GET /api/v1/posts` + +**Query Parameters:** + +| Parameter | Type | Description | Default | +|-----------|------|-------------|---------| +| `page` | integer | Page number | 1 | +| `pageSize` | integer | Items per page (max 100) | 20 | +| `userId` | integer | Filter by user ID | - | +| `search` | string | Search in title and body | - | +| `sortBy` | string | Sort field (id, title, createdAt, updatedAt) | id | +| `sortOrder` | string | Sort order (asc, desc) | desc | + +**Example Request:** +```bash +curl "http://localhost:8080/api/v1/posts?page=1&pageSize=10&sortBy=createdAt&sortOrder=desc" +``` + +**Example Response:** +```json +{ + "data": [ + { + "id": 1, + "userId": 1, + "title": "Sample Post", + "body": "This is the post content...", + "createdAt": "2025-01-16T10:00:00Z", + "updatedAt": "2025-01-16T10:00:00Z" + } + ], + "pagination": { + "page": 1, + "pageSize": 10, + "totalItems": 100, + "totalPages": 10, + "hasNext": true, + "hasPrev": false + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:30:00Z" + } +} +``` + +### 2. Get Post by ID + +Retrieve a single post by its ID. 
+ +**Endpoint:** `GET /api/v1/posts/{id}` + +**Path Parameters:** +- `id` (integer, required): Post ID + +**Example Request:** +```bash +curl http://localhost:8080/api/v1/posts/1 +``` + +**Example Response:** +```json +{ + "data": { + "id": 1, + "userId": 1, + "title": "Sample Post", + "body": "This is the post content...", + "createdAt": "2025-01-16T10:00:00Z", + "updatedAt": "2025-01-16T10:00:00Z" + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:30:00Z" + } +} +``` + +**Error Response (404):** +```json +{ + "error": { + "code": "NOT_FOUND", + "message": "Post not found" + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:30:00Z" + } +} +``` + +### 3. Create Post + +Create a new post. + +**Endpoint:** `POST /api/v1/posts` + +**Request Body:** +```json +{ + "userId": 1, + "title": "My New Post", + "body": "This is the content of my new post." +} +``` + +**Validation Rules:** +- `title`: Required, 1-500 characters +- `body`: Required, 1-10,000 characters +- `userId`: Optional, defaults to 1 + +**Example Request:** +```bash +curl -X POST http://localhost:8080/api/v1/posts \ + -H "Content-Type: application/json" \ + -d '{ + "userId": 1, + "title": "My New Post", + "body": "This is the content of my new post." 
+ }' +``` + +**Example Response (201 Created):** +```json +{ + "data": { + "id": 101, + "userId": 1, + "title": "My New Post", + "body": "This is the content of my new post.", + "createdAt": "2025-01-16T10:30:00Z", + "updatedAt": "2025-01-16T10:30:00Z" + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:30:00Z" + } +} +``` + +**Validation Error Response (422):** +```json +{ + "error": { + "code": "VALIDATION_FAILED", + "message": "validation failed", + "fields": { + "title": "title is required", + "body": "body too long (max 10000 characters)" + } + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:30:00Z" + } +} +``` + +### 4. Update Post + +Update an existing post. + +**Endpoint:** `PUT /api/v1/posts/{id}` + +**Path Parameters:** +- `id` (integer, required): Post ID + +**Request Body:** +```json +{ + "title": "Updated Title", + "body": "Updated content..." +} +``` + +**Note:** All fields are optional. Only provided fields will be updated. + +**Example Request:** +```bash +curl -X PUT http://localhost:8080/api/v1/posts/1 \ + -H "Content-Type: application/json" \ + -d '{ + "title": "Updated Title", + "body": "Updated content..." + }' +``` + +**Example Response:** +```json +{ + "data": { + "id": 1, + "userId": 1, + "title": "Updated Title", + "body": "Updated content...", + "createdAt": "2025-01-16T10:00:00Z", + "updatedAt": "2025-01-16T10:35:00Z" + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:35:00Z" + } +} +``` + +### 5. Delete Post + +Delete a post by ID. 
+ +**Endpoint:** `DELETE /api/v1/posts/{id}` + +**Path Parameters:** +- `id` (integer, required): Post ID + +**Example Request:** +```bash +curl -X DELETE http://localhost:8080/api/v1/posts/1 +``` + +**Example Response:** +```json +{ + "message": "Post deleted successfully", + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:40:00Z" + } +} +``` + +### 6. Bulk Create Posts + +Create multiple posts in a single request. + +**Endpoint:** `POST /api/v1/posts/bulk` + +**Request Body:** +```json +{ + "posts": [ + { + "userId": 1, + "title": "First Post", + "body": "Content of first post" + }, + { + "userId": 1, + "title": "Second Post", + "body": "Content of second post" + } + ] +} +``` + +**Limits:** +- Minimum: 1 post +- Maximum: 1000 posts per request + +**Example Request:** +```bash +curl -X POST http://localhost:8080/api/v1/posts/bulk \ + -H "Content-Type: application/json" \ + -d '{ + "posts": [ + {"userId": 1, "title": "Post 1", "body": "Content 1"}, + {"userId": 1, "title": "Post 2", "body": "Content 2"} + ] + }' +``` + +**Example Response (201/207):** +```json +{ + "data": { + "created": 2, + "failed": 0, + "errors": [], + "postIds": [102, 103] + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:45:00Z" + } +} +``` + +**Partial Success (207 Multi-Status):** +```json +{ + "data": { + "created": 1, + "failed": 1, + "errors": [ + "post 2: validation failed" + ], + "postIds": [102] + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:45:00Z" + } +} +``` + +### 7. Export Posts + +Export posts in JSON or CSV format. 
+ +**Endpoint:** `GET /api/v1/posts/export` + +**Query Parameters:** + +| Parameter | Type | Description | Default | +|-----------|------|-------------|---------| +| `format` | string | Export format (json, csv) | json | +| `userId` | integer | Filter by user ID | - | +| `search` | string | Search in title and body | - | + +**Example Request (JSON):** +```bash +curl "http://localhost:8080/api/v1/posts/export?format=json" \ + -o posts_export.json +``` + +**Example Request (CSV):** +```bash +curl "http://localhost:8080/api/v1/posts/export?format=csv" \ + -o posts_export.csv +``` + +**JSON Response:** +```json +[ + { + "id": 1, + "userId": 1, + "title": "Sample Post", + "body": "Content...", + "createdAt": "2025-01-16T10:00:00Z", + "updatedAt": "2025-01-16T10:00:00Z" + } +] +``` + +**CSV Response:** +```csv +ID,UserID,Title,Body,CreatedAt,UpdatedAt +1,1,"Sample Post","Content...","2025-01-16T10:00:00Z","2025-01-16T10:00:00Z" +``` + +### 8. Analyze Posts + +Perform character frequency analysis on all posts. 
+ +**Endpoint:** `GET /api/v1/posts/analytics` + +**Example Request:** +```bash +curl http://localhost:8080/api/v1/posts/analytics +``` + +**Example Response:** +```json +{ + "data": { + "totalPosts": 100, + "totalCharacters": 50000, + "uniqueChars": 95, + "charFrequency": { + "32": 8500, + "97": 4200, + "101": 6500 + }, + "topCharacters": [ + { + "character": " ", + "count": 8500, + "frequency": 17.0 + }, + { + "character": "e", + "count": 6500, + "frequency": 13.0 + } + ], + "statistics": { + "averagePostLength": 500.0, + "medianPostLength": 475, + "postsPerUser": { + "1": 50, + "2": 30 + }, + "timeDistribution": { + "morning": 25, + "afternoon": 40, + "evening": 30, + "night": 5 + } + } + }, + "meta": { + "requestId": "550e8400-e29b-41d4-a716-446655440000", + "timestamp": "2025-01-16T10:50:00Z", + "duration": "250ms" + } +} +``` + +## Error Codes + +| Code | HTTP Status | Description | +|------|-------------|-------------| +| `NOT_FOUND` | 404 | Resource not found | +| `INVALID_INPUT` | 400 | Invalid input data | +| `VALIDATION_FAILED` | 422 | Validation failed | +| `UNAUTHORIZED` | 401 | Authentication required | +| `FORBIDDEN` | 403 | Insufficient permissions | +| `CONFLICT` | 409 | Resource conflict | +| `RATE_LIMIT_EXCEEDED` | 429 | Rate limit exceeded | +| `INTERNAL_ERROR` | 500 | Internal server error | +| `SERVICE_UNAVAILABLE` | 503 | Service unavailable | + +## Pagination + +All list endpoints support pagination with the following parameters: + +- `page`: Page number (starts at 1) +- `pageSize`: Number of items per page (max 100, default 20) + +**Example:** +```bash +curl "http://localhost:8080/api/v1/posts?page=2&pageSize=25" +``` + +## Filtering + +List endpoints support various filters: + +- `userId`: Filter by user ID +- `search`: Full-text search in title and body + +**Example:** +```bash +curl "http://localhost:8080/api/v1/posts?userId=1&search=golang" +``` + +## Sorting + +List endpoints support sorting with: + +- `sortBy`: Field to sort by (id, 
title, createdAt, updatedAt) +- `sortOrder`: Sort direction (asc, desc) + +**Example:** +```bash +curl "http://localhost:8080/api/v1/posts?sortBy=createdAt&sortOrder=desc" +``` + +## Code Examples + +### JavaScript/Node.js + +```javascript +const axios = require('axios'); + +// Create a post +async function createPost() { + try { + const response = await axios.post('http://localhost:8080/api/v1/posts', { + userId: 1, + title: 'My Post', + body: 'Post content...' + }); + console.log('Created:', response.data); + } catch (error) { + console.error('Error:', error.response.data); + } +} + +// List posts with pagination +async function listPosts(page = 1) { + const response = await axios.get('http://localhost:8080/api/v1/posts', { + params: { page, pageSize: 20 } + }); + return response.data; +} +``` + +### Python + +```python +import requests + +BASE_URL = 'http://localhost:8080/api/v1' + +# Create a post +def create_post(): + response = requests.post(f'{BASE_URL}/posts', json={ + 'userId': 1, + 'title': 'My Post', + 'body': 'Post content...' 
+ }) + return response.json() + +# Get posts with filtering +def get_posts(user_id=None, search=None): + params = {} + if user_id: + params['userId'] = user_id + if search: + params['search'] = search + + response = requests.get(f'{BASE_URL}/posts', params=params) + return response.json() + +# Analyze posts +def analyze_posts(): + response = requests.get(f'{BASE_URL}/posts/analytics') + return response.json() +``` + +### Go + +```go +package main + +import ( + "bytes" + "encoding/json" + "net/http" +) + +const baseURL = "http://localhost:8080/api/v1" + +type Post struct { + ID int `json:"id,omitempty"` + UserID int `json:"userId"` + Title string `json:"title"` + Body string `json:"body"` +} + +func createPost(post Post) (*Post, error) { + data, _ := json.Marshal(post) + resp, err := http.Post(baseURL+"/posts", "application/json", bytes.NewBuffer(data)) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + var result struct { + Data Post `json:"data"` + } + json.NewDecoder(resp.Body).Decode(&result) + return &result.Data, nil +} +``` + +### cURL Examples + +```bash +# Create a post +curl -X POST http://localhost:8080/api/v1/posts \ + -H "Content-Type: application/json" \ + -d '{"userId":1,"title":"Test","body":"Content"}' + +# Get all posts +curl http://localhost:8080/api/v1/posts + +# Get post by ID +curl http://localhost:8080/api/v1/posts/1 + +# Update post +curl -X PUT http://localhost:8080/api/v1/posts/1 \ + -H "Content-Type: application/json" \ + -d '{"title":"Updated Title"}' + +# Delete post +curl -X DELETE http://localhost:8080/api/v1/posts/1 + +# Search posts +curl "http://localhost:8080/api/v1/posts?search=golang&sortBy=createdAt&sortOrder=desc" + +# Export to CSV +curl "http://localhost:8080/api/v1/posts/export?format=csv" -o export.csv + +# Get analytics +curl http://localhost:8080/api/v1/posts/analytics +``` + +## Versioning + +The API uses URL path versioning. Current version is `v1`. 
+ +- **v1 Endpoints:** `/api/v1/*` +- **Default:** `/api/*` routes to v1 + +## Best Practices + +1. **Always include Content-Type header** for POST/PUT requests +2. **Handle pagination** for large datasets +3. **Use bulk endpoints** for multiple operations +4. **Implement retry logic** with exponential backoff +5. **Cache responses** where appropriate +6. **Monitor rate limits** in response headers +7. **Validate input** before sending requests +8. **Handle errors gracefully** with proper error messages + +## Rate Limit Headers + +```http +X-RateLimit-Limit: 100 +X-RateLimit-Remaining: 95 +X-RateLimit-Reset: 1642348800 +``` + +## Support + +For issues and feature requests, please visit: +https://github.com/hoangsonww/Post-Analyzer-Webserver/issues + +--- + +**Last Updated:** January 16, 2025 +**API Version:** 2.0.0 diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..d3eb103 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,56 @@ +# Build stage +FROM golang:1.21-alpine AS builder + +# Install build dependencies +RUN apk add --no-cache git ca-certificates tzdata + +# Set working directory +WORKDIR /build + +# Copy go mod files +COPY go.mod go.sum ./ + +# Download dependencies +RUN go mod download + +# Copy source code +COPY . . + +# Build the application +RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags="-w -s" -o post-analyzer main_new.go + +# Final stage +FROM alpine:latest + +# Install runtime dependencies +RUN apk --no-cache add ca-certificates tzdata + +# Create non-root user +RUN addgroup -g 1000 appuser && \ + adduser -D -u 1000 -G appuser appuser + +# Set working directory +WORKDIR /app + +# Copy binary from builder +COPY --from=builder /build/post-analyzer . 
+ +# Copy templates and assets +COPY home.html ./ +COPY assets ./assets + +# Create data directory +RUN mkdir -p /app/data && chown -R appuser:appuser /app + +# Switch to non-root user +USER appuser + +# Expose port +EXPOSE 8080 + +# Health check +HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \ + CMD wget --no-verbose --tries=1 --spider http://localhost:8080/health || exit 1 + +# Run the application +CMD ["./post-analyzer"] diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md new file mode 100644 index 0000000..f38d8b2 --- /dev/null +++ b/MIGRATION_GUIDE.md @@ -0,0 +1,372 @@ +# Migration Guide: v1.0 to v2.0 (Production Ready) + +This guide will help you migrate from the simple version (v1.0) to the production-ready version (v2.0) of the Post Analyzer Webserver. + +## What's Changed? + +### Major Changes + +1. **Application Architecture** + - Restructured into modular packages + - Introduced storage abstraction layer + - Added comprehensive middleware stack + +2. **Storage Layer** + - File storage now has thread-safe operations + - Added PostgreSQL support + - Automatic schema management + +3. **Configuration** + - Environment-based configuration + - Validation on startup + - Support for multiple environments + +4. **Observability** + - Structured JSON logging + - Prometheus metrics + - Request tracing with IDs + +5. **Security** + - Input validation and sanitization + - Rate limiting + - Security headers + - CORS configuration + +## Migration Steps + +### Step 1: Backup Your Data + +If you're using file storage: + +```bash +# Backup your existing posts.json +cp posts.json posts.json.backup +``` + +### Step 2: Update Dependencies + +```bash +# Download new dependencies +go mod download +go mod tidy +``` + +### Step 3: Configuration + +Create a `.env` file from the example: + +```bash +cp .env.example .env +``` + +Edit `.env` to match your setup. 
For file storage (similar to v1.0): + +```env +# Keep using file storage +DB_TYPE=file +DB_FILE_PATH=posts.json + +# Other settings +PORT=8080 +ENVIRONMENT=production +LOG_LEVEL=info +``` + +For PostgreSQL (recommended for production): + +```env +# Use PostgreSQL +DB_TYPE=postgres +DB_HOST=localhost +DB_PORT=5432 +DB_USER=postgres +DB_PASSWORD=yourpassword +DB_NAME=postanalyzer + +# Other settings +PORT=8080 +ENVIRONMENT=production +LOG_LEVEL=info +``` + +### Step 4: Run the Application + +#### Option A: Direct Run (File Storage) + +```bash +# Run with default file storage +go run main.go +``` + +#### Option B: With PostgreSQL + +```bash +# Start PostgreSQL with Docker +docker run -d \ + --name postgres \ + -e POSTGRES_DB=postanalyzer \ + -e POSTGRES_PASSWORD=postgres \ + -p 5432:5432 \ + postgres:16-alpine + +# Run the application +export DB_TYPE=postgres +export DB_PASSWORD=postgres +go run main.go +``` + +#### Option C: Full Docker Stack + +```bash +# Start everything with Docker Compose +docker-compose up -d +``` + +This includes: +- Application +- PostgreSQL +- Prometheus +- Grafana + +### Step 5: Migrate Existing Data + +If you have existing data in `posts.json` and want to move to PostgreSQL: + +```bash +# The application will automatically handle this +# Just ensure posts.json exists in the same directory +# On first run with DB_TYPE=postgres, the data will be available +``` + +Or manually import: + +```bash +# Start application with file storage first +DB_TYPE=file go run main.go + +# In another terminal, fetch to ensure data is in posts.json +curl http://localhost:8080/fetch + +# Stop the application (Ctrl+C) + +# Start with PostgreSQL +DB_TYPE=postgres DB_PASSWORD=postgres go run main.go + +# Fetch again to populate PostgreSQL +curl http://localhost:8080/fetch +``` + +## Compatibility + +### API Endpoints + +All original endpoints remain functional: + +| v1.0 Endpoint | v2.0 Status | Notes | +|---------------|-------------|-------| +| `/` | ✅ Compatible 
| Enhanced with better error handling | +| `/fetch` | ✅ Compatible | Now supports batch operations | +| `/analyze` | ✅ Compatible | Improved performance | +| `/add` | ✅ Compatible | Added input validation | + +### New Endpoints + +| Endpoint | Purpose | +|----------|---------| +| `/health` | Health check for monitoring | +| `/readiness` | Kubernetes-style readiness probe | +| `/metrics` | Prometheus metrics | + +### File Format + +The `posts.json` file format remains compatible: + +```json +[ + { + "userId": 1, + "id": 1, + "title": "Post Title", + "body": "Post body content" + } +] +``` + +v2.0 adds optional fields: +- `createdAt`: Timestamp when post was created +- `updatedAt`: Timestamp when post was last updated + +These are automatically managed and backward compatible. + +## Feature Comparison + +| Feature | v1.0 | v2.0 | +|---------|------|------| +| Post Management | ✅ | ✅ | +| Character Analysis | ✅ | ✅ (faster) | +| External API Fetch | ✅ | ✅ | +| File Storage | ✅ | ✅ (improved) | +| Database Support | ❌ | ✅ | +| Health Checks | ❌ | ✅ | +| Metrics | ❌ | ✅ | +| Structured Logging | ❌ | ✅ | +| Input Validation | ❌ | ✅ | +| Rate Limiting | ❌ | ✅ | +| Security Headers | ❌ | ✅ | +| CORS | ❌ | ✅ | +| Graceful Shutdown | ❌ | ✅ | +| Docker Support | ❌ | ✅ | +| CI/CD Pipeline | ❌ | ✅ | +| Test Suite | ❌ | ✅ | +| API Documentation | ❌ | ✅ | + +## Troubleshooting + +### Issue: Application Won't Start + +**Error**: `invalid configuration: environment must be one of: development, staging, production` + +**Solution**: Set the ENVIRONMENT variable: +```bash +export ENVIRONMENT=development +``` + +### Issue: Database Connection Failed + +**Error**: `failed to ping database` + +**Solution**: Ensure PostgreSQL is running: +```bash +# Check if PostgreSQL is running +docker ps | grep postgres + +# Or check locally +pg_isready -h localhost +``` + +### Issue: Posts Not Showing + +**Symptom**: Empty home page + +**Solution**: Fetch posts first: +```bash +curl 
http://localhost:8080/fetch +``` + +### Issue: Rate Limited + +**Error**: 429 Too Many Requests + +**Solution**: Increase rate limit: +```bash +export RATE_LIMIT_REQUESTS=1000 +``` + +Or wait for the rate limit window to reset (default: 1 minute). + +## Performance Considerations + +### File Storage vs PostgreSQL + +**File Storage**: +- ✅ Simple setup +- ✅ No external dependencies +- ❌ Not suitable for high concurrency +- ❌ No advanced querying + +**PostgreSQL**: +- ✅ High concurrency support +- ✅ ACID transactions +- ✅ Advanced querying +- ❌ Requires external service + +**Recommendation**: Use file storage for development, PostgreSQL for production. + +## Security Updates + +v2.0 includes important security improvements: + +1. **Input Sanitization**: All user input is sanitized +2. **Rate Limiting**: Prevents abuse +3. **Security Headers**: CSP, X-Frame-Options, etc. +4. **Request Timeouts**: Prevents resource exhaustion + +**Action Required**: Review your CORS configuration in `.env`: +```env +# Development - allow all +ALLOWED_ORIGINS=* + +# Production - specify allowed origins +ALLOWED_ORIGINS=https://yourdomain.com,https://www.yourdomain.com +``` + +## Monitoring Setup + +### Basic Monitoring (Logs) + +```bash +# View logs in production +tail -f /path/to/app/logs + +# Or with Docker +docker logs -f post-analyzer-app +``` + +### Advanced Monitoring (Prometheus + Grafana) + +```bash +# Start monitoring stack +docker-compose up -d prometheus grafana + +# Access Grafana +open http://localhost:3000 +# Login: admin/admin +``` + +Add Prometheus data source: +1. Go to Configuration → Data Sources +2. Add Prometheus +3. URL: `http://prometheus:9090` +4. Save & Test + +Import dashboard from `grafana-dashboard.json` (if provided). 
+ +## Rollback Plan + +If you need to rollback to v1.0: + +```bash +# Stop v2.0 +pkill post-analyzer + +# Restore old version +mv main.go main_v2.go +mv main_old.go main.go + +# Run v1.0 +go run main.go +``` + +Your data in `posts.json` remains compatible. + +## Getting Help + +- 📖 [Full Documentation](README_PRODUCTION.md) +- 🐛 [Report Issues](https://github.com/hoangsonww/Post-Analyzer-Webserver/issues) +- 💬 [Discussions](https://github.com/hoangsonww/Post-Analyzer-Webserver/discussions) + +## Next Steps + +After successful migration: + +1. ✅ Review configuration in `.env` +2. ✅ Set up monitoring (Prometheus/Grafana) +3. ✅ Configure backups (for PostgreSQL) +4. ✅ Set up CI/CD pipeline +5. ✅ Review security settings +6. ✅ Load test your application +7. ✅ Set up log aggregation + +--- + +**Need assistance?** Open an issue on GitHub! diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..8dd0735 --- /dev/null +++ b/Makefile @@ -0,0 +1,164 @@ +.PHONY: help build run test clean docker-build docker-up docker-down lint format install-tools dev migrate + +# Variables +APP_NAME := post-analyzer +MAIN_FILE := main_new.go +BINARY := $(APP_NAME) +DOCKER_IMAGE := $(APP_NAME):latest +GO := go +GOFLAGS := -v +LDFLAGS := -w -s + +# Colors for output +BLUE := \033[0;34m +GREEN := \033[0;32m +YELLOW := \033[0;33m +NC := \033[0m # No Color + +## help: Display this help message +help: + @echo "$(BLUE)Post Analyzer Webserver - Makefile Commands$(NC)" + @echo "" + @grep -E '^## ' Makefile | sed 's/## / /' | column -t -s ':' + +## install: Install dependencies +install: + @echo "$(GREEN)Installing dependencies...$(NC)" + $(GO) mod download + $(GO) mod verify + +## install-tools: Install development tools +install-tools: + @echo "$(GREEN)Installing development tools...$(NC)" + $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@latest + $(GO) install github.com/swaggo/swag/cmd/swag@latest + +## build: Build the application +build: + @echo "$(GREEN)Building 
$(APP_NAME)...$(NC)" + $(GO) build $(GOFLAGS) -ldflags="$(LDFLAGS)" -o $(BINARY) $(MAIN_FILE) + @echo "$(GREEN)Build complete: $(BINARY)$(NC)" + +## run: Run the application +run: build + @echo "$(GREEN)Running $(APP_NAME)...$(NC)" + ./$(BINARY) + +## dev: Run the application in development mode with file watching +dev: + @echo "$(GREEN)Running in development mode...$(NC)" + @if command -v air > /dev/null; then \ + air; \ + else \ + echo "$(YELLOW)Air not installed. Running normally...$(NC)"; \ + $(GO) run $(MAIN_FILE); \ + fi + +## test: Run all tests +test: + @echo "$(GREEN)Running tests...$(NC)" + $(GO) test -v -race -coverprofile=coverage.txt -covermode=atomic ./... + +## test-coverage: Run tests with coverage report +test-coverage: test + @echo "$(GREEN)Generating coverage report...$(NC)" + $(GO) tool cover -html=coverage.txt -o coverage.html + @echo "$(GREEN)Coverage report generated: coverage.html$(NC)" + +## lint: Run linter +lint: + @echo "$(GREEN)Running linter...$(NC)" + golangci-lint run --timeout=5m + +## format: Format Go code +format: + @echo "$(GREEN)Formatting code...$(NC)" + $(GO) fmt ./... + gofmt -s -w . + +## clean: Clean build artifacts +clean: + @echo "$(YELLOW)Cleaning build artifacts...$(NC)" + rm -f $(BINARY) + rm -f coverage.txt coverage.html + rm -rf dist/ + $(GO) clean + +## docker-build: Build Docker image +docker-build: + @echo "$(GREEN)Building Docker image...$(NC)" + docker build -t $(DOCKER_IMAGE) . + +## docker-up: Start all services with Docker Compose +docker-up: + @echo "$(GREEN)Starting services...$(NC)" + docker-compose up -d + @echo "$(GREEN)Services started. 
Application available at http://localhost:8080$(NC)" + @echo "$(GREEN)Prometheus available at http://localhost:9090$(NC)" + @echo "$(GREEN)Grafana available at http://localhost:3000 (admin/admin)$(NC)" + +## docker-down: Stop all services +docker-down: + @echo "$(YELLOW)Stopping services...$(NC)" + docker-compose down + +## docker-logs: View logs from all services +docker-logs: + docker-compose logs -f + +## docker-restart: Restart all services +docker-restart: docker-down docker-up + +## migrate: Run database migrations +migrate: + @echo "$(GREEN)Running database migrations...$(NC)" + @echo "$(YELLOW)Migrations are automatically handled by the application$(NC)" + +## db-shell: Connect to PostgreSQL database +db-shell: + @echo "$(GREEN)Connecting to database...$(NC)" + docker-compose exec postgres psql -U postgres -d postanalyzer + +## benchmark: Run benchmarks +benchmark: + @echo "$(GREEN)Running benchmarks...$(NC)" + $(GO) test -bench=. -benchmem ./... + +## security: Run security checks +security: + @echo "$(GREEN)Running security checks...$(NC)" + @if command -v gosec > /dev/null; then \ + gosec ./...; \ + else \ + echo "$(YELLOW)gosec not installed. Install with: go install github.com/securego/gosec/v2/cmd/gosec@latest$(NC)"; \ + fi + +## deps-update: Update dependencies +deps-update: + @echo "$(GREEN)Updating dependencies...$(NC)" + $(GO) get -u ./... + $(GO) mod tidy + +## check: Run all checks (lint, test, security) +check: lint test security + @echo "$(GREEN)All checks passed!$(NC)" + +## prod-build: Build for production +prod-build: + @echo "$(GREEN)Building for production...$(NC)" + CGO_ENABLED=0 GOOS=linux GOARCH=amd64 $(GO) build -ldflags="$(LDFLAGS)" -o $(BINARY) $(MAIN_FILE) + @echo "$(GREEN)Production build complete$(NC)" + +## init: Initialize development environment +init: install install-tools + @echo "$(GREEN)Creating .env file from example...$(NC)" + @if [ ! 
-f .env ]; then cp .env.example .env 2>/dev/null || echo "$(YELLOW)No .env.example found$(NC)"; fi + @echo "$(GREEN)Development environment ready!$(NC)" + +## version: Display version information +version: + @echo "$(BLUE)Post Analyzer Webserver$(NC)" + @echo "Go version: $$($(GO) version)" + @echo "Git commit: $$(git rev-parse --short HEAD 2>/dev/null || echo 'N/A')" + +.DEFAULT_GOAL := help diff --git a/README_PRODUCTION.md b/README_PRODUCTION.md new file mode 100644 index 0000000..2098d99 --- /dev/null +++ b/README_PRODUCTION.md @@ -0,0 +1,477 @@ +# Post Analyzer Webserver - Production Ready + +

+ Post Analyzer +

+ +

+ License + Go version + Status + Version + Year +

+ +## Overview + +A **production-ready** web application built with Go for analyzing and managing posts. This application demonstrates enterprise-grade software development practices including microservices patterns, observability, security, and DevOps best practices. + +## Features + +### Core Features +- 📝 **Post Management**: Create, read, update, and delete posts +- 🔍 **Character Analysis**: Concurrent character frequency analysis with visualization +- 🌐 **External API Integration**: Fetch posts from JSONPlaceholder API +- 💾 **Flexible Storage**: Support for both file-based and PostgreSQL storage + +### Production Features +- 🔒 **Security**: Input validation, XSS protection, security headers, rate limiting +- 📊 **Observability**: Structured logging, Prometheus metrics, health checks +- 🚀 **Performance**: Graceful shutdown, request timeouts, connection pooling +- 🔄 **DevOps**: Docker support, CI/CD pipeline, automated testing +- 🛡️ **Reliability**: Panic recovery, error handling, request tracing +- ⚙️ **Configuration**: Environment-based config with validation + +## Architecture + +``` +┌─────────────────────────────────────────────────────────┐ +│ Load Balancer │ +└─────────────────────────────────────────────────────────┘ + │ +┌─────────────────────────────────────────────────────────┐ +│ Middleware Stack │ +│ ┌──────────┬──────────┬──────────┬──────────────────┐ │ +│ │ Request │ Logging │ Recovery │ Security Headers │ │ +│ │ ID │ │ │ │ │ +│ └──────────┴──────────┴──────────┴──────────────────┘ │ +│ ┌──────────┬──────────┬──────────┬──────────────────┐ │ +│ │ CORS │ Rate │ Body │ Metrics │ │ +│ │ │ Limiting │ Limit │ │ │ +│ └──────────┴──────────┴──────────┴──────────────────┘ │ +└─────────────────────────────────────────────────────────┘ + │ +┌─────────────────────────────────────────────────────────┐ +│ HTTP Handlers │ +│ ┌──────────┬──────────┬──────────┬──────────────────┐ │ +│ │ Health │ Posts │ Analysis │ Metrics │ │ +│ 
└──────────┴──────────┴──────────┴──────────────────┘ │ +└─────────────────────────────────────────────────────────┘ + │ +┌─────────────────────────────────────────────────────────┐ +│ Storage Layer │ +│ ┌──────────────────────┬──────────────────────────────┐│ +│ │ File Storage │ PostgreSQL Storage ││ +│ └──────────────────────┴──────────────────────────────┘│ +└─────────────────────────────────────────────────────────┘ +``` + +## Technology Stack + +- **Backend**: Go 1.21+ +- **Database**: PostgreSQL 16 (with file storage fallback) +- **Monitoring**: Prometheus + Grafana +- **Containerization**: Docker + Docker Compose +- **CI/CD**: GitHub Actions +- **Template Engine**: Go html/template +- **Metrics**: Prometheus client +- **Testing**: Go testing + table-driven tests + +## Quick Start + +### Prerequisites + +- Go 1.21 or higher +- Docker and Docker Compose (optional) +- PostgreSQL 16 (if not using Docker) +- Make (optional, for convenience commands) + +### Using Docker (Recommended) + +```bash +# Clone the repository +git clone https://github.com/hoangsonww/Post-Analyzer-Webserver.git +cd Post-Analyzer-Webserver + +# Start all services +make docker-up + +# Or without Make +docker-compose up -d +``` + +The application will be available at: +- **Application**: http://localhost:8080 +- **Prometheus**: http://localhost:9090 +- **Grafana**: http://localhost:3000 (admin/admin) + +### Local Development + +```bash +# Install dependencies +make install + +# Run with file storage +go run main_new.go + +# Run with PostgreSQL +export DB_TYPE=postgres +export DB_HOST=localhost +export DB_PASSWORD=yourpassword +go run main_new.go + +# Or use Make +make run +``` + +## Configuration + +The application is configured via environment variables. 
See `.env.example` for all available options: + +```bash +# Copy example configuration +cp .env.example .env + +# Edit configuration +nano .env +``` + +### Key Configuration Options + +| Variable | Default | Description | +|----------|---------|-------------| +| `PORT` | 8080 | Server port | +| `ENVIRONMENT` | development | Environment (development/staging/production) | +| `DB_TYPE` | file | Storage type (file/postgres) | +| `LOG_LEVEL` | info | Logging level (debug/info/warn/error) | +| `RATE_LIMIT_REQUESTS` | 100 | Max requests per window | +| `ALLOWED_ORIGINS` | * | CORS allowed origins | + +## API Endpoints + +### Health & Monitoring + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/health` | GET | Health check endpoint | +| `/readiness` | GET | Readiness probe | +| `/metrics` | GET | Prometheus metrics | + +### Application + +| Endpoint | Method | Description | +|----------|--------|-------------| +| `/` | GET | Home page with all posts | +| `/fetch` | GET | Fetch posts from external API | +| `/add` | GET/POST | Add new post form/submit | +| `/analyze` | GET | Character frequency analysis | + +See [api-docs.yaml](api-docs.yaml) for complete OpenAPI documentation. + +## Development + +### Running Tests + +```bash +# Run all tests +make test + +# Run with coverage +make test-coverage + +# Run benchmarks +make benchmark +``` + +### Code Quality + +```bash +# Format code +make format + +# Run linter +make lint + +# Run security checks +make security + +# Run all checks +make check +``` + +### Database Management + +```bash +# Connect to database shell +make db-shell + +# Run migrations (automatic on startup) +make migrate +``` + +## Deployment + +### Docker Deployment + +```bash +# Build Docker image +make docker-build + +# Deploy with Docker Compose +make docker-up +``` + +### Production Deployment + +1. **Build for production**: + ```bash + make prod-build + ``` + +2. 
**Set environment variables**: + ```bash + export ENVIRONMENT=production + export DB_TYPE=postgres + export DB_HOST=your-db-host + export DB_PASSWORD=your-db-password + ``` + +3. **Run the application**: + ```bash + ./post-analyzer + ``` + +### Cloud Platforms + +#### Render.com +1. Create a new Web Service +2. Connect your GitHub repository +3. Set build command: `go build -o app main_new.go` +4. Set start command: `./app` +5. Add environment variables + +#### Heroku +```bash +heroku create your-app-name +heroku addons:create heroku-postgresql:hobby-dev +git push heroku main +``` + +#### AWS ECS/Fargate +```bash +# Build and push Docker image +docker build -t post-analyzer . +docker tag post-analyzer:latest YOUR_ECR_REPO/post-analyzer:latest +docker push YOUR_ECR_REPO/post-analyzer:latest + +# Deploy using ECS/Fargate +aws ecs update-service --cluster your-cluster --service post-analyzer --force-new-deployment +``` + +## Monitoring & Observability + +### Metrics + +The application exposes Prometheus metrics at `/metrics`: + +- **HTTP Metrics**: Request count, duration, size +- **Application Metrics**: Posts count, operations +- **Database Metrics**: Query duration, connection pool +- **Analysis Metrics**: Analysis operations and duration + +### Logging + +Structured JSON logging with contextual information: + +```json +{ + "time": "2025-01-16T10:30:00Z", + "level": "INFO", + "msg": "request completed", + "request_id": "550e8400-e29b-41d4-a716-446655440000", + "method": "GET", + "path": "/", + "status": 200, + "duration_ms": 45 +} +``` + +### Grafana Dashboards + +Access Grafana at http://localhost:3000 (when using Docker Compose): +- Username: `admin` +- Password: `admin` + +Import the included dashboard for visualization of: +- Request rate and latency +- Error rates +- Database performance +- Resource utilization + +## Security + +### Implemented Security Measures + +- ✅ Input validation and sanitization +- ✅ XSS protection +- ✅ Security headers (CSP, 
X-Frame-Options, etc.) +- ✅ Rate limiting +- ✅ CORS configuration +- ✅ Panic recovery +- ✅ Request timeouts +- ✅ Body size limits +- ✅ SQL injection prevention (prepared statements) + +### Security Best Practices + +1. **Never commit secrets**: Use environment variables +2. **Update dependencies regularly**: `make deps-update` +3. **Run security scans**: `make security` +4. **Use HTTPS in production**: Configure reverse proxy +5. **Review logs regularly**: Monitor for suspicious activity + +## Performance + +### Optimizations + +- **Concurrent processing**: Character analysis uses goroutines +- **Connection pooling**: Database connection reuse +- **Graceful shutdown**: No request interruption +- **Request timeouts**: Prevent resource exhaustion +- **Efficient JSON parsing**: Streaming decoder + +### Benchmarks + +Run benchmarks to measure performance: + +```bash +make benchmark +``` + +## CI/CD Pipeline + +GitHub Actions workflow includes: + +1. **Lint**: Code quality checks +2. **Test**: Unit and integration tests with coverage +3. **Build**: Binary compilation +4. **Security**: Vulnerability scanning +5. **Docker**: Image building and pushing +6. 
**Deploy**: Automated deployment (configurable) + +## Project Structure + +``` +Post-Analyzer-Webserver/ +├── config/ # Configuration management +│ └── config.go +├── internal/ # Internal packages +│ ├── handlers/ # HTTP handlers +│ ├── logger/ # Structured logging +│ ├── metrics/ # Prometheus metrics +│ ├── middleware/ # HTTP middleware +│ └── storage/ # Storage layer (file, postgres) +├── .github/ +│ └── workflows/ # CI/CD pipelines +├── assets/ # Static assets +├── main_new.go # Application entry point (production) +├── main.go # Original simple version +├── home.html # HTML template +├── Dockerfile # Docker image definition +├── docker-compose.yml # Multi-container setup +├── Makefile # Development commands +├── api-docs.yaml # OpenAPI specification +├── README.md # Original readme +└── README_PRODUCTION.md # This file +``` + +## Troubleshooting + +### Common Issues + +**Application won't start** +```bash +# Check logs +docker-compose logs app + +# Verify configuration +go run main_new.go # Will show config validation errors +``` + +**Database connection failed** +```bash +# Check PostgreSQL is running +docker-compose ps + +# Test connection +make db-shell +``` + +**Rate limit errors** +```bash +# Increase rate limit +export RATE_LIMIT_REQUESTS=1000 +``` + +## Contributing + +We welcome contributions! Please follow these steps: + +1. Fork the repository +2. Create a feature branch (`git checkout -b feature/amazing-feature`) +3. Make your changes +4. Run tests (`make check`) +5. Commit your changes (`git commit -m 'Add amazing feature'`) +6. Push to the branch (`git push origin feature/amazing-feature`) +7. 
Open a Pull Request + +### Development Guidelines + +- Follow Go best practices and idioms +- Write tests for new features +- Update documentation +- Ensure all checks pass (`make check`) +- Use conventional commits + +## Roadmap + +- [ ] REST API for programmatic access +- [ ] GraphQL API support +- [ ] User authentication and authorization +- [ ] Multi-user support +- [ ] Advanced analytics dashboard +- [ ] Export functionality (CSV, PDF) +- [ ] Real-time updates with WebSockets +- [ ] Mobile app (React Native) +- [ ] Kubernetes deployment manifests +- [ ] Terraform infrastructure as code + +## License + +Distributed under the MIT License. See `LICENSE` for more information. + +## Acknowledgements + +- [Go](https://golang.org/) +- [JSONPlaceholder](https://jsonplaceholder.typicode.com/) +- [Prometheus](https://prometheus.io/) +- [PostgreSQL](https://www.postgresql.org/) +- [Docker](https://www.docker.com/) + +## Support + +For support, please: +- 📧 Open an issue on GitHub +- 💬 Start a discussion +- 📖 Check the documentation + +## Contact + +Son Nguyen - [@hoangsonww](https://github.com/hoangsonww) + +Project Link: [https://github.com/hoangsonww/Post-Analyzer-Webserver](https://github.com/hoangsonww/Post-Analyzer-Webserver) + +Live Demo: [https://post-analyzer-webserver.onrender.com](https://post-analyzer-webserver.onrender.com) + +--- + +Created with ❤️ by [Son Nguyen](https://github.com/hoangsonww) in 2024-2025. diff --git a/api-docs.yaml b/api-docs.yaml new file mode 100644 index 0000000..55a7cf3 --- /dev/null +++ b/api-docs.yaml @@ -0,0 +1,336 @@ +openapi: 3.0.3 +info: + title: Post Analyzer Webserver API + description: | + A production-ready web application for analyzing and managing posts. 
+ + Features: + - Fetch posts from external APIs + - Store and manage posts + - Character frequency analysis + - Prometheus metrics + - Health checks + version: 2.0.0 + contact: + name: Post Analyzer Team + url: https://github.com/hoangsonww/Post-Analyzer-Webserver + license: + name: MIT + url: https://opensource.org/licenses/MIT + +servers: + - url: http://localhost:8080 + description: Development server + - url: https://post-analyzer-webserver.onrender.com + description: Production server + +tags: + - name: Health + description: Health and readiness checks + - name: Posts + description: Post management operations + - name: Analysis + description: Character analysis operations + - name: Metrics + description: Prometheus metrics + +paths: + /health: + get: + tags: + - Health + summary: Health check + description: Returns the health status of the application + operationId: healthCheck + responses: + '200': + description: Application is healthy + content: + application/json: + schema: + type: object + properties: + status: + type: string + example: healthy + timestamp: + type: string + format: date-time + + /readiness: + get: + tags: + - Health + summary: Readiness check + description: Returns whether the application is ready to serve traffic + operationId: readinessCheck + responses: + '200': + description: Application is ready + content: + application/json: + schema: + type: object + properties: + status: + type: string + example: ready + timestamp: + type: string + format: date-time + '503': + description: Application is not ready + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + + /metrics: + get: + tags: + - Metrics + summary: Prometheus metrics + description: Returns Prometheus metrics for monitoring + operationId: getMetrics + responses: + '200': + description: Metrics in Prometheus format + content: + text/plain: + schema: + type: string + + /: + get: + tags: + - Posts + summary: Home page + description: Displays the home page 
with all posts + operationId: getHome + responses: + '200': + description: HTML page with posts + content: + text/html: + schema: + type: string + + /fetch: + get: + tags: + - Posts + summary: Fetch posts from external API + description: Fetches posts from JSONPlaceholder API and stores them + operationId: fetchPosts + responses: + '200': + description: Posts fetched successfully + content: + text/html: + schema: + type: string + '500': + description: Failed to fetch or store posts + content: + text/html: + schema: + type: string + + /add: + get: + tags: + - Posts + summary: Display add post form + description: Shows the form to add a new post + operationId: showAddPostForm + responses: + '200': + description: Add post form + content: + text/html: + schema: + type: string + + post: + tags: + - Posts + summary: Add a new post + description: Creates a new post with the provided title and body + operationId: addPost + requestBody: + required: true + content: + application/x-www-form-urlencoded: + schema: + type: object + required: + - title + - body + properties: + title: + type: string + maxLength: 500 + description: Post title + example: My First Post + body: + type: string + maxLength: 10000 + description: Post body + example: This is the content of my first post + responses: + '200': + description: Post added successfully + content: + text/html: + schema: + type: string + '400': + description: Invalid input + content: + text/html: + schema: + type: string + '500': + description: Failed to create post + content: + text/html: + schema: + type: string + + /analyze: + get: + tags: + - Analysis + summary: Analyze character frequency + description: Performs character frequency analysis on all posts + operationId: analyzePosts + responses: + '200': + description: Analysis results + content: + text/html: + schema: + type: string + '500': + description: Analysis failed + content: + text/html: + schema: + type: string + +components: + schemas: + Post: + type: object + 
required: + - userId + - id + - title + - body + properties: + userId: + type: integer + description: ID of the user who created the post + example: 1 + id: + type: integer + description: Unique post identifier + example: 1 + title: + type: string + maxLength: 500 + description: Post title + example: Sample Post Title + body: + type: string + maxLength: 10000 + description: Post content + example: This is the body of the post with some interesting content. + createdAt: + type: string + format: date-time + description: Timestamp when post was created + updatedAt: + type: string + format: date-time + description: Timestamp when post was last updated + + Error: + type: object + properties: + error: + type: string + description: Error message + example: Internal server error + timestamp: + type: string + format: date-time + description: When the error occurred + + HealthStatus: + type: object + properties: + status: + type: string + enum: [healthy, unhealthy, ready, not ready] + description: Status of the service + timestamp: + type: string + format: date-time + description: Timestamp of the status check + + securitySchemes: + ApiKey: + type: apiKey + in: header + name: X-API-Key + description: API key for authentication (not currently implemented) + + parameters: + RequestID: + name: X-Request-ID + in: header + description: Unique request identifier for tracing + schema: + type: string + format: uuid + + responses: + NotFound: + description: Resource not found + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + + BadRequest: + description: Invalid request parameters + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + + InternalError: + description: Internal server error + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + + TooManyRequests: + description: Rate limit exceeded + content: + application/json: + schema: + type: object + properties: + error: + type: string + example: Rate 
limit exceeded. Please try again later. + +x-readme: + samples-languages: + - curl + - javascript + - python + - go diff --git a/assets/post-analyzer.go b/assets/post-analyzer.go index 02c7079..9e92582 100644 --- a/assets/post-analyzer.go +++ b/assets/post-analyzer.go @@ -45,7 +45,7 @@ func main() { http.HandleFunc("/add", AddPostHandler) fmt.Println("Server starting at http://localhost:8080/") - http.ListenAndServe(":8080", nil) + _ = http.ListenAndServe(":8080", nil) } // HomeHandler serves the home page diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..218c171 --- /dev/null +++ b/config/config.go @@ -0,0 +1,227 @@ +package config + +import ( + "fmt" + "os" + "strconv" + "time" +) + +// Config holds all configuration for the application +type Config struct { + Server ServerConfig + Database DatabaseConfig + Security SecurityConfig + Logging LoggingConfig + External ExternalConfig +} + +// ServerConfig contains server-related configuration +type ServerConfig struct { + Port string + Host string + ReadTimeout time.Duration + WriteTimeout time.Duration + IdleTimeout time.Duration + ShutdownTimeout time.Duration + Environment string +} + +// DatabaseConfig contains database-related configuration +type DatabaseConfig struct { + Type string // "file" or "postgres" + FilePath string + Host string + Port string + User string + Password string + DBName string + SSLMode string + MaxConns int + MinConns int +} + +// SecurityConfig contains security-related configuration +type SecurityConfig struct { + RateLimitRequests int + RateLimitWindow time.Duration + MaxBodySize int64 + AllowedOrigins []string + TrustedProxies []string +} + +// LoggingConfig contains logging-related configuration +type LoggingConfig struct { + Level string + Format string // "json" or "text" + Output string // "stdout" or file path + TimeFormat string +} + +// ExternalConfig contains external service configuration +type ExternalConfig struct { + JSONPlaceholderURL 
string + HTTPTimeout time.Duration +} + +// Load reads configuration from environment variables with sensible defaults +func Load() (*Config, error) { + cfg := &Config{ + Server: ServerConfig{ + Port: getEnv("PORT", "8080"), + Host: getEnv("HOST", "0.0.0.0"), + ReadTimeout: getDurationEnv("READ_TIMEOUT", 15*time.Second), + WriteTimeout: getDurationEnv("WRITE_TIMEOUT", 15*time.Second), + IdleTimeout: getDurationEnv("IDLE_TIMEOUT", 60*time.Second), + ShutdownTimeout: getDurationEnv("SHUTDOWN_TIMEOUT", 30*time.Second), + Environment: getEnv("ENVIRONMENT", "development"), + }, + Database: DatabaseConfig{ + Type: getEnv("DB_TYPE", "file"), + FilePath: getEnv("DB_FILE_PATH", "posts.json"), + Host: getEnv("DB_HOST", "localhost"), + Port: getEnv("DB_PORT", "5432"), + User: getEnv("DB_USER", "postgres"), + Password: getEnv("DB_PASSWORD", ""), + DBName: getEnv("DB_NAME", "postanalyzer"), + SSLMode: getEnv("DB_SSL_MODE", "disable"), + MaxConns: getIntEnv("DB_MAX_CONNS", 25), + MinConns: getIntEnv("DB_MIN_CONNS", 5), + }, + Security: SecurityConfig{ + RateLimitRequests: getIntEnv("RATE_LIMIT_REQUESTS", 100), + RateLimitWindow: getDurationEnv("RATE_LIMIT_WINDOW", 1*time.Minute), + MaxBodySize: getInt64Env("MAX_BODY_SIZE", 1*1024*1024), // 1MB + AllowedOrigins: getSliceEnv("ALLOWED_ORIGINS", []string{"*"}), + TrustedProxies: getSliceEnv("TRUSTED_PROXIES", []string{}), + }, + Logging: LoggingConfig{ + Level: getEnv("LOG_LEVEL", "info"), + Format: getEnv("LOG_FORMAT", "json"), + Output: getEnv("LOG_OUTPUT", "stdout"), + TimeFormat: getEnv("LOG_TIME_FORMAT", time.RFC3339), + }, + External: ExternalConfig{ + JSONPlaceholderURL: getEnv("JSONPLACEHOLDER_URL", "https://jsonplaceholder.typicode.com/posts"), + HTTPTimeout: getDurationEnv("HTTP_TIMEOUT", 30*time.Second), + }, + } + + if err := cfg.Validate(); err != nil { + return nil, fmt.Errorf("invalid configuration: %w", err) + } + + return cfg, nil +} + +// Validate checks if the configuration is valid +func (c *Config) Validate() 
error { + // Validate server config + if c.Server.Port == "" { + return fmt.Errorf("server port cannot be empty") + } + if c.Server.Environment != "development" && c.Server.Environment != "staging" && c.Server.Environment != "production" { + return fmt.Errorf("environment must be one of: development, staging, production") + } + + // Validate database config + if c.Database.Type != "file" && c.Database.Type != "postgres" { + return fmt.Errorf("database type must be 'file' or 'postgres'") + } + if c.Database.Type == "file" && c.Database.FilePath == "" { + return fmt.Errorf("database file path cannot be empty when using file storage") + } + if c.Database.Type == "postgres" { + if c.Database.Host == "" || c.Database.DBName == "" { + return fmt.Errorf("database host and name are required for postgres") + } + } + + // Validate security config + if c.Security.RateLimitRequests <= 0 { + return fmt.Errorf("rate limit requests must be positive") + } + if c.Security.MaxBodySize <= 0 { + return fmt.Errorf("max body size must be positive") + } + + // Validate logging config + validLogLevels := map[string]bool{"debug": true, "info": true, "warn": true, "error": true} + if !validLogLevels[c.Logging.Level] { + return fmt.Errorf("log level must be one of: debug, info, warn, error") + } + if c.Logging.Format != "json" && c.Logging.Format != "text" { + return fmt.Errorf("log format must be 'json' or 'text'") + } + + return nil +} + +// IsDevelopment returns true if running in development mode +func (c *Config) IsDevelopment() bool { + return c.Server.Environment == "development" +} + +// IsProduction returns true if running in production mode +func (c *Config) IsProduction() bool { + return c.Server.Environment == "production" +} + +// Helper functions for reading environment variables + +func getEnv(key, defaultValue string) string { + if value := os.Getenv(key); value != "" { + return value + } + return defaultValue +} + +func getIntEnv(key string, defaultValue int) int { + if 
value := os.Getenv(key); value != "" { + if intVal, err := strconv.Atoi(value); err == nil { + return intVal + } + } + return defaultValue +} + +func getInt64Env(key string, defaultValue int64) int64 { + if value := os.Getenv(key); value != "" { + if intVal, err := strconv.ParseInt(value, 10, 64); err == nil { + return intVal + } + } + return defaultValue +} + +func getDurationEnv(key string, defaultValue time.Duration) time.Duration { + if value := os.Getenv(key); value != "" { + if duration, err := time.ParseDuration(value); err == nil { + return duration + } + } + return defaultValue +} + +func getSliceEnv(key string, defaultValue []string) []string { + if value := os.Getenv(key); value != "" { + // Simple comma-separated parsing + result := []string{} + current := "" + for _, char := range value { + if char == ',' { + if current != "" { + result = append(result, current) + current = "" + } + } else { + current += string(char) + } + } + if current != "" { + result = append(result, current) + } + return result + } + return defaultValue +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..6db7124 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,123 @@ +version: '3.8' + +services: + # PostgreSQL database + postgres: + image: postgres:16-alpine + container_name: post-analyzer-db + environment: + POSTGRES_DB: postanalyzer + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + ports: + - "5432:5432" + volumes: + - postgres_data:/var/lib/postgresql/data + healthcheck: + test: ["CMD-SHELL", "pg_isready -U postgres"] + interval: 10s + timeout: 5s + retries: 5 + networks: + - app-network + + # Post Analyzer application + app: + build: + context: . 
+ + dockerfile: Dockerfile + container_name: post-analyzer-app + environment: + # Server configuration + PORT: "8080" + HOST: 0.0.0.0 + ENVIRONMENT: production + READ_TIMEOUT: 15s + WRITE_TIMEOUT: 15s + SHUTDOWN_TIMEOUT: 30s + + # Database configuration + DB_TYPE: postgres + DB_HOST: postgres + DB_PORT: "5432" + DB_USER: postgres + DB_PASSWORD: postgres + DB_NAME: postanalyzer + DB_SSL_MODE: disable + DB_MAX_CONNS: "25" + DB_MIN_CONNS: "5" + + # Security configuration + RATE_LIMIT_REQUESTS: "100" + RATE_LIMIT_WINDOW: 1m + MAX_BODY_SIZE: "1048576" + ALLOWED_ORIGINS: "*" + + # Logging configuration + LOG_LEVEL: info + LOG_FORMAT: json + LOG_OUTPUT: stdout + + # External API configuration + JSONPLACEHOLDER_URL: https://jsonplaceholder.typicode.com/posts + HTTP_TIMEOUT: 30s + ports: + - "8080:8080" + depends_on: + postgres: + condition: service_healthy + restart: unless-stopped + networks: + - app-network + healthcheck: + test: ["CMD", "wget", "--quiet", "--tries=1", "--spider", "http://localhost:8080/health"] + interval: 30s + timeout: 5s + retries: 3 + start_period: 10s + + # Prometheus for metrics + prometheus: + image: prom/prometheus:latest + container_name: post-analyzer-prometheus + command: + - '--config.file=/etc/prometheus/prometheus.yml' + - '--storage.tsdb.path=/prometheus' + - '--web.console.libraries=/usr/share/prometheus/console_libraries' + - '--web.console.templates=/usr/share/prometheus/consoles' + ports: + - "9090:9090" + volumes: + - ./prometheus.yml:/etc/prometheus/prometheus.yml + - prometheus_data:/prometheus + depends_on: + - app + restart: unless-stopped + networks: + - app-network + + # Grafana for visualization + grafana: + image: grafana/grafana:latest + container_name: post-analyzer-grafana + environment: + - GF_SECURITY_ADMIN_PASSWORD=admin + - GF_USERS_ALLOW_SIGN_UP=false + ports: + - "3000:3000" + volumes: + - grafana_data:/var/lib/grafana + depends_on: + - prometheus + restart: unless-stopped + networks: + - app-network + +volumes: + postgres_data: + 
prometheus_data: + grafana_data: + +networks: + app-network: + driver: bridge diff --git a/go.mod b/go.mod index be801a6..4f328e8 100644 --- a/go.mod +++ b/go.mod @@ -1,3 +1,19 @@ module Post_Analyzer_Webserver go 1.19 + +require ( + github.com/google/uuid v1.6.0 + github.com/lib/pq v1.10.9 + github.com/prometheus/client_golang v1.19.0 +) + +require ( + github.com/beorn7/perks v1.0.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/prometheus/client_model v0.5.0 // indirect + github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect + golang.org/x/sys v0.16.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..f7751bd --- /dev/null +++ b/go.sum @@ -0,0 +1,22 @@ +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= +github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod 
h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= +github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= +github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= +golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= diff --git a/home.html b/home.html index 511c5df..2b00392 100644 --- a/home.html +++ b/home.html @@ -32,6 +32,9 @@ border-radius: 5px; box-shadow: 0 2px 4px rgba(0,0,0,0.1); transition: background-color 0.3s, box-shadow 0.3s; + cursor: pointer; + border: none; + font-size: 14px; } .button:hover { background-color: #0056b3; @@ -91,6 +94,44 @@ max-height: 400px; margin: 20px auto; } + #analysisSection { + display: none; + } + .stats-grid { + display: grid; + grid-template-columns: repeat(auto-fit, minmax(200px, 1fr)); + gap: 15px; + margin: 20px 0; + } + .stat-card { + background: #f8f9fa; + padding: 15px; + border-radius: 8px; + border-left: 4px solid #007bff; + } + .stat-label { + font-size: 12px; + color: #666; + margin-bottom: 5px; + } + .stat-value { + font-size: 24px; + font-weight: bold; + color: #0056b3; + } + .top-chars { + max-height: 300px; + overflow-y: auto; + } + .char-item { + display: flex; + justify-content: space-between; + padding: 8px; + border-bottom: 1px solid #eee; + } + .char-item:hover { + background: #f8f9fa; + } @@ -101,7 +142,7 @@

Post Viewer and Analyzer

Home Fetch Posts - Analyze Character Frequency in Posts + Add New Post
@@ -113,12 +154,12 @@

Welcome to the Post Viewer and Analyzer!

This application allows you to:

{{if .HasPosts}}

Recent Posts:

-
+
{{range .Posts}}
{{.Title}} @@ -142,7 +183,7 @@

Recent Posts:

{{else if .HasPosts}} -
+

Fetched Posts:

{{range .Posts}}
@@ -152,72 +193,225 @@

Fetched Posts:

{{end}}
{{end}} - {{if .HasAnalysis}} -
-

Character Frequency Analysis:

+ + +
+

Character Frequency Analysis (Client-Side)

+ +
+
+
Total Posts
+
0
+
+
+
Total Characters
+
0
+
+
+
Unique Characters
+
0
+
+
+
Average Post Length
+
0
+
+
+
- -
-

Character Frequencies:

-
- {{range $key, $value := .CharFreq}} -

'{{printf "%q" $key}}': {{$value}}

- {{end}} -
-
-
-
+ } + } + }); + + // Scroll to analysis + document.getElementById('analysisSection').scrollIntoView({ behavior: 'smooth' }); + } + + // If on analyze page with backend data, still use it + {{if .HasAnalysis}} + window.addEventListener('DOMContentLoaded', function() { + const charFreq = {{.CharFreq | toJSON}}; + const ctx = document.getElementById('charFreqChart').getContext('2d'); + const labels = Object.keys(charFreq).map(key => String.fromCharCode(key)); + const data = Object.values(charFreq); + + new Chart(ctx, { + type: 'bar', + data: { + labels: labels, + datasets: [{ + label: 'Character Frequency', + data: data, + backgroundColor: 'rgba(54, 162, 235, 0.5)', + borderColor: 'rgba(54, 162, 235, 1)', + borderWidth: 1 + }] + }, + options: { + responsive: true, + maintainAspectRatio: false, + scales: { + x: { + title: { + display: true, + text: 'Characters' + }, + grid: { + display: false + } + }, + y: { + title: { + display: true, + text: 'Frequency' + }, + beginAtZero: true, + grid: { + color: 'rgba(200, 200, 200, 0.2)' + } + } + }, + plugins: { + legend: { + display: true, + position: 'top' + } + } + } + }); + }); {{end}} -
+ diff --git a/internal/api/api.go b/internal/api/api.go new file mode 100644 index 0000000..dee11c6 --- /dev/null +++ b/internal/api/api.go @@ -0,0 +1,403 @@ +package api + +import ( + "encoding/json" + "net/http" + "strconv" + "strings" + "time" + + "Post_Analyzer_Webserver/internal/errors" + "Post_Analyzer_Webserver/internal/logger" + "Post_Analyzer_Webserver/internal/models" + "Post_Analyzer_Webserver/internal/service" +) + +// API handles REST API endpoints +type API struct { + postService *service.PostService +} + +// NewAPI creates a new API handler +func NewAPI(postService *service.PostService) *API { + return &API{ + postService: postService, + } +} + +// ListPosts handles GET /api/v1/posts +func (a *API) ListPosts(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Parse filters + filter := &models.PostFilter{} + if userIDStr := r.URL.Query().Get("userId"); userIDStr != "" { + userID, err := strconv.Atoi(userIDStr) + if err == nil { + filter.UserID = &userID + } + } + filter.Search = r.URL.Query().Get("search") + filter.SortBy = r.URL.Query().Get("sortBy") + filter.SortOrder = r.URL.Query().Get("sortOrder") + + // Parse pagination + pagination := a.parsePagination(r) + + // Get posts + posts, paginationMeta, err := a.postService.GetAll(ctx, filter, pagination) + if err != nil { + a.respondError(w, r, err) + return + } + + // Build response + response := &models.PaginatedResponse{ + Data: posts, + Pagination: *paginationMeta, + Meta: &models.ResponseMeta{ + RequestID: getRequestID(ctx), + Timestamp: time.Now(), + }, + } + + a.respondJSON(w, http.StatusOK, response) +} + +// GetPost handles GET /api/v1/posts/{id} +func (a *API) GetPost(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Extract ID from URL path + id, err := a.extractID(r) + if err != nil { + a.respondError(w, r, errors.NewValidationError("invalid post ID")) + return + } + + // Get post + post, err := a.postService.GetByID(ctx, id) + if err != nil { + 
a.respondError(w, r, err) + return + } + + a.respondJSON(w, http.StatusOK, map[string]interface{}{ + "data": post, + "meta": &models.ResponseMeta{ + RequestID: getRequestID(ctx), + Timestamp: time.Now(), + }, + }) +} + +// CreatePost handles POST /api/v1/posts +func (a *API) CreatePost(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Parse request body + var req models.CreatePostRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + a.respondError(w, r, errors.NewValidationError("invalid request body")) + return + } + + // Create post + post, err := a.postService.Create(ctx, &req) + if err != nil { + a.respondError(w, r, err) + return + } + + logger.InfoContext(ctx, "post created via API", "id", post.ID) + + a.respondJSON(w, http.StatusCreated, map[string]interface{}{ + "data": post, + "meta": &models.ResponseMeta{ + RequestID: getRequestID(ctx), + Timestamp: time.Now(), + }, + }) +} + +// UpdatePost handles PUT /api/v1/posts/{id} +func (a *API) UpdatePost(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Extract ID + id, err := a.extractID(r) + if err != nil { + a.respondError(w, r, errors.NewValidationError("invalid post ID")) + return + } + + // Parse request body + var req models.UpdatePostRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + a.respondError(w, r, errors.NewValidationError("invalid request body")) + return + } + + // Update post + post, err := a.postService.Update(ctx, id, &req) + if err != nil { + a.respondError(w, r, err) + return + } + + logger.InfoContext(ctx, "post updated via API", "id", post.ID) + + a.respondJSON(w, http.StatusOK, map[string]interface{}{ + "data": post, + "meta": &models.ResponseMeta{ + RequestID: getRequestID(ctx), + Timestamp: time.Now(), + }, + }) +} + +// DeletePost handles DELETE /api/v1/posts/{id} +func (a *API) DeletePost(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Extract ID + id, err := a.extractID(r) + if err 
!= nil { + a.respondError(w, r, errors.NewValidationError("invalid post ID")) + return + } + + // Delete post + if err := a.postService.Delete(ctx, id); err != nil { + a.respondError(w, r, err) + return + } + + logger.InfoContext(ctx, "post deleted via API", "id", id) + + a.respondJSON(w, http.StatusOK, map[string]interface{}{ + "message": "Post deleted successfully", + "meta": &models.ResponseMeta{ + RequestID: getRequestID(ctx), + Timestamp: time.Now(), + }, + }) +} + +// BulkCreatePosts handles POST /api/v1/posts/bulk +func (a *API) BulkCreatePosts(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Parse request body + var req models.BulkCreateRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + a.respondError(w, r, errors.NewValidationError("invalid request body")) + return + } + + // Validate request + if len(req.Posts) == 0 { + a.respondError(w, r, errors.NewValidationError("posts array cannot be empty")) + return + } + if len(req.Posts) > 1000 { + a.respondError(w, r, errors.NewValidationError("maximum 1000 posts per bulk request")) + return + } + + // Create posts + response, err := a.postService.BulkCreate(ctx, &req) + if err != nil { + a.respondError(w, r, err) + return + } + + logger.InfoContext(ctx, "bulk create completed via API", + "created", response.Created, + "failed", response.Failed, + ) + + statusCode := http.StatusCreated + if response.Failed > 0 { + statusCode = http.StatusMultiStatus + } + + a.respondJSON(w, statusCode, map[string]interface{}{ + "data": response, + "meta": &models.ResponseMeta{ + RequestID: getRequestID(ctx), + Timestamp: time.Now(), + }, + }) +} + +// ExportPosts handles GET /api/v1/posts/export +func (a *API) ExportPosts(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Parse format + format := models.ExportFormat(r.URL.Query().Get("format")) + if format == "" { + format = models.ExportFormatJSON + } + + // Validate format + if format != models.ExportFormatJSON && 
format != models.ExportFormatCSV { + a.respondError(w, r, errors.NewValidationError("invalid export format (json or csv)")) + return + } + + // Parse filter + filter := &models.PostFilter{} + if userIDStr := r.URL.Query().Get("userId"); userIDStr != "" { + userID, err := strconv.Atoi(userIDStr) + if err == nil { + filter.UserID = &userID + } + } + filter.Search = r.URL.Query().Get("search") + + // Set headers + filename := "posts_export_" + time.Now().Format("20060102_150405") + if format == models.ExportFormatJSON { + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Disposition", "attachment; filename="+filename+".json") + } else { + w.Header().Set("Content-Type", "text/csv") + w.Header().Set("Content-Disposition", "attachment; filename="+filename+".csv") + } + + // Export posts + if err := a.postService.ExportPosts(ctx, w, format, filter); err != nil { + logger.ErrorContext(ctx, "export failed", "error", err) + a.respondError(w, r, err) + return + } + + logger.InfoContext(ctx, "posts exported", "format", format) +} + +// AnalyzePosts handles GET /api/v1/posts/analytics +func (a *API) AnalyzePosts(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + start := time.Now() + + // Perform analysis + result, err := a.postService.AnalyzeCharacterFrequency(ctx) + if err != nil { + a.respondError(w, r, err) + return + } + + logger.InfoContext(ctx, "analysis completed via API", + "total_posts", result.TotalPosts, + "duration_ms", time.Since(start).Milliseconds(), + ) + + a.respondJSON(w, http.StatusOK, map[string]interface{}{ + "data": result, + "meta": &models.ResponseMeta{ + RequestID: getRequestID(ctx), + Timestamp: time.Now(), + Duration: time.Since(start), + }, + }) +} + +// Helper methods + +func (a *API) parsePagination(r *http.Request) *models.PaginationParams { + page, _ := strconv.Atoi(r.URL.Query().Get("page")) + if page < 1 { + page = 1 + } + + pageSize, _ := strconv.Atoi(r.URL.Query().Get("pageSize")) + if pageSize < 1 || 
pageSize > 100 { + pageSize = 20 // default + } + + return &models.PaginationParams{ + Page: page, + PageSize: pageSize, + Offset: (page - 1) * pageSize, + } +} + +func (a *API) extractID(r *http.Request) (int, error) { + // Extract ID from path: /api/v1/posts/{id} + path := r.URL.Path + parts := strings.Split(path, "/") + + // Find the "posts" segment + for i, part := range parts { + if part == "posts" && i+1 < len(parts) { + // The next part should be the ID + idStr := parts[i+1] + // Remove any query parameters + if idx := strings.Index(idStr, "?"); idx != -1 { + idStr = idStr[:idx] + } + // Skip if it's a special endpoint + if idStr == "bulk" || idStr == "export" || idStr == "analytics" { + continue + } + return strconv.Atoi(idStr) + } + } + + return 0, errors.NewValidationError("invalid post ID") +} + +func (a *API) respondJSON(w http.ResponseWriter, statusCode int, data interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(statusCode) + + if err := json.NewEncoder(w).Encode(data); err != nil { + logger.Error("failed to encode JSON response", "error", err) + } +} + +func (a *API) respondError(w http.ResponseWriter, r *http.Request, err error) { + appErr, ok := err.(*errors.AppError) + if !ok { + appErr = errors.NewInternalError(err) + } + + logger.ErrorContext(r.Context(), "API error", + "code", appErr.Code, + "message", appErr.Message, + "status", appErr.StatusCode, + ) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(appErr.StatusCode) + + response := map[string]interface{}{ + "error": map[string]interface{}{ + "code": appErr.Code, + "message": appErr.Message, + }, + "meta": &models.ResponseMeta{ + RequestID: getRequestID(r.Context()), + Timestamp: time.Now(), + }, + } + + if appErr.Fields != nil && len(appErr.Fields) > 0 { + response["error"].(map[string]interface{})["fields"] = appErr.Fields + } + + _ = json.NewEncoder(w).Encode(response) +} + +func getRequestID(ctx interface{}) string { + if reqID, ok := 
ctx.(interface{ Value(interface{}) interface{} }); ok { + if val := reqID.Value(logger.RequestIDKey); val != nil { + if id, ok := val.(string); ok { + return id + } + } + } + return "" +} diff --git a/internal/api/router.go b/internal/api/router.go new file mode 100644 index 0000000..228c5c6 --- /dev/null +++ b/internal/api/router.go @@ -0,0 +1,106 @@ +package api + +import ( + "net/http" + "strings" +) + +// Router handles API routing with versioning +type Router struct { + api *API +} + +// NewRouter creates a new API router +func NewRouter(api *API) *Router { + return &Router{api: api} +} + +// ServeHTTP implements http.Handler +func (router *Router) ServeHTTP(w http.ResponseWriter, r *http.Request) { + // Extract API version from path + path := r.URL.Path + + // Handle /api/v1/* routes + if strings.HasPrefix(path, "/api/v1/") { + router.handleV1(w, r) + return + } + + // Handle /api/* routes (default to v1) + if strings.HasPrefix(path, "/api/") { + // Remove /api prefix and add /api/v1 + r.URL.Path = "/api/v1" + strings.TrimPrefix(path, "/api") + router.handleV1(w, r) + return + } + + http.NotFound(w, r) +} + +// handleV1 handles version 1 API routes +func (router *Router) handleV1(w http.ResponseWriter, r *http.Request) { + path := r.URL.Path + + // Posts endpoints + if strings.HasPrefix(path, "/api/v1/posts") { + remaining := strings.TrimPrefix(path, "/api/v1/posts") + + // /api/v1/posts/bulk + if remaining == "/bulk" { + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + router.api.BulkCreatePosts(w, r) + return + } + + // /api/v1/posts/export + if remaining == "/export" { + if r.Method != http.MethodGet { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + router.api.ExportPosts(w, r) + return + } + + // /api/v1/posts/analytics + if remaining == "/analytics" { + if r.Method != http.MethodGet { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + 
return + } + router.api.AnalyzePosts(w, r) + return + } + + // /api/v1/posts/{id} + if remaining != "" && remaining != "/" { + switch r.Method { + case http.MethodGet: + router.api.GetPost(w, r) + case http.MethodPut: + router.api.UpdatePost(w, r) + case http.MethodDelete: + router.api.DeletePost(w, r) + default: + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + return + } + + // /api/v1/posts + switch r.Method { + case http.MethodGet: + router.api.ListPosts(w, r) + case http.MethodPost: + router.api.CreatePost(w, r) + default: + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + } + return + } + + http.NotFound(w, r) +} diff --git a/internal/cache/cache.go b/internal/cache/cache.go new file mode 100644 index 0000000..63ec7da --- /dev/null +++ b/internal/cache/cache.go @@ -0,0 +1,105 @@ +package cache + +import ( + "context" + "encoding/json" + "fmt" + "time" + + "Post_Analyzer_Webserver/config" +) + +// Cache defines the caching interface +type Cache interface { + Get(ctx context.Context, key string, value interface{}) error + Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error + Delete(ctx context.Context, key string) error + Clear(ctx context.Context) error +} + +// MemoryCache implements an in-memory cache +type MemoryCache struct { + data map[string]cacheEntry +} + +type cacheEntry struct { + value []byte + expiration time.Time +} + +// NewMemoryCache creates a new in-memory cache +func NewMemoryCache() *MemoryCache { + cache := &MemoryCache{ + data: make(map[string]cacheEntry), + } + + // Start cleanup goroutine + go cache.cleanup() + + return cache +} + +// Get retrieves a value from the cache +func (c *MemoryCache) Get(ctx context.Context, key string, value interface{}) error { + entry, exists := c.data[key] + if !exists { + return fmt.Errorf("cache miss") + } + + // Check expiration + if time.Now().After(entry.expiration) { + delete(c.data, key) + return fmt.Errorf("cache expired") + } + + 
return json.Unmarshal(entry.value, value) +} + +// Set stores a value in the cache +func (c *MemoryCache) Set(ctx context.Context, key string, value interface{}, ttl time.Duration) error { + data, err := json.Marshal(value) + if err != nil { + return err + } + + c.data[key] = cacheEntry{ + value: data, + expiration: time.Now().Add(ttl), + } + + return nil +} + +// Delete removes a value from the cache +func (c *MemoryCache) Delete(ctx context.Context, key string) error { + delete(c.data, key) + return nil +} + +// Clear removes all values from the cache +func (c *MemoryCache) Clear(ctx context.Context) error { + c.data = make(map[string]cacheEntry) + return nil +} + +// cleanup removes expired entries +func (c *MemoryCache) cleanup() { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + + for range ticker.C { + now := time.Now() + for key, entry := range c.data { + if now.After(entry.expiration) { + delete(c.data, key) + } + } + } +} + +// NewCache creates a cache based on configuration +func NewCache(cfg *config.Config) Cache { + // For now, always return memory cache + // In the future, this could return Redis cache if configured + return NewMemoryCache() +} diff --git a/internal/errors/errors.go b/internal/errors/errors.go new file mode 100644 index 0000000..cd8c47d --- /dev/null +++ b/internal/errors/errors.go @@ -0,0 +1,163 @@ +package errors + +import ( + "fmt" + "net/http" +) + +// AppError represents a custom application error with HTTP context +type AppError struct { + Code string `json:"code"` + Message string `json:"message"` + StatusCode int `json:"-"` + Internal error `json:"-"` + Fields map[string]string `json:"fields,omitempty"` +} + +// Error implements the error interface +func (e *AppError) Error() string { + if e.Internal != nil { + return fmt.Sprintf("%s: %v", e.Message, e.Internal) + } + return e.Message +} + +// WithField adds a field-specific error message +func (e *AppError) WithField(field, message string) *AppError { + if 
e.Fields == nil { + e.Fields = make(map[string]string) + } + e.Fields[field] = message + return e +} + +// Predefined error types +var ( + // ErrNotFound indicates a resource was not found + ErrNotFound = &AppError{ + Code: "NOT_FOUND", + Message: "Resource not found", + StatusCode: http.StatusNotFound, + } + + // ErrInvalidInput indicates invalid input data + ErrInvalidInput = &AppError{ + Code: "INVALID_INPUT", + Message: "Invalid input data", + StatusCode: http.StatusBadRequest, + } + + // ErrUnauthorized indicates authentication failure + ErrUnauthorized = &AppError{ + Code: "UNAUTHORIZED", + Message: "Authentication required", + StatusCode: http.StatusUnauthorized, + } + + // ErrForbidden indicates insufficient permissions + ErrForbidden = &AppError{ + Code: "FORBIDDEN", + Message: "Insufficient permissions", + StatusCode: http.StatusForbidden, + } + + // ErrConflict indicates a conflict with existing data + ErrConflict = &AppError{ + Code: "CONFLICT", + Message: "Resource conflict", + StatusCode: http.StatusConflict, + } + + // ErrInternal indicates an internal server error + ErrInternal = &AppError{ + Code: "INTERNAL_ERROR", + Message: "Internal server error", + StatusCode: http.StatusInternalServerError, + } + + // ErrDatabaseError indicates a database operation failure + ErrDatabaseError = &AppError{ + Code: "DATABASE_ERROR", + Message: "Database operation failed", + StatusCode: http.StatusInternalServerError, + } + + // ErrValidationFailed indicates validation failure + ErrValidationFailed = &AppError{ + Code: "VALIDATION_FAILED", + Message: "Validation failed", + StatusCode: http.StatusUnprocessableEntity, + } + + // ErrRateLimitExceeded indicates rate limit exceeded + ErrRateLimitExceeded = &AppError{ + Code: "RATE_LIMIT_EXCEEDED", + Message: "Rate limit exceeded", + StatusCode: http.StatusTooManyRequests, + } + + // ErrServiceUnavailable indicates service is unavailable + ErrServiceUnavailable = &AppError{ + Code: "SERVICE_UNAVAILABLE", + Message: 
"Service temporarily unavailable", + StatusCode: http.StatusServiceUnavailable, + } +) + +// New creates a new AppError +func New(code, message string, statusCode int) *AppError { + return &AppError{ + Code: code, + Message: message, + StatusCode: statusCode, + } +} + +// Wrap wraps an error with additional context +func Wrap(err error, message string) *AppError { + if appErr, ok := err.(*AppError); ok { + return &AppError{ + Code: appErr.Code, + Message: message, + StatusCode: appErr.StatusCode, + Internal: appErr.Internal, + Fields: appErr.Fields, + } + } + + return &AppError{ + Code: "INTERNAL_ERROR", + Message: message, + StatusCode: http.StatusInternalServerError, + Internal: err, + } +} + +// NewNotFound creates a not found error +func NewNotFound(resource string) *AppError { + return &AppError{ + Code: "NOT_FOUND", + Message: fmt.Sprintf("%s not found", resource), + StatusCode: http.StatusNotFound, + } +} + +// NewValidationError creates a validation error +func NewValidationError(message string) *AppError { + return &AppError{ + Code: "VALIDATION_FAILED", + Message: message, + StatusCode: http.StatusUnprocessableEntity, + Fields: make(map[string]string), + } +} + +// NewInternalError creates an internal error +func NewInternalError(err error) *AppError { + return &AppError{ + Code: "INTERNAL_ERROR", + Message: "Internal server error", + StatusCode: http.StatusInternalServerError, + Internal: err, + } +} diff --git a/internal/handlers/handlers.go b/internal/handlers/handlers.go new file mode 100644 index 0000000..5b045a6 --- /dev/null +++ b/internal/handlers/handlers.go @@ -0,0 +1,293 @@ +package handlers + +import ( + "context" + "encoding/json" + "fmt" + "html" + "html/template" + "net/http" + "regexp" + "strings" + "sync" + "time" + + "Post_Analyzer_Webserver/config" + "Post_Analyzer_Webserver/internal/logger" + "Post_Analyzer_Webserver/internal/metrics" + "Post_Analyzer_Webserver/internal/storage" +) + +// Handler holds dependencies for HTTP handlers 
+type Handler struct { + storage storage.Storage + config *config.Config + template *template.Template +} + +// New creates a new Handler +func New(store storage.Storage, cfg *config.Config) (*Handler, error) { + // Custom template functions + funcMap := template.FuncMap{ + "toJSON": func(v interface{}) string { + data, _ := json.Marshal(v) + return string(data) + }, + } + + tmpl, err := template.New("").Funcs(funcMap).ParseFiles("home.html") + if err != nil { + return nil, fmt.Errorf("failed to parse template: %w", err) + } + + return &Handler{ + storage: store, + config: cfg, + template: tmpl, + }, nil +} + +// Template variables +type HomePageVars struct { + Title string + Posts []storage.Post + CharFreq map[rune]int + Error string + HasPosts bool + HasAnalysis bool +} + +// Health check endpoint +func (h *Handler) Health(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"status":"healthy","timestamp":"%s"}`, time.Now().Format(time.RFC3339)) +} + +// Readiness check endpoint +func (h *Handler) Readiness(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), 5*time.Second) + defer cancel() + + // Check if storage is accessible + _, err := h.storage.Count(ctx) + if err != nil { + logger.ErrorContext(r.Context(), "readiness check failed", "error", err) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusServiceUnavailable) + fmt.Fprintf(w, `{"status":"not ready","error":"%s"}`, err.Error()) + return + } + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"status":"ready","timestamp":"%s"}`, time.Now().Format(time.RFC3339)) +} + +// Home serves the home page +func (h *Handler) Home(w http.ResponseWriter, r *http.Request) { + posts, err := h.storage.GetAll(r.Context()) + if err != nil { + logger.ErrorContext(r.Context(), "failed to get posts", "error", err) 
+ h.renderTemplate(w, HomePageVars{Title: "Home", Error: "Failed to read posts"}) + return + } + + h.renderTemplate(w, HomePageVars{Title: "Home", Posts: posts, HasPosts: len(posts) > 0}) +} + +// FetchPosts fetches posts from external API and stores them +func (h *Handler) FetchPosts(w http.ResponseWriter, r *http.Request) { + posts, err := h.fetchPostsFromAPI(r.Context()) + if err != nil { + logger.ErrorContext(r.Context(), "failed to fetch posts from API", "error", err) + h.renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to fetch posts from external API"}) + return + } + + if err := h.storage.BatchCreate(r.Context(), posts); err != nil { + logger.ErrorContext(r.Context(), "failed to store posts", "error", err) + h.renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to store posts"}) + return + } + + logger.InfoContext(r.Context(), "posts fetched successfully", "count", len(posts)) + h.renderTemplate(w, HomePageVars{Title: "Posts Fetched", Posts: posts, HasPosts: true}) +} + +// AnalyzePosts performs character frequency analysis +func (h *Handler) AnalyzePosts(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + posts, err := h.storage.GetAll(r.Context()) + if err != nil { + logger.ErrorContext(r.Context(), "failed to get posts for analysis", "error", err) + h.renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to read posts for analysis"}) + return + } + + // Combine all post text + var allText strings.Builder + for _, post := range posts { + allText.WriteString(post.Title) + allText.WriteString(" ") + allText.WriteString(post.Body) + allText.WriteString(" ") + } + + charFreq := h.countCharacters(allText.String()) + + metrics.RecordAnalysisOperation(time.Since(start)) + logger.InfoContext(r.Context(), "character analysis completed", "duration_ms", time.Since(start).Milliseconds()) + + h.renderTemplate(w, HomePageVars{Title: "Character Analysis", CharFreq: charFreq, HasAnalysis: true}) +} + +// AddPost adds a new 
post +func (h *Handler) AddPost(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodPost { + // Parse form data + if err := r.ParseForm(); err != nil { + logger.ErrorContext(r.Context(), "failed to parse form", "error", err) + h.renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to parse form data"}) + return + } + + // Sanitize and validate input + title := h.sanitizeInput(r.FormValue("title")) + body := h.sanitizeInput(r.FormValue("body")) + + if title == "" || body == "" { + h.renderTemplate(w, HomePageVars{Title: "Error", Error: "Title and body are required"}) + return + } + + post := &storage.Post{ + UserId: 1, + Title: title, + Body: body, + } + + if err := h.storage.Create(r.Context(), post); err != nil { + logger.ErrorContext(r.Context(), "failed to create post", "error", err) + h.renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to create post"}) + return + } + + // Get all posts to display + posts, _ := h.storage.GetAll(r.Context()) + logger.InfoContext(r.Context(), "post added successfully", "id", post.Id) + h.renderTemplate(w, HomePageVars{Title: "Post Added", Posts: posts, HasPosts: true}) + } else { + h.renderTemplate(w, HomePageVars{Title: "Add New Post"}) + } +} + +// fetchPostsFromAPI fetches posts from external API +func (h *Handler) fetchPostsFromAPI(ctx context.Context) ([]storage.Post, error) { + client := &http.Client{ + Timeout: h.config.External.HTTPTimeout, + } + + req, err := http.NewRequestWithContext(ctx, "GET", h.config.External.JSONPlaceholderURL, nil) + if err != nil { + return nil, err + } + + resp, err := client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode) + } + + var posts []storage.Post + if err := json.NewDecoder(resp.Body).Decode(&posts); err != nil { + return nil, err + } + + return posts, nil +} + +// countCharacters counts character frequency 
efficiently +func (h *Handler) countCharacters(text string) map[rune]int { + charCount := make(map[rune]int) + + // Process in chunks for better performance with large texts + const chunkSize = 1000 + if len(text) <= chunkSize { + // Small text, process directly + for _, char := range text { + charCount[char]++ + } + return charCount + } + + // Large text, use concurrent processing + mu := sync.Mutex{} + wg := sync.WaitGroup{} + + numWorkers := 4 + chunkLen := (len(text) + numWorkers - 1) / numWorkers + + for i := 0; i < numWorkers; i++ { + start := i * chunkLen + end := start + chunkLen + if end > len(text) { + end = len(text) + } + if start >= len(text) { + break + } + + wg.Add(1) + go func(chunk string) { + defer wg.Done() + localCount := make(map[rune]int) + for _, char := range chunk { + localCount[char]++ + } + + mu.Lock() + for char, count := range localCount { + charCount[char] += count + } + mu.Unlock() + }(text[start:end]) + } + + wg.Wait() + return charCount +} + +// sanitizeInput sanitizes user input to prevent XSS +func (h *Handler) sanitizeInput(input string) string { + // Remove any HTML tags + input = html.EscapeString(input) + + // Remove any potential script tags or event handlers + input = regexp.MustCompile(`(?i).*?`).ReplaceAllString(input, "") + input = regexp.MustCompile(`(?i)on\w+\s*=`).ReplaceAllString(input, "") + + // Trim whitespace + input = strings.TrimSpace(input) + + return input +} + +// renderTemplate renders the HTML template +func (h *Handler) renderTemplate(w http.ResponseWriter, vars HomePageVars) { + w.Header().Set("Content-Type", "text/html; charset=utf-8") + if err := h.template.ExecuteTemplate(w, "home.html", vars); err != nil { + logger.Error("failed to render template", "error", err) + http.Error(w, "Failed to render template", http.StatusInternalServerError) + } +} + +// Close closes handler resources +func (h *Handler) Close() error { + return h.storage.Close() +} diff --git a/internal/logger/logger.go 
b/internal/logger/logger.go new file mode 100644 index 0000000..2cde4e0 --- /dev/null +++ b/internal/logger/logger.go @@ -0,0 +1,151 @@ +package logger + +import ( + "context" + "io" + "log/slog" + "os" + "time" + + "Post_Analyzer_Webserver/config" +) + +// contextKey is a custom type for context keys to avoid collisions +type contextKey string + +const ( + // RequestIDKey is the context key for request IDs + RequestIDKey contextKey = "request_id" + // UserIDKey is the context key for user IDs + UserIDKey contextKey = "user_id" +) + +var defaultLogger *slog.Logger + +// Init initializes the global logger with the given configuration +func Init(cfg *config.LoggingConfig) error { + var level slog.Level + switch cfg.Level { + case "debug": + level = slog.LevelDebug + case "info": + level = slog.LevelInfo + case "warn": + level = slog.LevelWarn + case "error": + level = slog.LevelError + default: + level = slog.LevelInfo + } + + var writer io.Writer + if cfg.Output == "stdout" || cfg.Output == "" { + writer = os.Stdout + } else { + file, err := os.OpenFile(cfg.Output, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) + if err != nil { + return err + } + writer = file + } + + var handler slog.Handler + opts := &slog.HandlerOptions{ + Level: level, + ReplaceAttr: func(groups []string, a slog.Attr) slog.Attr { + // Customize time format + if a.Key == slog.TimeKey { + if t, ok := a.Value.Any().(time.Time); ok { + a.Value = slog.StringValue(t.Format(cfg.TimeFormat)) + } + } + return a + }, + } + + if cfg.Format == "json" { + handler = slog.NewJSONHandler(writer, opts) + } else { + handler = slog.NewTextHandler(writer, opts) + } + + defaultLogger = slog.New(handler) + slog.SetDefault(defaultLogger) + + return nil +} + +// Get returns the default logger +func Get() *slog.Logger { + if defaultLogger == nil { + defaultLogger = slog.Default() + } + return defaultLogger +} + +// WithRequestID returns a logger with the request ID attached +func WithRequestID(ctx context.Context) 
*slog.Logger { + logger := Get() + if requestID := ctx.Value(RequestIDKey); requestID != nil { + return logger.With("request_id", requestID) + } + return logger +} + +// WithContext returns a logger with all context values attached +func WithContext(ctx context.Context) *slog.Logger { + logger := Get() + attrs := []any{} + + if requestID := ctx.Value(RequestIDKey); requestID != nil { + attrs = append(attrs, "request_id", requestID) + } + if userID := ctx.Value(UserIDKey); userID != nil { + attrs = append(attrs, "user_id", userID) + } + + if len(attrs) > 0 { + return logger.With(attrs...) + } + return logger +} + +// Debug logs a debug message +func Debug(msg string, args ...any) { + Get().Debug(msg, args...) +} + +// Info logs an info message +func Info(msg string, args ...any) { + Get().Info(msg, args...) +} + +// Warn logs a warning message +func Warn(msg string, args ...any) { + Get().Warn(msg, args...) +} + +// Error logs an error message +func Error(msg string, args ...any) { + Get().Error(msg, args...) +} + +// DebugContext logs a debug message with context +func DebugContext(ctx context.Context, msg string, args ...any) { + WithContext(ctx).Debug(msg, args...) +} + +// InfoContext logs an info message with context +func InfoContext(ctx context.Context, msg string, args ...any) { + WithContext(ctx).Info(msg, args...) +} + +// WarnContext logs a warning message with context +func WarnContext(ctx context.Context, msg string, args ...any) { + WithContext(ctx).Warn(msg, args...) +} + +// ErrorContext logs an error message with context +func ErrorContext(ctx context.Context, msg string, args ...any) { + WithContext(ctx).Error(msg, args...) 
+} diff --git a/internal/metrics/metrics.go b/internal/metrics/metrics.go new file mode 100644 index 0000000..d203908 --- /dev/null +++ b/internal/metrics/metrics.go @@ -0,0 +1,177 @@ +package metrics + +import ( + "net/http" + "strconv" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +var ( + // HTTP metrics + httpRequestsTotal = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "Total number of HTTP requests", + }, + []string{"method", "path", "status"}, + ) + + httpRequestDuration = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_request_duration_seconds", + Help: "HTTP request latency in seconds", + Buckets: prometheus.DefBuckets, + }, + []string{"method", "path", "status"}, + ) + + httpRequestSize = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_request_size_bytes", + Help: "HTTP request size in bytes", + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, + []string{"method", "path"}, + ) + + httpResponseSize = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "http_response_size_bytes", + Help: "HTTP response size in bytes", + Buckets: prometheus.ExponentialBuckets(100, 10, 8), + }, + []string{"method", "path"}, + ) + + // Application metrics + postsTotal = promauto.NewGauge( + prometheus.GaugeOpts{ + Name: "posts_total", + Help: "Total number of posts in the system", + }, + ) + + postsFetched = promauto.NewCounter( + prometheus.CounterOpts{ + Name: "posts_fetched_total", + Help: "Total number of posts fetched from external API", + }, + ) + + postsAdded = promauto.NewCounter( + prometheus.CounterOpts{ + Name: "posts_added_total", + Help: "Total number of posts added by users", + }, + ) + + analysisOperations = promauto.NewCounter( + prometheus.CounterOpts{ + Name: "analysis_operations_total", + Help: "Total number of character 
analysis operations", + }, + ) + + analysisDuration = promauto.NewHistogram( + prometheus.HistogramOpts{ + Name: "analysis_duration_seconds", + Help: "Character analysis operation duration in seconds", + Buckets: prometheus.DefBuckets, + }, + ) + + dbOperations = promauto.NewCounterVec( + prometheus.CounterOpts{ + Name: "db_operations_total", + Help: "Total number of database operations", + }, + []string{"operation", "status"}, + ) + + dbOperationDuration = promauto.NewHistogramVec( + prometheus.HistogramOpts{ + Name: "db_operation_duration_seconds", + Help: "Database operation duration in seconds", + Buckets: prometheus.DefBuckets, + }, + []string{"operation"}, + ) +) + +// Middleware creates a middleware for recording HTTP metrics +func Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + // Create a response wrapper to capture status and size + rw := &metricsResponseWriter{ResponseWriter: w, statusCode: http.StatusOK} + + // Record request size + if r.ContentLength > 0 { + httpRequestSize.WithLabelValues(r.Method, r.URL.Path).Observe(float64(r.ContentLength)) + } + + next.ServeHTTP(rw, r) + + // Record metrics + duration := time.Since(start).Seconds() + status := strconv.Itoa(rw.statusCode) + + httpRequestsTotal.WithLabelValues(r.Method, r.URL.Path, status).Inc() + httpRequestDuration.WithLabelValues(r.Method, r.URL.Path, status).Observe(duration) + httpResponseSize.WithLabelValues(r.Method, r.URL.Path).Observe(float64(rw.bytesWritten)) + }) +} + +type metricsResponseWriter struct { + http.ResponseWriter + statusCode int + bytesWritten int +} + +func (mrw *metricsResponseWriter) WriteHeader(code int) { + mrw.statusCode = code + mrw.ResponseWriter.WriteHeader(code) +} + +func (mrw *metricsResponseWriter) Write(b []byte) (int, error) { + n, err := mrw.ResponseWriter.Write(b) + mrw.bytesWritten += n + return n, err +} + +// Handler returns the Prometheus metrics HTTP handler 
+func Handler() http.Handler { + return promhttp.Handler() +} + +// RecordPostsTotal records the total number of posts +func RecordPostsTotal(count int) { + postsTotal.Set(float64(count)) +} + +// RecordPostsFetched increments the posts fetched counter +func RecordPostsFetched(count int) { + postsFetched.Add(float64(count)) +} + +// RecordPostAdded increments the posts added counter +func RecordPostAdded() { + postsAdded.Inc() +} + +// RecordAnalysisOperation records a character analysis operation +func RecordAnalysisOperation(duration time.Duration) { + analysisOperations.Inc() + analysisDuration.Observe(duration.Seconds()) +} + +// RecordDBOperation records a database operation +func RecordDBOperation(operation, status string, duration time.Duration) { + dbOperations.WithLabelValues(operation, status).Inc() + dbOperationDuration.WithLabelValues(operation).Observe(duration.Seconds()) +} diff --git a/internal/middleware/compression.go b/internal/middleware/compression.go new file mode 100644 index 0000000..8e5440e --- /dev/null +++ b/internal/middleware/compression.go @@ -0,0 +1,36 @@ +package middleware + +import ( + "compress/gzip" + "io" + "net/http" + "strings" +) + +type gzipResponseWriter struct { + io.Writer + http.ResponseWriter +} + +func (w gzipResponseWriter) Write(b []byte) (int, error) { + return w.Writer.Write(b) +} + +// Compression middleware compresses HTTP responses +func Compression(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Check if client supports gzip + if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + next.ServeHTTP(w, r) + return + } + + // Create gzip writer + w.Header().Set("Content-Encoding", "gzip") + gz := gzip.NewWriter(w) + defer gz.Close() + + gzipWriter := gzipResponseWriter{Writer: gz, ResponseWriter: w} + next.ServeHTTP(gzipWriter, r) + }) +} diff --git a/internal/middleware/middleware.go b/internal/middleware/middleware.go new file mode 100644 
index 0000000..33f8e5d --- /dev/null +++ b/internal/middleware/middleware.go @@ -0,0 +1,311 @@ +package middleware + +import ( + "context" + "fmt" + "net" + "net/http" + "runtime/debug" + "strings" + "sync" + "time" + + "Post_Analyzer_Webserver/internal/logger" + + "github.com/google/uuid" +) + +// RequestID middleware adds a unique request ID to each request +func RequestID(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestID := r.Header.Get("X-Request-ID") + if requestID == "" { + requestID = uuid.New().String() + } + + ctx := context.WithValue(r.Context(), logger.RequestIDKey, requestID) + w.Header().Set("X-Request-ID", requestID) + + next.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +// Logging middleware logs all HTTP requests +func Logging(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + start := time.Now() + + // Create a response wrapper to capture status code + rw := &responseWriter{ResponseWriter: w, statusCode: http.StatusOK} + + logger.InfoContext(r.Context(), "incoming request", + "method", r.Method, + "path", r.URL.Path, + "remote_addr", r.RemoteAddr, + "user_agent", r.UserAgent(), + ) + + next.ServeHTTP(rw, r) + + duration := time.Since(start) + + logger.InfoContext(r.Context(), "request completed", + "method", r.Method, + "path", r.URL.Path, + "status", rw.statusCode, + "duration_ms", duration.Milliseconds(), + "bytes", rw.bytesWritten, + ) + }) +} + +// responseWriter wraps http.ResponseWriter to capture status code and bytes written +type responseWriter struct { + http.ResponseWriter + statusCode int + bytesWritten int +} + +func (rw *responseWriter) WriteHeader(code int) { + rw.statusCode = code + rw.ResponseWriter.WriteHeader(code) +} + +func (rw *responseWriter) Write(b []byte) (int, error) { + n, err := rw.ResponseWriter.Write(b) + rw.bytesWritten += n + return n, err +} + +// Recovery middleware recovers from panics and 
logs them +func Recovery(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + defer func() { + if err := recover(); err != nil { + logger.ErrorContext(r.Context(), "panic recovered", + "error", err, + "stack", string(debug.Stack()), + ) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusInternalServerError) + fmt.Fprintf(w, `{"error":"Internal server error"}`) + } + }() + + next.ServeHTTP(w, r) + }) +} + +// SecurityHeaders middleware adds security headers to responses +func SecurityHeaders(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("X-Content-Type-Options", "nosniff") + w.Header().Set("X-Frame-Options", "DENY") + w.Header().Set("X-XSS-Protection", "1; mode=block") + w.Header().Set("Referrer-Policy", "strict-origin-when-cross-origin") + w.Header().Set("Content-Security-Policy", "default-src 'self'; script-src 'self' 'unsafe-inline' https://cdn.jsdelivr.net; style-src 'self' 'unsafe-inline'; img-src 'self' data:; font-src 'self'") + w.Header().Set("Permissions-Policy", "geolocation=(), microphone=(), camera=()") + + next.ServeHTTP(w, r) + }) +} + +// CORS middleware handles Cross-Origin Resource Sharing +func CORS(allowedOrigins []string) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + origin := r.Header.Get("Origin") + + // Check if origin is allowed + allowed := false + for _, allowedOrigin := range allowedOrigins { + if allowedOrigin == "*" || allowedOrigin == origin { + allowed = true + break + } + } + + if allowed { + if origin != "" { + w.Header().Set("Access-Control-Allow-Origin", origin) + } else if len(allowedOrigins) > 0 && allowedOrigins[0] == "*" { + w.Header().Set("Access-Control-Allow-Origin", "*") + } + w.Header().Set("Access-Control-Allow-Methods", "GET, POST, PUT, DELETE, 
OPTIONS") + w.Header().Set("Access-Control-Allow-Headers", "Content-Type, Authorization, X-Request-ID") + w.Header().Set("Access-Control-Max-Age", "3600") + } + + // Handle preflight requests + if r.Method == "OPTIONS" { + w.WriteHeader(http.StatusNoContent) + return + } + + next.ServeHTTP(w, r) + }) + } +} + +// RateLimiter implements a simple in-memory rate limiter +type RateLimiter struct { + requests map[string]*clientInfo + mu sync.RWMutex + limit int + window time.Duration +} + +type clientInfo struct { + count int + windowStart time.Time +} + +// NewRateLimiter creates a new rate limiter +func NewRateLimiter(limit int, window time.Duration) *RateLimiter { + rl := &RateLimiter{ + requests: make(map[string]*clientInfo), + limit: limit, + window: window, + } + + // Cleanup old entries every minute + go func() { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + for range ticker.C { + rl.cleanup() + } + }() + + return rl +} + +// Middleware returns the rate limiting middleware +func (rl *RateLimiter) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Get client IP + clientIP := getClientIP(r) + + if !rl.allow(clientIP) { + logger.WarnContext(r.Context(), "rate limit exceeded", + "client_ip", clientIP, + ) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusTooManyRequests) + fmt.Fprintf(w, `{"error":"Rate limit exceeded. 
Please try again later."}`) + return + } + + next.ServeHTTP(w, r) + }) +} + +func (rl *RateLimiter) allow(clientIP string) bool { + rl.mu.Lock() + defer rl.mu.Unlock() + + now := time.Now() + info, exists := rl.requests[clientIP] + + if !exists || now.Sub(info.windowStart) > rl.window { + // New window + rl.requests[clientIP] = &clientInfo{ + count: 1, + windowStart: now, + } + return true + } + + if info.count >= rl.limit { + return false + } + + info.count++ + return true +} + +func (rl *RateLimiter) cleanup() { + rl.mu.Lock() + defer rl.mu.Unlock() + + now := time.Now() + for ip, info := range rl.requests { + if now.Sub(info.windowStart) > rl.window*2 { + delete(rl.requests, ip) + } + } +} + +// getClientIP extracts the client IP from the request +func getClientIP(r *http.Request) string { + // Check X-Forwarded-For header + xff := r.Header.Get("X-Forwarded-For") + if xff != "" { + ips := strings.Split(xff, ",") + if len(ips) > 0 { + return strings.TrimSpace(ips[0]) + } + } + + // Check X-Real-IP header + xri := r.Header.Get("X-Real-IP") + if xri != "" { + return xri + } + + // Fall back to RemoteAddr + ip, _, err := net.SplitHostPort(r.RemoteAddr) + if err != nil { + return r.RemoteAddr + } + return ip +} + +// MaxBodySize limits the size of request bodies +func MaxBodySize(maxSize int64) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.Body = http.MaxBytesReader(w, r.Body, maxSize) + next.ServeHTTP(w, r) + }) + } +} + +// Timeout adds a timeout to requests +func Timeout(timeout time.Duration) func(http.Handler) http.Handler { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx, cancel := context.WithTimeout(r.Context(), timeout) + defer cancel() + + done := make(chan struct{}) + go func() { + next.ServeHTTP(w, r.WithContext(ctx)) + close(done) + }() + + select { + case <-done: + 
return + case <-ctx.Done(): + logger.WarnContext(r.Context(), "request timeout", + "timeout", timeout, + ) + w.WriteHeader(http.StatusGatewayTimeout) + fmt.Fprintf(w, `{"error":"Request timeout"}`) + } + }) + } +} + +// Chain chains multiple middleware functions +func Chain(middlewares ...func(http.Handler) http.Handler) func(http.Handler) http.Handler { + return func(final http.Handler) http.Handler { + for i := len(middlewares) - 1; i >= 0; i-- { + final = middlewares[i](final) + } + return final + } +} diff --git a/internal/middleware/middleware_test.go b/internal/middleware/middleware_test.go new file mode 100644 index 0000000..48279b5 --- /dev/null +++ b/internal/middleware/middleware_test.go @@ -0,0 +1,283 @@ +package middleware + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + "Post_Analyzer_Webserver/internal/logger" +) + +func TestRequestID(t *testing.T) { + handler := RequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestID := r.Context().Value(logger.RequestIDKey) + if requestID == nil { + t.Error("Expected request ID in context") + } + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + rr := httptest.NewRecorder() + + handler.ServeHTTP(rr, req) + + if rr.Header().Get("X-Request-ID") == "" { + t.Error("Expected X-Request-ID header") + } +} + +func TestRequestIDFromHeader(t *testing.T) { + customID := "custom-request-id" + + handler := RequestID(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + requestID := r.Context().Value(logger.RequestIDKey) + if requestID != customID { + t.Errorf("Expected request ID %s, got %v", customID, requestID) + } + })) + + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("X-Request-ID", customID) + rr := httptest.NewRecorder() + + handler.ServeHTTP(rr, req) + + if rr.Header().Get("X-Request-ID") != customID { + t.Errorf("Expected X-Request-ID header %s, got %s", customID, rr.Header().Get("X-Request-ID")) + 
} +} + +func TestRecovery(t *testing.T) { + handler := Recovery(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + panic("test panic") + })) + + req := httptest.NewRequest("GET", "/test", nil) + rr := httptest.NewRecorder() + + // Should not panic + handler.ServeHTTP(rr, req) + + if rr.Code != http.StatusInternalServerError { + t.Errorf("Expected status 500, got %d", rr.Code) + } +} + +func TestSecurityHeaders(t *testing.T) { + handler := SecurityHeaders(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + req := httptest.NewRequest("GET", "/test", nil) + rr := httptest.NewRecorder() + + handler.ServeHTTP(rr, req) + + headers := map[string]string{ + "X-Content-Type-Options": "nosniff", + "X-Frame-Options": "DENY", + "X-XSS-Protection": "1; mode=block", + } + + for header, expectedValue := range headers { + actualValue := rr.Header().Get(header) + if actualValue != expectedValue { + t.Errorf("Expected %s header to be %s, got %s", header, expectedValue, actualValue) + } + } +} + +func TestCORS(t *testing.T) { + allowedOrigins := []string{"http://example.com"} + handler := CORS(allowedOrigins)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // Test with allowed origin + req := httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://example.com") + rr := httptest.NewRecorder() + + handler.ServeHTTP(rr, req) + + if rr.Header().Get("Access-Control-Allow-Origin") != "http://example.com" { + t.Error("Expected CORS headers for allowed origin") + } + + // Test with disallowed origin + req = httptest.NewRequest("GET", "/test", nil) + req.Header.Set("Origin", "http://evil.com") + rr = httptest.NewRecorder() + + handler.ServeHTTP(rr, req) + + if rr.Header().Get("Access-Control-Allow-Origin") != "" { + t.Error("Should not set CORS headers for disallowed origin") + } +} + +func TestCORSPreflight(t *testing.T) { + allowedOrigins := 
[]string{"*"} + handler := CORS(allowedOrigins)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + t.Error("Should not call next handler for OPTIONS request") + })) + + req := httptest.NewRequest("OPTIONS", "/test", nil) + req.Header.Set("Origin", "http://example.com") + rr := httptest.NewRecorder() + + handler.ServeHTTP(rr, req) + + if rr.Code != http.StatusNoContent { + t.Errorf("Expected status 204 for OPTIONS, got %d", rr.Code) + } +} + +func TestRateLimiter(t *testing.T) { + limit := 5 + window := 1 * time.Second + rl := NewRateLimiter(limit, window) + + handler := rl.Middleware(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + })) + + // Make requests up to the limit + for i := 0; i < limit; i++ { + req := httptest.NewRequest("GET", "/test", nil) + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Errorf("Request %d should succeed, got status %d", i+1, rr.Code) + } + } + + // Next request should be rate limited + req := httptest.NewRequest("GET", "/test", nil) + rr := httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + if rr.Code != http.StatusTooManyRequests { + t.Errorf("Expected rate limit (429), got %d", rr.Code) + } + + // Wait for window to reset + time.Sleep(window + 100*time.Millisecond) + + // Should work again + req = httptest.NewRequest("GET", "/test", nil) + rr = httptest.NewRecorder() + handler.ServeHTTP(rr, req) + + if rr.Code != http.StatusOK { + t.Errorf("Expected success after window reset, got %d", rr.Code) + } +} + +func TestMaxBodySize(t *testing.T) { + maxSize := int64(100) + handler := MaxBodySize(maxSize)(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Try to read body + buf := make([]byte, maxSize+1) + _, err := r.Body.Read(buf) + if err == nil { + t.Error("Expected error when reading body larger than max size") + } + w.WriteHeader(http.StatusOK) + })) + + // Create request with body larger than max size 
+ body := make([]byte, maxSize+1) + req := httptest.NewRequest("POST", "/test", nil) + rr := httptest.NewRecorder() + + handler.ServeHTTP(rr, req) + _ = body // Use body variable +} + +func TestChain(t *testing.T) { + called := []string{} + + middleware1 := func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = append(called, "middleware1") + next.ServeHTTP(w, r) + }) + } + + middleware2 := func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = append(called, "middleware2") + next.ServeHTTP(w, r) + }) + } + + finalHandler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + called = append(called, "handler") + w.WriteHeader(http.StatusOK) + }) + + chained := Chain(middleware1, middleware2)(finalHandler) + + req := httptest.NewRequest("GET", "/test", nil) + rr := httptest.NewRecorder() + + chained.ServeHTTP(rr, req) + + // Check order of execution + if len(called) != 3 { + t.Errorf("Expected 3 calls, got %d", len(called)) + } + if called[0] != "middleware1" || called[1] != "middleware2" || called[2] != "handler" { + t.Errorf("Unexpected execution order: %v", called) + } +} + +func TestGetClientIP(t *testing.T) { + tests := []struct { + name string + remoteAddr string + xForwardedFor string + xRealIP string + expectedIP string + }{ + { + name: "from RemoteAddr", + remoteAddr: "192.168.1.1:1234", + expectedIP: "192.168.1.1", + }, + { + name: "from X-Forwarded-For", + remoteAddr: "192.168.1.1:1234", + xForwardedFor: "10.0.0.1, 10.0.0.2", + expectedIP: "10.0.0.1", + }, + { + name: "from X-Real-IP", + remoteAddr: "192.168.1.1:1234", + xRealIP: "10.0.0.1", + expectedIP: "10.0.0.1", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req := httptest.NewRequest("GET", "/test", nil) + req.RemoteAddr = tt.remoteAddr + if tt.xForwardedFor != "" { + req.Header.Set("X-Forwarded-For", tt.xForwardedFor) 
+ } + if tt.xRealIP != "" { + req.Header.Set("X-Real-IP", tt.xRealIP) + } + + ip := getClientIP(req) + if ip != tt.expectedIP { + t.Errorf("Expected IP %s, got %s", tt.expectedIP, ip) + } + }) + } +} diff --git a/internal/migrations/migrations.go b/internal/migrations/migrations.go new file mode 100644 index 0000000..007e6cb --- /dev/null +++ b/internal/migrations/migrations.go @@ -0,0 +1,201 @@ +package migrations + +import ( + "context" + "database/sql" + "fmt" + "time" + + "Post_Analyzer_Webserver/internal/logger" +) + +// Migration represents a database migration +type Migration struct { + Version int + Description string + Up func(*sql.DB) error + Down func(*sql.DB) error +} + +// Migrator handles database migrations +type Migrator struct { + db *sql.DB + migrations []Migration +} + +// NewMigrator creates a new migrator +func NewMigrator(db *sql.DB) *Migrator { + return &Migrator{ + db: db, + migrations: getMigrations(), + } +} + +// Migrate runs all pending migrations +func (m *Migrator) Migrate(ctx context.Context) error { + // Create migrations table if it doesn't exist + if err := m.createMigrationsTable(); err != nil { + return fmt.Errorf("failed to create migrations table: %w", err) + } + + // Get current version + currentVersion, err := m.getCurrentVersion() + if err != nil { + return fmt.Errorf("failed to get current version: %w", err) + } + + logger.Info("starting migrations", "current_version", currentVersion) + + // Run pending migrations + for _, migration := range m.migrations { + if migration.Version <= currentVersion { + continue + } + + logger.Info("running migration", "version", migration.Version, "description", migration.Description) + + // Begin transaction + tx, err := m.db.Begin() + if err != nil { + return fmt.Errorf("failed to begin transaction: %w", err) + } + + // Run migration + if err := migration.Up(m.db); err != nil { + _ = tx.Rollback() + return fmt.Errorf("migration %d failed: %w", migration.Version, err) + } + + // Update 
version + if err := m.updateVersion(tx, migration.Version, migration.Description); err != nil { + _ = tx.Rollback() + return fmt.Errorf("failed to update version: %w", err) + } + + // Commit transaction + if err := tx.Commit(); err != nil { + return fmt.Errorf("failed to commit migration: %w", err) + } + + logger.Info("migration completed", "version", migration.Version) + } + + logger.Info("all migrations completed") + return nil +} + +// createMigrationsTable creates the migrations tracking table +func (m *Migrator) createMigrationsTable() error { + query := ` + CREATE TABLE IF NOT EXISTS schema_migrations ( + id SERIAL PRIMARY KEY, + version INTEGER NOT NULL UNIQUE, + description TEXT NOT NULL, + applied_at TIMESTAMP NOT NULL DEFAULT NOW() + )` + + _, err := m.db.Exec(query) + return err +} + +// getCurrentVersion gets the current migration version +func (m *Migrator) getCurrentVersion() (int, error) { + var version int + err := m.db.QueryRow("SELECT COALESCE(MAX(version), 0) FROM schema_migrations").Scan(&version) + if err != nil { + return 0, err + } + return version, nil +} + +// updateVersion records a completed migration +func (m *Migrator) updateVersion(tx *sql.Tx, version int, description string) error { + query := "INSERT INTO schema_migrations (version, description, applied_at) VALUES ($1, $2, $3)" + _, err := tx.Exec(query, version, description, time.Now()) + return err +} + +// getMigrations returns all migrations in order +func getMigrations() []Migration { + return []Migration{ + { + Version: 1, + Description: "Create posts table", + Up: func(db *sql.DB) error { + query := ` + CREATE TABLE IF NOT EXISTS posts ( + id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL, + title VARCHAR(500) NOT NULL, + body TEXT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() + ); + + CREATE INDEX IF NOT EXISTS idx_posts_user_id ON posts(user_id); + CREATE INDEX IF NOT EXISTS idx_posts_created_at ON posts(created_at 
DESC); + ` + _, err := db.Exec(query) + return err + }, + Down: func(db *sql.DB) error { + _, err := db.Exec("DROP TABLE IF EXISTS posts") + return err + }, + }, + { + Version: 2, + Description: "Create audit_logs table", + Up: func(db *sql.DB) error { + query := ` + CREATE TABLE IF NOT EXISTS audit_logs ( + id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL, + action VARCHAR(50) NOT NULL, + resource VARCHAR(50) NOT NULL, + resource_id INTEGER NOT NULL, + changes TEXT, + ip_address VARCHAR(45), + user_agent TEXT, + created_at TIMESTAMP NOT NULL DEFAULT NOW() + ); + + CREATE INDEX IF NOT EXISTS idx_audit_logs_user_id ON audit_logs(user_id); + CREATE INDEX IF NOT EXISTS idx_audit_logs_resource ON audit_logs(resource, resource_id); + CREATE INDEX IF NOT EXISTS idx_audit_logs_created_at ON audit_logs(created_at DESC); + ` + _, err := db.Exec(query) + return err + }, + Down: func(db *sql.DB) error { + _, err := db.Exec("DROP TABLE IF EXISTS audit_logs") + return err + }, + }, + { + Version: 3, + Description: "Add full-text search indexes", + Up: func(db *sql.DB) error { + query := ` + CREATE INDEX IF NOT EXISTS idx_posts_title_trgm ON posts USING gin(title gin_trgm_ops); + CREATE INDEX IF NOT EXISTS idx_posts_body_trgm ON posts USING gin(body gin_trgm_ops); + ` + _, err := db.Exec(query) + if err != nil { + // If pg_trgm extension doesn't exist, skip this migration + logger.Warn("failed to create full-text search indexes, pg_trgm extension may not be enabled") + return nil + } + return err + }, + Down: func(db *sql.DB) error { + query := ` + DROP INDEX IF EXISTS idx_posts_title_trgm; + DROP INDEX IF EXISTS idx_posts_body_trgm; + ` + _, err := db.Exec(query) + return err + }, + }, + } +} diff --git a/internal/models/models.go b/internal/models/models.go new file mode 100644 index 0000000..3f91762 --- /dev/null +++ b/internal/models/models.go @@ -0,0 +1,146 @@ +package models + +import ( + "time" +) + +// Post represents a post in the system +type Post struct { + ID int 
`json:"id" db:"id"` + UserID int `json:"userId" db:"user_id"` + Title string `json:"title" db:"title"` + Body string `json:"body" db:"body"` + CreatedAt time.Time `json:"createdAt" db:"created_at"` + UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` +} + +// CreatePostRequest represents a request to create a post +type CreatePostRequest struct { + UserID int `json:"userId"` + Title string `json:"title" validate:"required,min=1,max=500"` + Body string `json:"body" validate:"required,min=1,max=10000"` +} + +// UpdatePostRequest represents a request to update a post +type UpdatePostRequest struct { + UserID int `json:"userId,omitempty"` + Title string `json:"title,omitempty" validate:"omitempty,min=1,max=500"` + Body string `json:"body,omitempty" validate:"omitempty,min=1,max=10000"` +} + +// PostFilter represents filtering options for posts +type PostFilter struct { + UserID *int `json:"userId,omitempty"` + Search string `json:"search,omitempty"` + SortBy string `json:"sortBy,omitempty"` // id, title, createdAt, updatedAt + SortOrder string `json:"sortOrder,omitempty"` // asc, desc +} + +// PaginationParams represents pagination parameters +type PaginationParams struct { + Page int `json:"page"` + PageSize int `json:"pageSize"` + Offset int `json:"-"` +} + +// PaginatedResponse represents a paginated API response +type PaginatedResponse struct { + Data interface{} `json:"data"` + Pagination PaginationMeta `json:"pagination"` + Meta *ResponseMeta `json:"meta,omitempty"` +} + +// PaginationMeta contains pagination metadata +type PaginationMeta struct { + Page int `json:"page"` + PageSize int `json:"pageSize"` + TotalItems int `json:"totalItems"` + TotalPages int `json:"totalPages"` + HasNext bool `json:"hasNext"` + HasPrev bool `json:"hasPrev"` +} + +// ResponseMeta contains additional response metadata +type ResponseMeta struct { + RequestID string `json:"requestId,omitempty"` + Timestamp time.Time `json:"timestamp"` + Duration time.Duration 
`json:"duration,omitempty"` +} + +// AnalyticsResult represents character frequency analysis results +type AnalyticsResult struct { + TotalPosts int `json:"totalPosts"` + TotalCharacters int `json:"totalCharacters"` + UniqueChars int `json:"uniqueChars"` + CharFrequency map[rune]int `json:"charFrequency"` + TopCharacters []CharacterStat `json:"topCharacters"` + Statistics *AnalyticsStats `json:"statistics,omitempty"` +} + +// CharacterStat represents statistics for a single character +type CharacterStat struct { + Character rune `json:"character"` + Count int `json:"count"` + Frequency float64 `json:"frequency"` +} + +// AnalyticsStats represents overall analytics statistics +type AnalyticsStats struct { + AveragePostLength float64 `json:"averagePostLength"` + MedianPostLength int `json:"medianPostLength"` + PostsPerUser map[int]int `json:"postsPerUser"` + TimeDistribution map[string]int `json:"timeDistribution"` +} + +// BulkCreateRequest represents a bulk create request +type BulkCreateRequest struct { + Posts []CreatePostRequest `json:"posts" validate:"required,min=1,max=1000"` +} + +// BulkCreateResponse represents a bulk create response +type BulkCreateResponse struct { + Created int `json:"created"` + Failed int `json:"failed"` + Errors []string `json:"errors,omitempty"` + PostIDs []int `json:"postIds,omitempty"` +} + +// ExportFormat represents export file format +type ExportFormat string + +const ( + ExportFormatJSON ExportFormat = "json" + ExportFormatCSV ExportFormat = "csv" +) + +// HealthResponse represents health check response +type HealthResponse struct { + Status string `json:"status"` + Timestamp time.Time `json:"timestamp"` + Version string `json:"version,omitempty"` + Uptime time.Duration `json:"uptime,omitempty"` + Checks map[string]bool `json:"checks,omitempty"` +} + +// User represents a user in the system (for future auth) +type User struct { + ID int `json:"id" db:"id"` + Email string `json:"email" db:"email"` + Username string 
`json:"username" db:"username"` + Role string `json:"role" db:"role"` + CreatedAt time.Time `json:"createdAt" db:"created_at"` + UpdatedAt time.Time `json:"updatedAt" db:"updated_at"` +} + +// AuditLog represents an audit log entry +type AuditLog struct { + ID int `json:"id" db:"id"` + UserID int `json:"userId" db:"user_id"` + Action string `json:"action" db:"action"` + Resource string `json:"resource" db:"resource"` + ResourceID int `json:"resourceId" db:"resource_id"` + Changes string `json:"changes,omitempty" db:"changes"` + IPAddress string `json:"ipAddress" db:"ip_address"` + UserAgent string `json:"userAgent" db:"user_agent"` + CreatedAt time.Time `json:"createdAt" db:"created_at"` +} diff --git a/internal/service/post_service.go b/internal/service/post_service.go new file mode 100644 index 0000000..779ec40 --- /dev/null +++ b/internal/service/post_service.go @@ -0,0 +1,533 @@ +package service + +import ( + "context" + "encoding/csv" + "encoding/json" + "fmt" + "io" + "sort" + "strings" + "time" + + "Post_Analyzer_Webserver/internal/errors" + "Post_Analyzer_Webserver/internal/logger" + "Post_Analyzer_Webserver/internal/metrics" + "Post_Analyzer_Webserver/internal/models" + "Post_Analyzer_Webserver/internal/storage" +) + +// PostService handles business logic for posts +type PostService struct { + storage storage.Storage +} + +// NewPostService creates a new post service +func NewPostService(storage storage.Storage) *PostService { + return &PostService{ + storage: storage, + } +} + +// GetAll retrieves all posts with optional filtering and pagination +func (s *PostService) GetAll(ctx context.Context, filter *models.PostFilter, pagination *models.PaginationParams) ([]models.Post, *models.PaginationMeta, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("get_all_posts", "success", time.Since(start)) + }() + + // Get all posts from storage + storagePosts, err := s.storage.GetAll(ctx) + if err != nil { + 
metrics.RecordDBOperation("get_all_posts", "error", time.Since(start)) + return nil, nil, errors.Wrap(err, "failed to retrieve posts") + } + + // Convert storage posts to models + posts := make([]models.Post, len(storagePosts)) + for i, sp := range storagePosts { + posts[i] = models.Post{ + ID: sp.Id, + UserID: sp.UserId, + Title: sp.Title, + Body: sp.Body, + CreatedAt: sp.CreatedAt, + UpdatedAt: sp.UpdatedAt, + } + } + + // Apply filtering + posts = s.filterPosts(posts, filter) + + // Apply sorting + posts = s.sortPosts(posts, filter) + + // Calculate pagination + totalItems := len(posts) + paginationMeta := s.calculatePagination(totalItems, pagination) + + // Apply pagination + if pagination != nil { + start := pagination.Offset + end := start + pagination.PageSize + if start > len(posts) { + posts = []models.Post{} + } else if end > len(posts) { + posts = posts[start:] + } else { + posts = posts[start:end] + } + } + + return posts, paginationMeta, nil +} + +// GetByID retrieves a post by ID +func (s *PostService) GetByID(ctx context.Context, id int) (*models.Post, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("get_post_by_id", "success", time.Since(start)) + }() + + storagePost, err := s.storage.GetByID(ctx, id) + if err != nil { + if err == storage.ErrNotFound { + return nil, errors.NewNotFound("Post") + } + metrics.RecordDBOperation("get_post_by_id", "error", time.Since(start)) + return nil, errors.Wrap(err, "failed to retrieve post") + } + + post := &models.Post{ + ID: storagePost.Id, + UserID: storagePost.UserId, + Title: storagePost.Title, + Body: storagePost.Body, + CreatedAt: storagePost.CreatedAt, + UpdatedAt: storagePost.UpdatedAt, + } + + return post, nil +} + +// Create creates a new post +func (s *PostService) Create(ctx context.Context, req *models.CreatePostRequest) (*models.Post, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("create_post", "success", time.Since(start)) + }() + + // 
Validate input + if err := s.validateCreateRequest(req); err != nil { + return nil, err + } + + // Create storage post + storagePost := &storage.Post{ + UserId: req.UserID, + Title: strings.TrimSpace(req.Title), + Body: strings.TrimSpace(req.Body), + } + + if err := s.storage.Create(ctx, storagePost); err != nil { + metrics.RecordDBOperation("create_post", "error", time.Since(start)) + return nil, errors.Wrap(err, "failed to create post") + } + + logger.InfoContext(ctx, "post created", "id", storagePost.Id) + + post := &models.Post{ + ID: storagePost.Id, + UserID: storagePost.UserId, + Title: storagePost.Title, + Body: storagePost.Body, + CreatedAt: storagePost.CreatedAt, + UpdatedAt: storagePost.UpdatedAt, + } + + return post, nil +} + +// Update updates an existing post +func (s *PostService) Update(ctx context.Context, id int, req *models.UpdatePostRequest) (*models.Post, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("update_post", "success", time.Since(start)) + }() + + // Get existing post + existing, err := s.storage.GetByID(ctx, id) + if err != nil { + if err == storage.ErrNotFound { + return nil, errors.NewNotFound("Post") + } + return nil, errors.Wrap(err, "failed to retrieve post") + } + + // Update fields + if req.UserID != 0 { + existing.UserId = req.UserID + } + if req.Title != "" { + if len(req.Title) > 500 { + return nil, errors.NewValidationError("title too long").WithField("title", "maximum 500 characters") + } + existing.Title = strings.TrimSpace(req.Title) + } + if req.Body != "" { + if len(req.Body) > 10000 { + return nil, errors.NewValidationError("body too long").WithField("body", "maximum 10000 characters") + } + existing.Body = strings.TrimSpace(req.Body) + } + + if err := s.storage.Update(ctx, existing); err != nil { + metrics.RecordDBOperation("update_post", "error", time.Since(start)) + return nil, errors.Wrap(err, "failed to update post") + } + + post := &models.Post{ + ID: existing.Id, + UserID: 
existing.UserId, + Title: existing.Title, + Body: existing.Body, + CreatedAt: existing.CreatedAt, + UpdatedAt: existing.UpdatedAt, + } + + return post, nil +} + +// Delete deletes a post +func (s *PostService) Delete(ctx context.Context, id int) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("delete_post", "success", time.Since(start)) + }() + + if err := s.storage.Delete(ctx, id); err != nil { + if err == storage.ErrNotFound { + return errors.NewNotFound("Post") + } + metrics.RecordDBOperation("delete_post", "error", time.Since(start)) + return errors.Wrap(err, "failed to delete post") + } + + logger.InfoContext(ctx, "post deleted", "id", id) + return nil +} + +// BulkCreate creates multiple posts +func (s *PostService) BulkCreate(ctx context.Context, req *models.BulkCreateRequest) (*models.BulkCreateResponse, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("bulk_create_posts", "success", time.Since(start)) + }() + + response := &models.BulkCreateResponse{ + PostIDs: make([]int, 0), + Errors: make([]string, 0), + } + + for i, postReq := range req.Posts { + post, err := s.Create(ctx, &postReq) + if err != nil { + response.Failed++ + response.Errors = append(response.Errors, fmt.Sprintf("post %d: %v", i+1, err)) + continue + } + response.Created++ + response.PostIDs = append(response.PostIDs, post.ID) + } + + logger.InfoContext(ctx, "bulk create completed", "created", response.Created, "failed", response.Failed) + return response, nil +} + +// ExportPosts exports posts in the specified format +func (s *PostService) ExportPosts(ctx context.Context, writer io.Writer, format models.ExportFormat, filter *models.PostFilter) error { + posts, _, err := s.GetAll(ctx, filter, nil) + if err != nil { + return errors.Wrap(err, "failed to retrieve posts for export") + } + + switch format { + case models.ExportFormatJSON: + return s.exportJSON(writer, posts) + case models.ExportFormatCSV: + return s.exportCSV(writer, posts) 
+ default: + return errors.NewValidationError("unsupported export format") + } +} + +// AnalyzeCharacterFrequency performs character frequency analysis +func (s *PostService) AnalyzeCharacterFrequency(ctx context.Context) (*models.AnalyticsResult, error) { + start := time.Now() + defer func() { + metrics.RecordAnalysisOperation(time.Since(start)) + }() + + posts, _, err := s.GetAll(ctx, nil, nil) + if err != nil { + return nil, errors.Wrap(err, "failed to retrieve posts for analysis") + } + + result := &models.AnalyticsResult{ + TotalPosts: len(posts), + CharFrequency: make(map[rune]int), + } + + // Analyze character frequency + totalChars := 0 + postLengths := make([]int, len(posts)) + postsPerUser := make(map[int]int) + + for i, post := range posts { + text := post.Title + " " + post.Body + postLengths[i] = len(text) + postsPerUser[post.UserID]++ + + for _, char := range text { + result.CharFrequency[char]++ + totalChars++ + } + } + + result.TotalCharacters = totalChars + result.UniqueChars = len(result.CharFrequency) + + // Calculate top characters + result.TopCharacters = s.calculateTopCharacters(result.CharFrequency, totalChars) + + // Calculate statistics + result.Statistics = &models.AnalyticsStats{ + AveragePostLength: s.calculateAverage(postLengths), + MedianPostLength: s.calculateMedian(postLengths), + PostsPerUser: postsPerUser, + TimeDistribution: s.calculateTimeDistribution(posts), + } + + logger.InfoContext(ctx, "character analysis completed", + "total_posts", result.TotalPosts, + "total_chars", result.TotalCharacters, + "unique_chars", result.UniqueChars, + ) + + return result, nil +} + +// Helper methods + +func (s *PostService) validateCreateRequest(req *models.CreatePostRequest) error { + validationErr := errors.NewValidationError("validation failed") + hasError := false + + if req.Title == "" { + _ = validationErr.WithField("title", "title is required") + hasError = true + } else if len(req.Title) > 500 { + _ = validationErr.WithField("title", 
"title too long (max 500 characters)") + hasError = true + } + + if req.Body == "" { + _ = validationErr.WithField("body", "body is required") + hasError = true + } else if len(req.Body) > 10000 { + _ = validationErr.WithField("body", "body too long (max 10000 characters)") + hasError = true + } + + if hasError { + return validationErr + } + return nil +} + +func (s *PostService) filterPosts(posts []models.Post, filter *models.PostFilter) []models.Post { + if filter == nil { + return posts + } + + filtered := make([]models.Post, 0, len(posts)) + for _, post := range posts { + // Filter by user ID + if filter.UserID != nil && post.UserID != *filter.UserID { + continue + } + + // Filter by search term + if filter.Search != "" { + searchLower := strings.ToLower(filter.Search) + if !strings.Contains(strings.ToLower(post.Title), searchLower) && + !strings.Contains(strings.ToLower(post.Body), searchLower) { + continue + } + } + + filtered = append(filtered, post) + } + + return filtered +} + +func (s *PostService) sortPosts(posts []models.Post, filter *models.PostFilter) []models.Post { + if filter == nil || filter.SortBy == "" { + return posts + } + + sortBy := filter.SortBy + sortOrder := filter.SortOrder + if sortOrder == "" { + sortOrder = "desc" + } + + sort.Slice(posts, func(i, j int) bool { + var less bool + switch sortBy { + case "id": + less = posts[i].ID < posts[j].ID + case "title": + less = posts[i].Title < posts[j].Title + case "createdAt": + less = posts[i].CreatedAt.Before(posts[j].CreatedAt) + case "updatedAt": + less = posts[i].UpdatedAt.Before(posts[j].UpdatedAt) + default: + less = posts[i].ID < posts[j].ID + } + + if sortOrder == "desc" { + return !less + } + return less + }) + + return posts +} + +func (s *PostService) calculatePagination(totalItems int, params *models.PaginationParams) *models.PaginationMeta { + if params == nil { + return nil + } + + totalPages := (totalItems + params.PageSize - 1) / params.PageSize + if totalPages == 0 { + 
totalPages = 1 + } + + return &models.PaginationMeta{ + Page: params.Page, + PageSize: params.PageSize, + TotalItems: totalItems, + TotalPages: totalPages, + HasNext: params.Page < totalPages, + HasPrev: params.Page > 1, + } +} + +func (s *PostService) exportJSON(writer io.Writer, posts []models.Post) error { + encoder := json.NewEncoder(writer) + encoder.SetIndent("", " ") + return encoder.Encode(posts) +} + +func (s *PostService) exportCSV(writer io.Writer, posts []models.Post) error { + csvWriter := csv.NewWriter(writer) + defer csvWriter.Flush() + + // Write header + if err := csvWriter.Write([]string{"ID", "UserID", "Title", "Body", "CreatedAt", "UpdatedAt"}); err != nil { + return err + } + + // Write data + for _, post := range posts { + row := []string{ + fmt.Sprintf("%d", post.ID), + fmt.Sprintf("%d", post.UserID), + post.Title, + post.Body, + post.CreatedAt.Format(time.RFC3339), + post.UpdatedAt.Format(time.RFC3339), + } + if err := csvWriter.Write(row); err != nil { + return err + } + } + + return nil +} + +func (s *PostService) calculateTopCharacters(charFreq map[rune]int, totalChars int) []models.CharacterStat { + stats := make([]models.CharacterStat, 0, len(charFreq)) + + for char, count := range charFreq { + frequency := float64(count) / float64(totalChars) * 100 + stats = append(stats, models.CharacterStat{ + Character: char, + Count: count, + Frequency: frequency, + }) + } + + // Sort by count descending + sort.Slice(stats, func(i, j int) bool { + return stats[i].Count > stats[j].Count + }) + + // Return top 20 + if len(stats) > 20 { + stats = stats[:20] + } + + return stats +} + +func (s *PostService) calculateAverage(values []int) float64 { + if len(values) == 0 { + return 0 + } + sum := 0 + for _, v := range values { + sum += v + } + return float64(sum) / float64(len(values)) +} + +func (s *PostService) calculateMedian(values []int) int { + if len(values) == 0 { + return 0 + } + sorted := make([]int, len(values)) + copy(sorted, values) + 
sort.Ints(sorted) + + mid := len(sorted) / 2 + if len(sorted)%2 == 0 { + return (sorted[mid-1] + sorted[mid]) / 2 + } + return sorted[mid] +} + +func (s *PostService) calculateTimeDistribution(posts []models.Post) map[string]int { + distribution := make(map[string]int) + + for _, post := range posts { + hour := post.CreatedAt.Hour() + var period string + if hour < 6 { + period = "night" + } else if hour < 12 { + period = "morning" + } else if hour < 18 { + period = "afternoon" + } else { + period = "evening" + } + distribution[period]++ + } + + return distribution +} diff --git a/internal/storage/file.go b/internal/storage/file.go new file mode 100644 index 0000000..2a4abd7 --- /dev/null +++ b/internal/storage/file.go @@ -0,0 +1,293 @@ +package storage + +import ( + "context" + "encoding/json" + "errors" + "os" + "sync" + "time" + + "Post_Analyzer_Webserver/internal/logger" + "Post_Analyzer_Webserver/internal/metrics" +) + +// FileStorage implements Storage interface using JSON file +type FileStorage struct { + filePath string + mu sync.RWMutex +} + +// NewFileStorage creates a new file-based storage +func NewFileStorage(filePath string) (*FileStorage, error) { + fs := &FileStorage{ + filePath: filePath, + } + + // Initialize file if it doesn't exist + if _, err := os.Stat(filePath); os.IsNotExist(err) { + if err := fs.writeToFile([]Post{}); err != nil { + return nil, err + } + } + + return fs, nil +} + +// GetAll retrieves all posts +func (fs *FileStorage) GetAll(ctx context.Context) ([]Post, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("get_all", "success", time.Since(start)) + }() + + fs.mu.RLock() + defer fs.mu.RUnlock() + + posts, err := fs.readFromFile() + if err != nil { + metrics.RecordDBOperation("get_all", "error", time.Since(start)) + logger.ErrorContext(ctx, "failed to read posts from file", "error", err) + return nil, err + } + + metrics.RecordPostsTotal(len(posts)) + return posts, nil +} + +// GetByID retrieves a post by 
ID +func (fs *FileStorage) GetByID(ctx context.Context, id int) (*Post, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("get_by_id", "success", time.Since(start)) + }() + + fs.mu.RLock() + defer fs.mu.RUnlock() + + posts, err := fs.readFromFile() + if err != nil { + metrics.RecordDBOperation("get_by_id", "error", time.Since(start)) + return nil, err + } + + for _, post := range posts { + if post.Id == id { + return &post, nil + } + } + + return nil, ErrNotFound +} + +// Create creates a new post +func (fs *FileStorage) Create(ctx context.Context, post *Post) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("create", "success", time.Since(start)) + }() + + if err := post.Validate(); err != nil { + return err + } + + fs.mu.Lock() + defer fs.mu.Unlock() + + posts, err := fs.readFromFile() + if err != nil { + metrics.RecordDBOperation("create", "error", time.Since(start)) + return err + } + + // Generate new ID + maxID := 0 + for _, p := range posts { + if p.Id > maxID { + maxID = p.Id + } + } + post.Id = maxID + 1 + post.CreatedAt = time.Now() + post.UpdatedAt = time.Now() + + posts = append(posts, *post) + + if err := fs.writeToFile(posts); err != nil { + metrics.RecordDBOperation("create", "error", time.Since(start)) + return err + } + + metrics.RecordPostAdded() + logger.InfoContext(ctx, "post created", "id", post.Id) + return nil +} + +// Update updates an existing post +func (fs *FileStorage) Update(ctx context.Context, post *Post) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("update", "success", time.Since(start)) + }() + + if err := post.Validate(); err != nil { + return err + } + + fs.mu.Lock() + defer fs.mu.Unlock() + + posts, err := fs.readFromFile() + if err != nil { + metrics.RecordDBOperation("update", "error", time.Since(start)) + return err + } + + found := false + for i, p := range posts { + if p.Id == post.Id { + post.UpdatedAt = time.Now() + posts[i] = *post + found = 
true + break + } + } + + if !found { + return ErrNotFound + } + + if err := fs.writeToFile(posts); err != nil { + metrics.RecordDBOperation("update", "error", time.Since(start)) + return err + } + + logger.InfoContext(ctx, "post updated", "id", post.Id) + return nil +} + +// Delete deletes a post by ID +func (fs *FileStorage) Delete(ctx context.Context, id int) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("delete", "success", time.Since(start)) + }() + + fs.mu.Lock() + defer fs.mu.Unlock() + + posts, err := fs.readFromFile() + if err != nil { + metrics.RecordDBOperation("delete", "error", time.Since(start)) + return err + } + + found := false + newPosts := make([]Post, 0, len(posts)) + for _, p := range posts { + if p.Id != id { + newPosts = append(newPosts, p) + } else { + found = true + } + } + + if !found { + return ErrNotFound + } + + if err := fs.writeToFile(newPosts); err != nil { + metrics.RecordDBOperation("delete", "error", time.Since(start)) + return err + } + + logger.InfoContext(ctx, "post deleted", "id", id) + return nil +} + +// BatchCreate creates multiple posts in a batch +func (fs *FileStorage) BatchCreate(ctx context.Context, newPosts []Post) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("batch_create", "success", time.Since(start)) + }() + + fs.mu.Lock() + defer fs.mu.Unlock() + + existingPosts, err := fs.readFromFile() + if err != nil && !errors.Is(err, os.ErrNotExist) { + metrics.RecordDBOperation("batch_create", "error", time.Since(start)) + return err + } + + // Use existing posts if available, otherwise start fresh + now := time.Now() + for i := range newPosts { + if newPosts[i].CreatedAt.IsZero() { + newPosts[i].CreatedAt = now + } + if newPosts[i].UpdatedAt.IsZero() { + newPosts[i].UpdatedAt = now + } + } + + if err := fs.writeToFile(newPosts); err != nil { + metrics.RecordDBOperation("batch_create", "error", time.Since(start)) + return err + } + + 
metrics.RecordPostsFetched(len(newPosts)) + logger.InfoContext(ctx, "batch posts created", "count", len(newPosts), "previous_count", len(existingPosts)) + return nil +} + +// Count returns the total number of posts +func (fs *FileStorage) Count(ctx context.Context) (int, error) { + fs.mu.RLock() + defer fs.mu.RUnlock() + + posts, err := fs.readFromFile() + if err != nil { + return 0, err + } + + return len(posts), nil +} + +// Close closes the storage (no-op for file storage) +func (fs *FileStorage) Close() error { + return nil +} + +// readFromFile reads posts from the JSON file +func (fs *FileStorage) readFromFile() ([]Post, error) { + data, err := os.ReadFile(fs.filePath) + if err != nil { + if os.IsNotExist(err) { + return []Post{}, nil + } + return nil, err + } + + if len(data) == 0 { + return []Post{}, nil + } + + var posts []Post + if err := json.Unmarshal(data, &posts); err != nil { + return nil, err + } + + return posts, nil +} + +// writeToFile writes posts to the JSON file +func (fs *FileStorage) writeToFile(posts []Post) error { + data, err := json.MarshalIndent(posts, "", " ") + if err != nil { + return err + } + + return os.WriteFile(fs.filePath, data, 0644) +} diff --git a/internal/storage/file_test.go b/internal/storage/file_test.go new file mode 100644 index 0000000..7254c81 --- /dev/null +++ b/internal/storage/file_test.go @@ -0,0 +1,336 @@ +package storage + +import ( + "context" + "os" + "path/filepath" + "testing" +) + +func TestFileStorage_CreateAndGet(t *testing.T) { + // Create temporary directory for test + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + ctx := context.Background() + + // Create a post + post := &Post{ + UserId: 1, + Title: "Test Post", + Body: "This is a test post body", + } + + err = store.Create(ctx, post) + if err != nil { + t.Fatalf("Failed to 
create post: %v", err) + } + + if post.Id == 0 { + t.Error("Post ID should be assigned") + } + + // Retrieve the post + retrieved, err := store.GetByID(ctx, post.Id) + if err != nil { + t.Fatalf("Failed to get post: %v", err) + } + + if retrieved.Title != post.Title { + t.Errorf("Expected title %s, got %s", post.Title, retrieved.Title) + } + if retrieved.Body != post.Body { + t.Errorf("Expected body %s, got %s", post.Body, retrieved.Body) + } +} + +func TestFileStorage_GetAll(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + ctx := context.Background() + + // Create multiple posts + for i := 1; i <= 3; i++ { + post := &Post{ + UserId: i, + Title: "Test Post " + string(rune(i)), + Body: "Test body", + } + if err := store.Create(ctx, post); err != nil { + t.Fatalf("Failed to create post: %v", err) + } + } + + // Get all posts + posts, err := store.GetAll(ctx) + if err != nil { + t.Fatalf("Failed to get all posts: %v", err) + } + + if len(posts) != 3 { + t.Errorf("Expected 3 posts, got %d", len(posts)) + } +} + +func TestFileStorage_Update(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + ctx := context.Background() + + // Create a post + post := &Post{ + UserId: 1, + Title: "Original Title", + Body: "Original Body", + } + if err := store.Create(ctx, post); err != nil { + t.Fatalf("Failed to create post: %v", err) + } + + // Update the post + post.Title = "Updated Title" + post.Body = "Updated Body" + if err := store.Update(ctx, post); err != nil { + t.Fatalf("Failed to update post: %v", err) + } + + // Retrieve and verify + retrieved, err := store.GetByID(ctx, post.Id) + if err != nil { + 
t.Fatalf("Failed to get post: %v", err) + } + + if retrieved.Title != "Updated Title" { + t.Errorf("Expected updated title, got %s", retrieved.Title) + } + if retrieved.Body != "Updated Body" { + t.Errorf("Expected updated body, got %s", retrieved.Body) + } +} + +func TestFileStorage_Delete(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + ctx := context.Background() + + // Create a post + post := &Post{ + UserId: 1, + Title: "To Be Deleted", + Body: "This post will be deleted", + } + if err := store.Create(ctx, post); err != nil { + t.Fatalf("Failed to create post: %v", err) + } + + // Delete the post + if err := store.Delete(ctx, post.Id); err != nil { + t.Fatalf("Failed to delete post: %v", err) + } + + // Try to retrieve - should get error + _, err = store.GetByID(ctx, post.Id) + if err != ErrNotFound { + t.Errorf("Expected ErrNotFound, got %v", err) + } +} + +func TestFileStorage_Validation(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + ctx := context.Background() + + // Test empty title + post := &Post{ + UserId: 1, + Title: "", + Body: "Body", + } + err = store.Create(ctx, post) + if err == nil { + t.Error("Expected error for empty title") + } + + // Test empty body + post = &Post{ + UserId: 1, + Title: "Title", + Body: "", + } + err = store.Create(ctx, post) + if err == nil { + t.Error("Expected error for empty body") + } +} + +func TestFileStorage_Count(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer 
store.Close() + + ctx := context.Background() + + // Initial count should be 0 + count, err := store.Count(ctx) + if err != nil { + t.Fatalf("Failed to count posts: %v", err) + } + if count != 0 { + t.Errorf("Expected count 0, got %d", count) + } + + // Create posts + for i := 0; i < 5; i++ { + post := &Post{ + UserId: 1, + Title: "Test", + Body: "Body", + } + _ = store.Create(ctx, post) + } + + // Count should be 5 + count, err = store.Count(ctx) + if err != nil { + t.Fatalf("Failed to count posts: %v", err) + } + if count != 5 { + t.Errorf("Expected count 5, got %d", count) + } +} + +func TestFileStorage_BatchCreate(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + ctx := context.Background() + + // Create batch of posts + posts := []Post{ + {Id: 1, UserId: 1, Title: "Post 1", Body: "Body 1"}, + {Id: 2, UserId: 1, Title: "Post 2", Body: "Body 2"}, + {Id: 3, UserId: 1, Title: "Post 3", Body: "Body 3"}, + } + + err = store.BatchCreate(ctx, posts) + if err != nil { + t.Fatalf("Failed to batch create posts: %v", err) + } + + // Verify count + count, _ := store.Count(ctx) + if count != 3 { + t.Errorf("Expected 3 posts, got %d", count) + } + + // Verify individual posts exist + for _, post := range posts { + retrieved, err := store.GetByID(ctx, post.Id) + if err != nil { + t.Errorf("Failed to get post %d: %v", post.Id, err) + } + if retrieved.Title != post.Title { + t.Errorf("Expected title %s, got %s", post.Title, retrieved.Title) + } + } +} + +func TestFileStorage_ConcurrentAccess(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "test_posts.json") + + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + ctx := context.Background() + + // Create posts concurrently + done 
:= make(chan bool) + for i := 0; i < 10; i++ { + go func(id int) { + post := &Post{ + UserId: id, + Title: "Concurrent Post", + Body: "Body", + } + _ = store.Create(ctx, post) + done <- true + }(i) + } + + // Wait for all goroutines + for i := 0; i < 10; i++ { + <-done + } + + // Verify all posts were created + count, _ := store.Count(ctx) + if count != 10 { + t.Errorf("Expected 10 posts, got %d", count) + } +} + +func TestFileStorage_FileNotExists(t *testing.T) { + tmpDir := t.TempDir() + filePath := filepath.Join(tmpDir, "nonexistent.json") + + // Should create the file if it doesn't exist + store, err := NewFileStorage(filePath) + if err != nil { + t.Fatalf("Failed to create file storage: %v", err) + } + defer store.Close() + + // Verify file was created + if _, err := os.Stat(filePath); os.IsNotExist(err) { + t.Error("Expected file to be created") + } +} diff --git a/internal/storage/postgres.go b/internal/storage/postgres.go new file mode 100644 index 0000000..f7a5154 --- /dev/null +++ b/internal/storage/postgres.go @@ -0,0 +1,307 @@ +package storage + +import ( + "context" + "database/sql" + "fmt" + "time" + + "Post_Analyzer_Webserver/config" + "Post_Analyzer_Webserver/internal/logger" + "Post_Analyzer_Webserver/internal/metrics" + + _ "github.com/lib/pq" +) + +// PostgresStorage implements Storage interface using PostgreSQL +type PostgresStorage struct { + db *sql.DB +} + +// NewPostgresStorage creates a new PostgreSQL storage +func NewPostgresStorage(cfg *config.DatabaseConfig) (*PostgresStorage, error) { + dsn := fmt.Sprintf( + "host=%s port=%s user=%s password=%s dbname=%s sslmode=%s", + cfg.Host, cfg.Port, cfg.User, cfg.Password, cfg.DBName, cfg.SSLMode, + ) + + db, err := sql.Open("postgres", dsn) + if err != nil { + return nil, fmt.Errorf("failed to open database: %w", err) + } + + db.SetMaxOpenConns(cfg.MaxConns) + db.SetMaxIdleConns(cfg.MinConns) + db.SetConnMaxLifetime(time.Hour) + + if err := db.Ping(); err != nil { + return nil, fmt.Errorf("failed 
to ping database: %w", err) + } + + ps := &PostgresStorage{db: db} + + // Initialize schema + if err := ps.initSchema(); err != nil { + return nil, fmt.Errorf("failed to initialize schema: %w", err) + } + + return ps, nil +} + +// initSchema creates the necessary database tables +func (ps *PostgresStorage) initSchema() error { + schema := ` + CREATE TABLE IF NOT EXISTS posts ( + id SERIAL PRIMARY KEY, + user_id INTEGER NOT NULL, + title VARCHAR(500) NOT NULL, + body TEXT NOT NULL, + created_at TIMESTAMP NOT NULL DEFAULT NOW(), + updated_at TIMESTAMP NOT NULL DEFAULT NOW() + ); + + CREATE INDEX IF NOT EXISTS idx_posts_user_id ON posts(user_id); + CREATE INDEX IF NOT EXISTS idx_posts_created_at ON posts(created_at DESC); + ` + + _, err := ps.db.Exec(schema) + return err +} + +// GetAll retrieves all posts +func (ps *PostgresStorage) GetAll(ctx context.Context) ([]Post, error) { + start := time.Now() + defer func() { + metrics.RecordDBOperation("get_all", "success", time.Since(start)) + }() + + query := `SELECT id, user_id, title, body, created_at, updated_at FROM posts ORDER BY created_at DESC` + + rows, err := ps.db.QueryContext(ctx, query) + if err != nil { + metrics.RecordDBOperation("get_all", "error", time.Since(start)) + logger.ErrorContext(ctx, "failed to query posts", "error", err) + return nil, err + } + defer rows.Close() + + var posts []Post + for rows.Next() { + var post Post + if err := rows.Scan(&post.Id, &post.UserId, &post.Title, &post.Body, &post.CreatedAt, &post.UpdatedAt); err != nil { + metrics.RecordDBOperation("get_all", "error", time.Since(start)) + return nil, err + } + posts = append(posts, post) + } + + if err := rows.Err(); err != nil { + metrics.RecordDBOperation("get_all", "error", time.Since(start)) + return nil, err + } + + metrics.RecordPostsTotal(len(posts)) + return posts, nil +} + +// GetByID retrieves a post by ID +func (ps *PostgresStorage) GetByID(ctx context.Context, id int) (*Post, error) { + start := time.Now() + defer func() 
{ + metrics.RecordDBOperation("get_by_id", "success", time.Since(start)) + }() + + query := `SELECT id, user_id, title, body, created_at, updated_at FROM posts WHERE id = $1` + + var post Post + err := ps.db.QueryRowContext(ctx, query, id).Scan( + &post.Id, &post.UserId, &post.Title, &post.Body, &post.CreatedAt, &post.UpdatedAt, + ) + + if err == sql.ErrNoRows { + return nil, ErrNotFound + } + if err != nil { + metrics.RecordDBOperation("get_by_id", "error", time.Since(start)) + logger.ErrorContext(ctx, "failed to query post", "id", id, "error", err) + return nil, err + } + + return &post, nil +} + +// Create creates a new post +func (ps *PostgresStorage) Create(ctx context.Context, post *Post) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("create", "success", time.Since(start)) + }() + + if err := post.Validate(); err != nil { + return err + } + + query := ` + INSERT INTO posts (user_id, title, body, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5) + RETURNING id, created_at, updated_at + ` + + now := time.Now() + err := ps.db.QueryRowContext( + ctx, query, + post.UserId, post.Title, post.Body, now, now, + ).Scan(&post.Id, &post.CreatedAt, &post.UpdatedAt) + + if err != nil { + metrics.RecordDBOperation("create", "error", time.Since(start)) + logger.ErrorContext(ctx, "failed to create post", "error", err) + return err + } + + metrics.RecordPostAdded() + logger.InfoContext(ctx, "post created", "id", post.Id) + return nil +} + +// Update updates an existing post +func (ps *PostgresStorage) Update(ctx context.Context, post *Post) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("update", "success", time.Since(start)) + }() + + if err := post.Validate(); err != nil { + return err + } + + query := ` + UPDATE posts + SET user_id = $1, title = $2, body = $3, updated_at = $4 + WHERE id = $5 + RETURNING updated_at + ` + + err := ps.db.QueryRowContext( + ctx, query, + post.UserId, post.Title, post.Body, time.Now(), 
post.Id, + ).Scan(&post.UpdatedAt) + + if err == sql.ErrNoRows { + return ErrNotFound + } + if err != nil { + metrics.RecordDBOperation("update", "error", time.Since(start)) + logger.ErrorContext(ctx, "failed to update post", "id", post.Id, "error", err) + return err + } + + logger.InfoContext(ctx, "post updated", "id", post.Id) + return nil +} + +// Delete deletes a post by ID +func (ps *PostgresStorage) Delete(ctx context.Context, id int) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("delete", "success", time.Since(start)) + }() + + query := `DELETE FROM posts WHERE id = $1` + + result, err := ps.db.ExecContext(ctx, query, id) + if err != nil { + metrics.RecordDBOperation("delete", "error", time.Since(start)) + logger.ErrorContext(ctx, "failed to delete post", "id", id, "error", err) + return err + } + + rowsAffected, err := result.RowsAffected() + if err != nil { + return err + } + + if rowsAffected == 0 { + return ErrNotFound + } + + logger.InfoContext(ctx, "post deleted", "id", id) + return nil +} + +// BatchCreate creates multiple posts in a batch +func (ps *PostgresStorage) BatchCreate(ctx context.Context, posts []Post) error { + start := time.Now() + defer func() { + metrics.RecordDBOperation("batch_create", "success", time.Since(start)) + }() + + if len(posts) == 0 { + return nil + } + + tx, err := ps.db.BeginTx(ctx, nil) + if err != nil { + metrics.RecordDBOperation("batch_create", "error", time.Since(start)) + return err + } + defer func() { _ = tx.Rollback() }() + + stmt, err := tx.PrepareContext(ctx, ` + INSERT INTO posts (id, user_id, title, body, created_at, updated_at) + VALUES ($1, $2, $3, $4, $5, $6) + ON CONFLICT (id) DO NOTHING + `) + if err != nil { + metrics.RecordDBOperation("batch_create", "error", time.Since(start)) + return err + } + defer stmt.Close() + + now := time.Now() + for _, post := range posts { + createdAt := post.CreatedAt + if createdAt.IsZero() { + createdAt = now + } + updatedAt := post.UpdatedAt 
+ if updatedAt.IsZero() { + updatedAt = now + } + + _, err := stmt.ExecContext(ctx, post.Id, post.UserId, post.Title, post.Body, createdAt, updatedAt) + if err != nil { + metrics.RecordDBOperation("batch_create", "error", time.Since(start)) + logger.ErrorContext(ctx, "failed to insert post in batch", "id", post.Id, "error", err) + return err + } + } + + if err := tx.Commit(); err != nil { + metrics.RecordDBOperation("batch_create", "error", time.Since(start)) + return err + } + + metrics.RecordPostsFetched(len(posts)) + logger.InfoContext(ctx, "batch posts created", "count", len(posts)) + return nil +} + +// Count returns the total number of posts +func (ps *PostgresStorage) Count(ctx context.Context) (int, error) { + query := `SELECT COUNT(*) FROM posts` + + var count int + err := ps.db.QueryRowContext(ctx, query).Scan(&count) + if err != nil { + logger.ErrorContext(ctx, "failed to count posts", "error", err) + return 0, err + } + + return count, nil +} + +// Close closes the database connection +func (ps *PostgresStorage) Close() error { + return ps.db.Close() +} diff --git a/internal/storage/storage.go b/internal/storage/storage.go new file mode 100644 index 0000000..6ee2169 --- /dev/null +++ b/internal/storage/storage.go @@ -0,0 +1,68 @@ +package storage + +import ( + "context" + "errors" + "time" +) + +var ( + // ErrNotFound is returned when a post is not found + ErrNotFound = errors.New("post not found") + // ErrInvalidInput is returned when input validation fails + ErrInvalidInput = errors.New("invalid input") +) + +// Post represents a post in the system +type Post struct { + UserId int `json:"userId"` + Id int `json:"id"` + Title string `json:"title"` + Body string `json:"body"` + CreatedAt time.Time `json:"createdAt,omitempty"` + UpdatedAt time.Time `json:"updatedAt,omitempty"` +} + +// Storage defines the interface for post storage operations +type Storage interface { + // GetAll retrieves all posts + GetAll(ctx context.Context) ([]Post, error) + + // 
GetByID retrieves a post by ID + GetByID(ctx context.Context, id int) (*Post, error) + + // Create creates a new post + Create(ctx context.Context, post *Post) error + + // Update updates an existing post + Update(ctx context.Context, post *Post) error + + // Delete deletes a post by ID + Delete(ctx context.Context, id int) error + + // BatchCreate creates multiple posts in a batch + BatchCreate(ctx context.Context, posts []Post) error + + // Count returns the total number of posts + Count(ctx context.Context) (int, error) + + // Close closes the storage connection + Close() error +} + +// Validate validates a post +func (p *Post) Validate() error { + if p.Title == "" { + return errors.New("title is required") + } + if len(p.Title) > 500 { + return errors.New("title too long (max 500 characters)") + } + if p.Body == "" { + return errors.New("body is required") + } + if len(p.Body) > 10000 { + return errors.New("body too long (max 10000 characters)") + } + return nil +} diff --git a/main.go b/main.go index cc42b3d..3eb9d74 100644 --- a/main.go +++ b/main.go @@ -1,201 +1,213 @@ package main import ( - "encoding/json" + "context" + "database/sql" "fmt" - "html/template" - "io/ioutil" "net/http" "os" - "sync" + "os/signal" + "syscall" + "time" + + "Post_Analyzer_Webserver/config" + "Post_Analyzer_Webserver/internal/api" + "Post_Analyzer_Webserver/internal/cache" + "Post_Analyzer_Webserver/internal/handlers" + "Post_Analyzer_Webserver/internal/logger" + "Post_Analyzer_Webserver/internal/metrics" + "Post_Analyzer_Webserver/internal/middleware" + "Post_Analyzer_Webserver/internal/migrations" + "Post_Analyzer_Webserver/internal/service" + "Post_Analyzer_Webserver/internal/storage" + + _ "github.com/lib/pq" ) -// Post struct to map the JSON data -type Post struct { - UserId int `json:"userId"` - Id int `json:"id"` - Title string `json:"title"` - Body string `json:"body"` -} - -// Template variables -type HomePageVars struct { - Title string - Posts []Post - CharFreq 
map[rune]int - Error string - HasPosts bool - HasAnalysis bool -} - -// Custom template functions -var funcMap = template.FuncMap{ - "toJSON": func(v interface{}) string { - data, _ := json.Marshal(v) - return string(data) - }, -} - -var templates = template.Must(template.New("").Funcs(funcMap).ParseFiles("home.html")) +var ( + version = "2.0.0" + buildTime = time.Now().Format(time.RFC3339) + startTime = time.Now() +) func main() { - port := os.Getenv("PORT") // Get the PORT from the environment variable - if port == "" { - port = "8080" // Fallback to 8080 if the PORT environment variable is not set - } - - http.HandleFunc("/", HomeHandler) - http.HandleFunc("/fetch", FetchPostsHandler) - http.HandleFunc("/analyze", AnalyzePostsHandler) - http.HandleFunc("/add", AddPostHandler) - - fmt.Printf("Server starting at PORT: %s\n", port) - http.ListenAndServe(":"+port, nil) -} - -// HomeHandler serves the home page -func HomeHandler(w http.ResponseWriter, r *http.Request) { - posts, err := readPostsFromFile() + // Load configuration + cfg, err := config.Load() if err != nil { - renderTemplate(w, HomePageVars{Title: "Home", Error: "Failed to read posts: " + err.Error()}) - return + fmt.Fprintf(os.Stderr, "Failed to load configuration: %v\n", err) + os.Exit(1) } - renderTemplate(w, HomePageVars{Title: "Home", Posts: posts, HasPosts: len(posts) > 0}) -} -// FetchPostsHandler fetches posts and writes them to a file -func FetchPostsHandler(w http.ResponseWriter, r *http.Request) { - posts, err := fetchPosts() - if err != nil { - renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to fetch posts: " + err.Error()}) - return + // Initialize logger + if err := logger.Init(&cfg.Logging); err != nil { + fmt.Fprintf(os.Stderr, "Failed to initialize logger: %v\n", err) + os.Exit(1) } - if err := writePostsToFile(posts); err != nil { - renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to write posts to file: " + err.Error()}) - return - } + logger.Info("starting 
Post Analyzer Webserver", + "version", version, + "environment", cfg.Server.Environment, + "port", cfg.Server.Port, + "database_type", cfg.Database.Type, + ) - renderTemplate(w, HomePageVars{Title: "Posts Fetched", Posts: posts, HasPosts: true}) -} - -// AnalyzePostsHandler reads the posts file and analyzes character frequency -func AnalyzePostsHandler(w http.ResponseWriter, r *http.Request) { - count, err := countCharacters("posts.json") - if err != nil { - renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to analyze posts: " + err.Error()}) - return - } - - renderTemplate(w, HomePageVars{Title: "Character Analysis", CharFreq: count, HasAnalysis: true}) -} + // Initialize storage + var store storage.Storage + var db *sql.DB -// AddPostHandler allows the user to add a new post -func AddPostHandler(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodPost { - var post Post - post.UserId = 1 - post.Id = generatePostID() - post.Title = r.FormValue("title") - post.Body = r.FormValue("body") - - posts, err := readPostsFromFile() + if cfg.Database.Type == "postgres" { + pgStore, err := storage.NewPostgresStorage(&cfg.Database) if err != nil { - renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to read posts: " + err.Error()}) - return + logger.Error("failed to initialize PostgreSQL storage", "error", err) + os.Exit(1) } - - posts = append(posts, post) - - if err := writePostsToFile(posts); err != nil { - renderTemplate(w, HomePageVars{Title: "Error", Error: "Failed to write post to file: " + err.Error()}) - return + store = pgStore + + // Get underlying DB for migrations + dsn := fmt.Sprintf( + "host=%s port=%s user=%s password=%s dbname=%s sslmode=%s", + cfg.Database.Host, cfg.Database.Port, cfg.Database.User, + cfg.Database.Password, cfg.Database.DBName, cfg.Database.SSLMode, + ) + db, err = sql.Open("postgres", dsn) + if err != nil { + logger.Error("failed to open database for migrations", "error", err) + os.Exit(1) + } + defer 
db.Close() + + // Run migrations + logger.Info("running database migrations...") + migrator := migrations.NewMigrator(db) + if err := migrator.Migrate(context.Background()); err != nil { + logger.Error("migration failed", "error", err) + os.Exit(1) } - renderTemplate(w, HomePageVars{Title: "Post Added", Posts: posts, HasPosts: true}) + logger.Info("using PostgreSQL storage") } else { - renderTemplate(w, HomePageVars{Title: "Add New Post"}) - } -} - -func fetchPosts() ([]Post, error) { - resp, err := http.Get("https://jsonplaceholder.typicode.com/posts") - if err != nil { - return nil, err + fileStore, err := storage.NewFileStorage(cfg.Database.FilePath) + if err != nil { + logger.Error("failed to initialize file storage", "error", err) + os.Exit(1) + } + store = fileStore + logger.Info("using file storage", "path", cfg.Database.FilePath) } - defer resp.Body.Close() + defer store.Close() - var posts []Post - if err := json.NewDecoder(resp.Body).Decode(&posts); err != nil { - return nil, err - } - return posts, nil -} + // Initialize cache + _ = cache.NewCache(cfg) // Cache initialized for future use + logger.Info("cache initialized", "type", "memory") -func writePostsToFile(posts []Post) error { - file, err := os.Create("posts.json") - if err != nil { - return err - } - defer file.Close() + // Initialize service layer + postService := service.NewPostService(store) + logger.Info("service layer initialized") - encoder := json.NewEncoder(file) - if err := encoder.Encode(posts); err != nil { - return err - } - return nil -} + // Initialize API handlers + apiHandler := api.NewAPI(postService) + apiRouter := api.NewRouter(apiHandler) + logger.Info("API handlers initialized") -func readPostsFromFile() ([]Post, error) { - data, err := ioutil.ReadFile("posts.json") + // Initialize web handlers + webHandlers, err := handlers.New(store, cfg) if err != nil { - return nil, err + logger.Error("failed to initialize web handlers", "error", err) + os.Exit(1) } - - var posts []Post - 
if err := json.Unmarshal(data, &posts); err != nil { - return nil, err + defer webHandlers.Close() + + // Setup HTTP router + mux := http.NewServeMux() + + // Health and monitoring endpoints + mux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { + uptime := time.Since(startTime) + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"status":"healthy","version":"%s","uptime":"%s","timestamp":"%s"}`, + version, uptime, time.Now().Format(time.RFC3339)) + }) + mux.HandleFunc("/readiness", webHandlers.Readiness) + mux.Handle("/metrics", metrics.Handler()) + + // API endpoints (v1) + mux.Handle("/api/", apiRouter) + mux.Handle("/api/v1/", apiRouter) + + // Web interface endpoints + mux.HandleFunc("/", webHandlers.Home) + mux.HandleFunc("/fetch", webHandlers.FetchPosts) + mux.HandleFunc("/analyze", webHandlers.AnalyzePosts) + mux.HandleFunc("/add", webHandlers.AddPost) + + // Serve static assets + mux.Handle("/assets/", http.StripPrefix("/assets/", http.FileServer(http.Dir("assets")))) + + // Create rate limiter + rateLimiter := middleware.NewRateLimiter( + cfg.Security.RateLimitRequests, + cfg.Security.RateLimitWindow, + ) + + // Apply middleware chain + handler := middleware.Chain( + middleware.RequestID, + middleware.Logging, + middleware.Recovery, + middleware.SecurityHeaders, + middleware.CORS(cfg.Security.AllowedOrigins), + rateLimiter.Middleware, + middleware.MaxBodySize(cfg.Security.MaxBodySize), + middleware.Compression, + metrics.Middleware, + )(mux) + + // Create HTTP server with production settings + server := &http.Server{ + Addr: fmt.Sprintf("%s:%s", cfg.Server.Host, cfg.Server.Port), + Handler: handler, + ReadTimeout: cfg.Server.ReadTimeout, + WriteTimeout: cfg.Server.WriteTimeout, + IdleTimeout: cfg.Server.IdleTimeout, } - return posts, nil -} -func countCharacters(filePath string) (map[rune]int, error) { - data, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } + 
// Start server in a goroutine + go func() { + logger.Info("server listening", + "address", server.Addr, + "read_timeout", cfg.Server.ReadTimeout, + "write_timeout", cfg.Server.WriteTimeout, + ) + logger.Info("endpoints available", + "web", fmt.Sprintf("http://%s:%s/", cfg.Server.Host, cfg.Server.Port), + "api", fmt.Sprintf("http://%s:%s/api/v1/posts", cfg.Server.Host, cfg.Server.Port), + "health", fmt.Sprintf("http://%s:%s/health", cfg.Server.Host, cfg.Server.Port), + "metrics", fmt.Sprintf("http://%s:%s/metrics", cfg.Server.Host, cfg.Server.Port), + ) + + if err := server.ListenAndServe(); err != nil && err != http.ErrServerClosed { + logger.Error("server failed to start", "error", err) + os.Exit(1) + } + }() - charCount := make(map[rune]int) - mu := sync.Mutex{} - wg := sync.WaitGroup{} - - for _, byteValue := range string(data) { - wg.Add(1) - go func(c rune) { - defer wg.Done() - mu.Lock() - charCount[c]++ - mu.Unlock() - }(rune(byteValue)) - } + // Wait for interrupt signal for graceful shutdown + quit := make(chan os.Signal, 1) + signal.Notify(quit, os.Interrupt, syscall.SIGTERM) + <-quit - wg.Wait() - return charCount, nil -} + logger.Info("shutting down server gracefully...") -func renderTemplate(w http.ResponseWriter, vars HomePageVars) { - if err := templates.ExecuteTemplate(w, "home.html", vars); err != nil { - http.Error(w, "Failed to render template", http.StatusInternalServerError) - } -} + // Create shutdown context with timeout + ctx, cancel := context.WithTimeout(context.Background(), cfg.Server.ShutdownTimeout) + defer cancel() -func generatePostID() int { - posts, _ := readPostsFromFile() - maxID := 0 - for _, post := range posts { - if post.Id > maxID { - maxID = post.Id - } + // Attempt graceful shutdown + if err := server.Shutdown(ctx); err != nil { + logger.Error("server forced to shutdown", "error", err) + os.Exit(1) } - return maxID + 1 + + logger.Info("server stopped gracefully") } diff --git a/prometheus.yml b/prometheus.yml new file 
mode 100644 index 0000000..9035715 --- /dev/null +++ b/prometheus.yml @@ -0,0 +1,12 @@ +global: + scrape_interval: 15s + evaluation_interval: 15s + external_labels: + monitor: 'post-analyzer-monitor' + +scrape_configs: + - job_name: 'post-analyzer' + static_configs: + - targets: ['app:8080'] + metrics_path: '/metrics' + scrape_interval: 10s diff --git a/scripts/setup.sh b/scripts/setup.sh new file mode 100755 index 0000000..a484772 --- /dev/null +++ b/scripts/setup.sh @@ -0,0 +1,135 @@ +#!/bin/bash + +# Post Analyzer Webserver - Setup Script +# This script helps set up the development environment + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' # No Color + +echo -e "${BLUE}╔══════════════════════════════════════════════════════╗${NC}" +echo -e "${BLUE}║ Post Analyzer Webserver - Setup Script ║${NC}" +echo -e "${BLUE}╚══════════════════════════════════════════════════════╝${NC}" +echo "" + +# Check Go installation +echo -e "${YELLOW}→${NC} Checking Go installation..." +if ! command -v go &> /dev/null; then + echo -e "${RED}✗${NC} Go is not installed. Please install Go 1.21 or higher." + echo -e " Download from: https://golang.org/dl/" + exit 1 +fi + +GO_VERSION=$(go version | awk '{print $3}' | sed 's/go//') +echo -e "${GREEN}✓${NC} Go ${GO_VERSION} is installed" + +# Check if Docker is installed (optional) +echo -e "${YELLOW}→${NC} Checking Docker installation..." +if command -v docker &> /dev/null; then + DOCKER_VERSION=$(docker --version | awk '{print $3}' | sed 's/,//') + echo -e "${GREEN}✓${NC} Docker ${DOCKER_VERSION} is installed" + HAS_DOCKER=true +else + echo -e "${YELLOW}!${NC} Docker is not installed (optional)" + HAS_DOCKER=false +fi + +# Check if Docker Compose is installed (optional) +if [ "$HAS_DOCKER" = true ]; then + echo -e "${YELLOW}→${NC} Checking Docker Compose installation..." 
+ if command -v docker-compose &> /dev/null; then + COMPOSE_VERSION=$(docker-compose --version | awk '{print $4}' | sed 's/,//') + echo -e "${GREEN}✓${NC} Docker Compose ${COMPOSE_VERSION} is installed" + HAS_COMPOSE=true + else + echo -e "${YELLOW}!${NC} Docker Compose is not installed (optional)" + HAS_COMPOSE=false + fi +fi + +# Install Go dependencies +echo "" +echo -e "${YELLOW}→${NC} Installing Go dependencies..." +go mod download +go mod tidy +echo -e "${GREEN}✓${NC} Dependencies installed" + +# Create .env file if it doesn't exist +echo "" +echo -e "${YELLOW}→${NC} Setting up environment configuration..." +if [ ! -f .env ]; then + if [ -f .env.example ]; then + cp .env.example .env + echo -e "${GREEN}✓${NC} Created .env from .env.example" + else + echo -e "${YELLOW}!${NC} .env.example not found, creating default .env" + cat > .env <