diff --git a/.gitattributes b/.gitattributes
index a4b8b2b..bd3bd88 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1,3 +1,5 @@
# Auto detect text files and perform LF normalization
* text=lf
-*.js linguist-detect=false
\ No newline at end of file
+*.js linguist-detect=false
+.vscode/*.json linguist-language=JSON-with-Comments
+*.cjs linguist-vendored
\ No newline at end of file
diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml
deleted file mode 100644
index 16bd3ea..0000000
--- a/.github/workflows/commit.yml
+++ /dev/null
@@ -1,54 +0,0 @@
-name: Code Check on Push
-
-on:
- push:
- branches-ignore:
- - main
-
-jobs:
- frontend-check:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup Node.js
- uses: actions/setup-node@v4
- with:
- node-version: "20"
-
- - name: Enable Corepack
- run: corepack enable
-
- - name: Install Yarn 2
- run: yarn set version berry
- working-directory: ./frontend
-
- - name: Install dependencies
- run: yarn install
- working-directory: ./frontend
-
- - name: Run linter
- run: yarn run lint
- working-directory: ./frontend
-
- - name: Build
- run: yarn run build
- working-directory: ./frontend
-
- backend-check:
- runs-on: ubuntu-latest
-
- steps:
- - uses: actions/checkout@v4
-
- - name: Setup PDM
- uses: pdm-project/setup-pdm@v4
-
- - name: Install dependencies
- working-directory: ./backend
- run: pdm install --dev
-
- - name: Run linter
- working-directory: ./backend
- run: pdm run ruff .
\ No newline at end of file
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index e0e886e..3531989 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -2,7 +2,7 @@ name: Code Check on Release
on:
push:
- branches: [main]
+ branches: [main, rc]
jobs:
frontend-check:
@@ -99,21 +99,24 @@ jobs:
- uses: actions/checkout@v4
with:
fetch-depth: 0
-
- - name: Build Docker image
- run: docker build -t antonk0/arq-ui:latest .
+ - name: Set up QEMU
+ uses: docker/setup-qemu-action@v3
+
+ - name: Set up Docker Buildx
+ uses: docker/setup-buildx-action@v3
+
+ - name: Install Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+
- name: Download build artifact
uses: actions/download-artifact@v4
with:
name: build-archive
path: ./
-
- - name: Install semantic release
- uses: actions/setup-node@v4
- with:
- node-version: "20"
-
+
- name: Install dependencies
run: npm ci
@@ -121,5 +124,5 @@ jobs:
run: npm run release
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
- DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
\ No newline at end of file
+ DOCKER_REGISTRY_USER: ${{ secrets.DOCKER_USERNAME }}
+ DOCKER_REGISTRY_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
\ No newline at end of file
diff --git a/.github/workflows/pr.yml b/.github/workflows/pr.yml
index 58db9da..3e991e9 100644
--- a/.github/workflows/pr.yml
+++ b/.github/workflows/pr.yml
@@ -2,9 +2,56 @@ name: Code Check on Push
on:
pull_request:
- branches: [main]
+ branches: [main, rc]
jobs:
+ frontend-check:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Setup Node.js
+ uses: actions/setup-node@v4
+ with:
+ node-version: "20"
+
+ - name: Enable Corepack
+ run: corepack enable
+
+ - name: Install Yarn 2
+ run: yarn set version berry
+ working-directory: ./frontend
+
+ - name: Install dependencies
+ run: yarn install
+ working-directory: ./frontend
+
+ - name: Run linter
+ run: yarn run lint
+ working-directory: ./frontend
+
+ - name: Build
+ run: yarn run build
+ working-directory: ./frontend
+
+ backend-check:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+
+ - name: Setup PDM
+ uses: pdm-project/setup-pdm@v4
+
+ - name: Install dependencies
+ working-directory: ./backend
+ run: pdm install --dev
+
+ - name: Run linter
+ working-directory: ./backend
+ run: pdm run ruff .
+
docker-build:
runs-on: ubuntu-latest
diff --git a/.gitignore b/.gitignore
index 68bc17f..737f721 100644
--- a/.gitignore
+++ b/.gitignore
@@ -158,3 +158,5 @@ cython_debug/
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
+/node_modules
+
diff --git a/.releaserc b/.releaserc
index 7e24d74..313aba8 100644
--- a/.releaserc
+++ b/.releaserc
@@ -1,6 +1,10 @@
{
"branches": [
- "main"
+ "main",
+ {
+ "name": "rc",
+ "prerelease": true
+ }
],
"plugins": [
"@semantic-release/commit-analyzer",
@@ -18,9 +22,20 @@
}
],
[
- "@semantic-release-plus/docker",
+ "@codedependant/semantic-release-docker",
{
- "name": "antonk0/arq-ui"
+ "dockerTags": [
+ "{{#if prerelease.[0]}}{{prerelease.[0]}}{{else}}latest{{/if}}",
+ "{{major}}-{{#if prerelease.[0]}}{{prerelease.[0]}}{{else}}latest{{/if}}",
+ "{{major}}.{{minor}}-{{#if prerelease.[0]}}{{prerelease.[0]}}{{else}}latest{{/if}}",
+ "{{version}}"
+ ],
+ "dockerImage": "arq-ui",
+ "dockerProject": "antonk0",
+ "dockerPlatform": [
+ "linux/amd64",
+ "linux/arm64"
+ ]
}
]
]
diff --git a/README.md b/README.md
index e024e7b..14cd4b0 100644
--- a/README.md
+++ b/README.md
@@ -1,5 +1,93 @@
# arq-ui
-View and manage tasks running through arq
+
+This project is a simple interface for monitoring jobs executed using the [arq](https://github.com/samuelcolvin/arq) library.
+
+![arq UI](./docs/screenshot1.png)
+
+## Features
+
+- Providing statistics on job statuses: total number, number of jobs in queue, jobs being executed, and jobs with errors.
+- Visualization of job distribution over time in the form of a timeline.
+- Displaying a list of jobs with filtering, searching, and sorting capabilities.
+- Ability to abort jobs.
+
+## Limitations
+
+- The interface reads data directly from Redis, not storing the state of jobs, which allows tracking tasks only for the last hour. There are plans to add functionality for permanent data storage.
+- `arq` uses the `pickle` library for data serialization, which limits the ability to deserialize objects of custom classes that are not accessible. If `arq-ui` cannot deserialize the data, tasks with such data will not be displayed in the monitoring.
+- To enable job cancellation, the worker must be configured with the `allow_abort_jobs` parameter.
+- Handling a large number of jobs (more than 50,000) may decrease the application's performance.
+- The interface does not have built-in security features; therefore, it should not be exposed publicly without additional security measures.
+
+
+## Demo
+
+[https://demo.arq-ui.florm.io/arq/ui/](https://demo.arq-ui.florm.io/arq/ui/)
+
+## Installation and Launch
+
+### Using Docker
+
+To launch:
+
+```bash
+docker pull antonk0/arq-ui
+docker run -p 8000:8000 -e REDIS_HOST=redis.host.com -e REDIS_PORT=6379 antonk0/arq-ui
+```
+
+docker-compose.yml
+
+```yaml
+version: '3'
+services:
+ arq-ui:
+ image: antonk0/arq-ui:latest
+ restart: unless-stopped
+ environment:
+ - REDIS_HOST=redis.host.com
+ - REDIS_PASSWORD=your_password
+ - REDIS_PORT=6379
+ - REDIS_SSL=False
+ - REDIS_DB=0
+ ports:
+ - 8000:8000
+```
+
+### Running from Source
+
+Download the latest stable release from the GitHub releases, unzip it, and execute the following commands:
+
+```bash
+pdm install
+cd src
+pdm run uvicorn main:app --host=0.0.0.0 --port=8000
+```
+
+If `pdm` is not installed, you can install it using `pip`:
+
+```bash
+pip install pdm
+```
+
+## Configuration
+
+The interface is accessible at `/arq/ui/`. Swagger API documentation is available at `/arq/api/docs/`.
+
+Possible environment variables:
+
+| Variable | Description | Default Value |
+| -------- | ----------- | ------------- |
+| `REDIS_HOST` | Address of the Redis server | `redis` |
+| `REDIS_PORT` | Port of the Redis server | `6379` |
+| `REDIS_PASSWORD` | Password for connecting to Redis | `""` (no password) |
+| `REDIS_SSL` | Whether to use SSL for connecting to Redis | `False` |
+| `REDIS_DB` | Redis database number | `0` |
+| `MAX_JOBS` | Maximum number of tasks that can be displayed in the interface | `50000` |
+| `REQUEST_SEMAPHORE_JOBS` | Number of tasks that can be requested simultaneously | `5` |
+| `QUEUE_NAME` | Name of the queue in Redis | `arq:queue` |
+
+## Development
+
```
docker compose -f docker-compose.dev.yml build
diff --git a/backend/src/core/config.py b/backend/src/core/config.py
index 027ab72..5f5cf64 100644
--- a/backend/src/core/config.py
+++ b/backend/src/core/config.py
@@ -36,6 +36,7 @@ class Settings(BaseSettings):
max_jobs: int = 50000
request_semaphore_jobs: int = 5
+ queue_name: str = "arq:queue"
model_config = SettingsConfigDict(env_file=os.getenv("ENV_FILE", ".env"))
diff --git a/backend/src/endpoints/jobs.py b/backend/src/endpoints/jobs.py
index 2c458ee..65d8901 100644
--- a/backend/src/endpoints/jobs.py
+++ b/backend/src/endpoints/jobs.py
@@ -249,14 +249,8 @@ async def get_hourly_statistics() -> list[JobsTimeStatistics]:
)
async def create_job(new_job: JobCreate) -> Job:
"""Create job."""
- job_service = JobService(
- get_redis_settings(),
- get_lru_cache(),
- )
- job = await job_service.create_job(new_job)
- if not job:
- raise HTTPException(
- status_code=500,
- detail="Failed to create a job.",
+ logger.info(f"Creating job with function {new_job.function}.")
+ raise HTTPException(
+ status_code=501,
+ detail="Not implemented",
)
- return job
diff --git a/backend/src/schemas/job.py b/backend/src/schemas/job.py
index f461de7..b1c19a7 100644
--- a/backend/src/schemas/job.py
+++ b/backend/src/schemas/job.py
@@ -83,45 +83,54 @@ class Job(BaseModel):
description="Unique identifier for the job",
examples=["f0c9d0944f1b4763b261ab5d49581321"],
)
+
status: JobStatus = Field(
default=JobStatus.not_found,
description="Status of the job",
examples=["complete"],
)
+
success: bool = Field(
default=False,
description="Indicates whether the job was successfully completed",
examples=[True],
)
+
enqueue_time: datetime = Field(
description="Date and time when the job was enqueued",
examples=["2024-03-24T17:32:30.587000+00:00"],
)
+
result: str | None = Field(
default=None,
description="Result string of the job",
examples=["ok"],
)
+
start_time: datetime | None = Field(
default=None,
description="Date and time when the job was started",
examples=["2024-03-24T17:32:30.587000+00:00"],
)
+
finish_time: datetime | None = Field(
default=None,
description="Date and time when the job was finished",
examples=["2024-03-24T17:32:30.587000+00:00"],
)
+
queue_name: str | None = Field(
default=None,
description="Name of the queue in which the job was enqueued",
examples=["arq:queue"],
)
- execution_duration: int | None = Field(
+
+ execution_duration: float | None = Field(
default=None,
- description="Duration of the job execution in seconds",
+ description="Duration of the job execution in seconds, including milliseconds",
examples=[0],
)
+
function: str = Field(
...,
description="Name of the function that was executed by the job",
@@ -158,20 +167,26 @@ class JobCreate(BaseModel):
"""Represents a job creation request."""
function: str = Field(..., description="Name of the function to execute")
+
args: list[Any] = Field(
default_factory=list,
description="Positional arguments of the function",
)
+
job_id: str | None = Field(None, description="Job identifier")
+
queue_name: str | None = Field(None, description="Name of the queue")
+
defer_until: datetime | None = Field(
None,
description="Postpone execution until the specified date/time",
)
+
expires: timedelta | None = Field(
None,
description="Set the expiration time for the job",
)
+
kwargs: dict[str, Any] = Field(
default_factory=dict,
description="Named arguments of the function",
diff --git a/backend/src/services/job_service.py b/backend/src/services/job_service.py
index 47c8ae5..5ce6ae4 100644
--- a/backend/src/services/job_service.py
+++ b/backend/src/services/job_service.py
@@ -57,7 +57,7 @@ async def fetch_job_info(
"",
)
- arq_job = ArqJob(key_id_without_prefix, redis)
+ arq_job = ArqJob(key_id_without_prefix, redis, _queue_name=settings.queue_name)
status = await arq_job.status()
if status == arq.jobs.JobStatus.complete:
@@ -82,7 +82,7 @@ async def fetch_job_info(
start_time=job_result.start_time.replace(tzinfo=ZoneInfo(settings.timezone)),
finish_time=job_result.finish_time.replace(tzinfo=ZoneInfo(settings.timezone)),
queue_name=job_result.queue_name,
- execution_duration=int(
+ execution_duration=float(
(job_result.finish_time - job_result.start_time).total_seconds(),
),
)
@@ -121,12 +121,12 @@ async def get_all_jobs(self, max_jobs: int = 50000) -> list[Job]:
jobs_task = await asyncio.gather(*tasks)
jobs: list[Job] = [job for job in jobs_task if job is not None]
- twenty_four_hours_ago = datetime.now(UTC) - timedelta(hours=1)
+ one_hour_ago = datetime.now(UTC) - timedelta(hours=1)
return [
job
for job in jobs
- if job.enqueue_time >= twenty_four_hours_ago
- or (job.start_time and job.start_time >= twenty_four_hours_ago)
+ if job.enqueue_time >= one_hour_ago
+ or (job.start_time and job.start_time >= one_hour_ago)
]
async def get_job_by_id(self, job_id: str) -> Job | None:
@@ -142,7 +142,7 @@ async def get_job_by_id(self, job_id: str) -> Job | None:
async def abort_job(self, job_id: str) -> bool:
"""Abort job."""
redis = await create_pool(self.redis_settings)
- job: ArqJob = ArqJob(job_id, redis)
+ job: ArqJob = ArqJob(job_id, redis, _queue_name=settings.queue_name)
return await job.abort()
def adjust_color_intensity(self, color_intensity: float) -> float:
@@ -160,27 +160,30 @@ def generate_statistics(self, jobs_list: list[Job]) -> list[JobsTimeStatistics]:
"""Generate statistics for jobs."""
# TODO: This function is too complex. It should be refactored.
max_time_diff = 60
+ now_fixed = datetime.now(UTC).replace(second=0, microsecond=0)
- one_hour_ago = datetime.now(UTC).replace(second=0, microsecond=0) - timedelta(hours=1)
- current_minute = (datetime.now(UTC) - one_hour_ago).total_seconds() // 60
+ one_hour_ago = now_fixed - timedelta(hours=1) + timedelta(minutes=1)
+ current_minute = (now_fixed - one_hour_ago).total_seconds() // max_time_diff
statistics = [
JobsTimeStatistics(date=(one_hour_ago + timedelta(minutes=i)))
for i in range(max_time_diff)
]
for job in jobs_list:
- created_diff = (job.enqueue_time - one_hour_ago).total_seconds() // 60
+ created_diff = (job.enqueue_time - one_hour_ago).total_seconds() // max_time_diff
if 0 <= created_diff < max_time_diff:
statistics[int(created_diff)].total_created += 1
if job.status == JobStatus.in_progress and job.start_time:
- start_diff = int((job.start_time - one_hour_ago).total_seconds() // 60)
+ start_diff = int((job.start_time - one_hour_ago).total_seconds() // max_time_diff)
for i in range(start_diff, min(int(current_minute) + 1, max_time_diff)):
statistics[i].total_in_progress += 1
if job.status == JobStatus.complete and job.start_time and job.finish_time:
- start_diff = int((job.start_time - one_hour_ago).total_seconds() // 60)
- finish_diff = int((job.finish_time - one_hour_ago).total_seconds() // 60)
+ start_diff = int((job.start_time - one_hour_ago).total_seconds() // max_time_diff)
+ finish_diff = int(
+ (job.finish_time - one_hour_ago).total_seconds() // max_time_diff,
+ )
for i in range(start_diff, min(finish_diff + 1, max_time_diff)):
statistics[i].total_in_progress += 1
if finish_diff < max_time_diff:
diff --git a/backend/src/worker/models.py b/backend/src/worker/models.py
new file mode 100644
index 0000000..2c9d6da
--- /dev/null
+++ b/backend/src/worker/models.py
@@ -0,0 +1,42 @@
+from pydantic import BaseModel
+
+class FuelSystemStatus(BaseModel):
+ fuel_level: float
+ pumps_status: str
+
+class NavigationSystemDiagnostic(BaseModel):
+ system_status: str
+ error_code: int | None
+
+class LifeSupportSystemTestResult(BaseModel):
+ air_quality: str
+ water_supply_status: str
+
+class CommunicationSystemCheck(BaseModel):
+ signal_strength: float
+ connection_status: str
+ satellite_link_quality: float
+ bandwidth: float
+ latency: float
+ error_rate: float
+ frequency_stability: str
+ encryption_status: str
+ backup_system_status: str
+ hardware_integrity: str
+ software_version: str
+ last_maintenance_date: str
+
+class LaunchReadinessAnalysis(BaseModel):
+ readiness_status: str
+ fuel_system_ready: bool
+ navigation_system_ready: bool
+ life_support_system_ready: bool
+ communication_system_ready: bool
+ propulsion_system_status: str
+ structural_integrity: str
+ onboard_computer_status: str
+ launch_pad_systems_ready: bool
+ weather_conditions: str
+ crew_readiness: bool
+ issues: list[str] = []
+ final_inspection_completed: bool
\ No newline at end of file
diff --git a/backend/src/worker/task_creator.py b/backend/src/worker/task_creator.py
index 650cc8b..0af03fe 100644
--- a/backend/src/worker/task_creator.py
+++ b/backend/src/worker/task_creator.py
@@ -1,36 +1,87 @@
import asyncio
import logging
import random
-from datetime import datetime
+import sys
-from arq import create_pool
-from arq.connections import RedisSettings
+sys.path.insert(0, "/app/src")
+from arq import ArqRedis, create_pool
+from core.config import Settings, get_app_settings
+from core.depends import get_redis_settings
logger = logging.getLogger(__name__)
+settings: Settings = get_app_settings()
-def get_redis_settings() -> RedisSettings:
- return RedisSettings(host="redis")
+async def enqueue_job(redis_pool: ArqRedis, is_successful: bool):
+ await redis_pool.enqueue_job(
+ random.choice(
+ [
+ "check_fuel_system",
+ "diagnose_navigation_system",
+ "test_life_support_system",
+ "check_communication_system",
+ "analyze_launch_readiness",
+ ],
+ ),
+ is_successful=is_successful,
+ )
+async def pattern_hill(redis_pool: ArqRedis, max_tasks_per_minute: int):
+ total_minutes = 10
+ for minute in range(1, total_minutes + 1):
+ num_tasks = (
+ round(max_tasks_per_minute * (minute / 5))
+ if minute <= 5
+ else round(max_tasks_per_minute * (2 - minute / 5))
+ )
+ for _ in range(num_tasks):
+ await enqueue_job(redis_pool, True)
+
+ if minute < total_minutes:
+ await asyncio.sleep(60)
-async def create_tasks() -> None:
- print("Start creating tasks")
+async def pattern_increasing_errors(redis_pool: ArqRedis, max_tasks_per_minute: int):
+ total_minutes = 5
+ for minute in range(1, total_minutes + 1):
+ num_tasks = random.randint(1, max_tasks_per_minute)
+ error_ratio = minute / 5
+ for _ in range(num_tasks):
+ is_successful = random.random() > error_ratio
+ await enqueue_job(redis_pool, is_successful)
+ if minute < total_minutes:
+ await asyncio.sleep(60)
- redis = await create_pool(get_redis_settings())
- while True:
- num_tasks = random.randint(1, 5)
- print(f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S')}] Creating {num_tasks} tasks")
+async def pattern_peak(redis_pool: ArqRedis, max_tasks_per_minute: int):
+ total_minutes = 5
+ for minute in range(1, total_minutes + 1):
+ num_tasks = round(max_tasks_per_minute * (2 * abs(3 - minute) / 5))
for _ in range(num_tasks):
- await redis.enqueue_job(
- "process_task",
- func="Dummy Function",
- success=random.choice([True, False]),
- )
- await asyncio.sleep(60)
+ await enqueue_job(redis_pool, True)
+
+ if minute < total_minutes:
+ await asyncio.sleep(60)
+async def pattern_random_distribution(redis_pool: ArqRedis, max_tasks_per_minute: int):
+ total_minutes = 10
+ for _ in range(total_minutes):
+ num_tasks = random.randint(0, max_tasks_per_minute)
+ for _ in range(num_tasks):
+ await enqueue_job(redis_pool, True)
+
+ if _ < total_minutes - 1:
+ await asyncio.sleep(60)
+
+
+async def main():
+ redis_pool = await create_pool(get_redis_settings(), default_queue_name=settings.queue_name)
+
+ while True:
+ await pattern_hill(redis_pool, 20)
+ await pattern_random_distribution(redis_pool, 20)
+ await pattern_increasing_errors(redis_pool, 20)
+ await pattern_random_distribution(redis_pool, 20)
+ await pattern_peak(redis_pool, 20)
+
if __name__ == "__main__":
- try:
- asyncio.run(create_tasks())
- except Exception as e:
- print(f"Error occurred: {e}")
+ asyncio.run(main())
diff --git a/backend/src/worker/worker.py b/backend/src/worker/worker.py
index c728306..8d85cf0 100644
--- a/backend/src/worker/worker.py
+++ b/backend/src/worker/worker.py
@@ -1,38 +1,112 @@
import asyncio
+import logging
import random
-from typing import Any
+import sys
-from arq import Retry, run_worker
-from arq.connections import RedisSettings
-from pydantic import BaseModel
+sys.path.insert(0, '/app/src')
+from core.depends import get_redis_settings
+from models import CommunicationSystemCheck, FuelSystemStatus, LaunchReadinessAnalysis, LifeSupportSystemTestResult, NavigationSystemDiagnostic
+from typing import Any
+from arq import run_worker
+from core.config import Settings, get_app_settings
+from functools import wraps
+import asyncio
-def get_redis_settings() -> RedisSettings:
- return RedisSettings(host="redis")
+logger = logging.getLogger(__name__)
+settings: Settings = get_app_settings()
+def sleep_decorator(func):
+ @wraps(func)
+ async def wrapper(*args, **kwargs):
+ await asyncio.sleep(random.uniform(0.1, 20))
+ return await func(*args, **kwargs)
+ return wrapper
-class TaskResult(BaseModel):
- status: str
- message: str = ""
+@sleep_decorator
+async def check_fuel_system(ctx: dict[str, Any], is_successful: bool) -> str:
+ """Check fuel system status."""
+ if is_successful:
+ return FuelSystemStatus(
+ fuel_level=random.uniform(0, 100),
+ pumps_status=random.choice(["active", "inactive"]),
+ ).model_dump_json()
+ raise Exception("Fuel system check failed")
+@sleep_decorator
+async def diagnose_navigation_system(ctx: dict[str, Any], is_successful: bool) -> str:
+ """Diagnose navigation system."""
+ if is_successful:
+ return NavigationSystemDiagnostic(
+ system_status=random.choice(["ok", "error"]),
+ error_code=random.randint(0, 100) if random.random() > 0.5 else None,
+ ).model_dump_json()
+ raise Exception("Navigation system diagnosis failed")
-async def process_task(ctx: dict[str, Any], func: str, success: bool) -> str:
- print(f"Processing task with function: {func} and expected success: {success}")
- await asyncio.sleep(random.randint(1, 15))
- if success:
- print("Task processed successfully")
- return TaskResult(status="ok", message="Task processed successfully").json()
- else:
- print("Task processing failed")
- # raise Exception("Task processing failed")
- raise Retry(10)
+@sleep_decorator
+async def test_life_support_system(ctx: dict[str, Any], is_successful: bool) -> str:
+ """Test life support system."""
+ if is_successful:
+ return LifeSupportSystemTestResult(
+ air_quality=random.choice(["good", "bad"]),
+ water_supply_status=random.choice(["ok", "error"]),
+ ).model_dump_json()
+ raise Exception("Life support system test failed")
+@sleep_decorator
+async def check_communication_system(ctx: dict[str, Any], is_successful: bool) -> str:
+ """Check communication system."""
+ if is_successful:
+ return CommunicationSystemCheck(
+ signal_strength=random.uniform(0, 100),
+ connection_status=random.choice(["ok", "error"]),
+ satellite_link_quality=random.uniform(0, 100),
+ bandwidth=random.uniform(0, 100),
+ latency=random.uniform(0, 100),
+ error_rate=random.uniform(0, 100),
+ frequency_stability=random.choice(["ok", "error"]),
+ encryption_status=random.choice(["ok", "error"]),
+ backup_system_status=random.choice(["active", "inactive"]),
+ hardware_integrity=random.choice(["ok", "error"]),
+ software_version="1.0",
+ last_maintenance_date="2026-01-01",
+ ).model_dump_json()
+ raise Exception("Communication system check failed")
+@sleep_decorator
+async def analyze_launch_readiness(ctx: dict[str, Any], is_successful: bool) -> str:
+ """Analyze launch readiness."""
+ if is_successful:
+ return LaunchReadinessAnalysis(
+ readiness_status=random.choice(["ready", "not ready"]),
+ fuel_system_ready=random.choice([True, False]),
+ navigation_system_ready=random.choice([True, False]),
+ life_support_system_ready=random.choice([True, False]),
+ communication_system_ready=random.choice([True, False]),
+ propulsion_system_status=random.choice(["ok", "error"]),
+ structural_integrity=random.choice(["ok", "error"]),
+ onboard_computer_status=random.choice(["ok", "error"]),
+ launch_pad_systems_ready=random.choice([True, False]),
+ weather_conditions=random.choice(["good", "bad"]),
+ crew_readiness=random.choice([True, False]),
+ issues=["Issue 1", "Issue 2"] if random.random() > 0.5 else [],
+ final_inspection_completed=random.choice([True, False]),
+ ).model_dump_json()
+ raise Exception("Launch readiness analysis failed")
+
class WorkerSettings:
redis_settings = get_redis_settings()
- functions = [process_task]
+ functions = [
+ check_fuel_system,
+ diagnose_navigation_system,
+ test_life_support_system,
+ check_communication_system,
+ analyze_launch_readiness
+ ]
max_tries = 1
+ queue_name = settings.queue_name
+ allow_abort_jobs = True
if __name__ == "__main__":
- run_worker(WorkerSettings)
+ run_worker(WorkerSettings)
\ No newline at end of file
diff --git a/docs/README_en.md b/docs/README_en.md
new file mode 100644
index 0000000..b22e0b5
--- /dev/null
+++ b/docs/README_en.md
@@ -0,0 +1,103 @@
+# arq-ui
+
+This project is a simple interface for monitoring jobs executed using the [arq](https://github.com/samuelcolvin/arq) library.
+
+![arq UI](./screenshot1.png)
+
+## Features
+
+- Providing statistics on job statuses: total number, number of jobs in queue, jobs being executed, and jobs with errors.
+- Visualization of job distribution over time in the form of a timeline.
+- Displaying a list of jobs with filtering, searching, and sorting capabilities.
+- Ability to abort jobs.
+
+## Limitations
+
+- The interface reads data directly from Redis, not storing the state of jobs, which allows tracking tasks only for the last hour. There are plans to add functionality for permanent data storage.
+- `arq` uses the `pickle` library for data serialization, which limits the ability to deserialize objects of custom classes that are not accessible. If `arq-ui` cannot deserialize the data, tasks with such data will not be displayed in the monitoring.
+- To enable job cancellation, the worker must be configured with the `allow_abort_jobs` parameter.
+- Handling a large number of jobs (more than 50,000) may decrease the application's performance.
+- The interface does not have built-in security features; therefore, it should not be exposed publicly without additional security measures.
+
+
+## Demo
+
+[https://demo.arq-ui.florm.io/arq/ui/](https://demo.arq-ui.florm.io/arq/ui/)
+
+## Installation and Launch
+
+### Using Docker
+
+To launch:
+
+```bash
+docker pull antonk0/arq-ui
+docker run -p 8000:8000 -e REDIS_HOST=redis.host.com -e REDIS_PORT=6379 antonk0/arq-ui
+```
+
+docker-compose.yml
+
+```yaml
+version: '3'
+services:
+ arq-ui:
+ image: antonk0/arq-ui:latest
+ restart: unless-stopped
+ environment:
+ - REDIS_HOST=redis.host.com
+ - REDIS_PASSWORD=your_password
+ - REDIS_PORT=6379
+ - REDIS_SSL=False
+ - REDIS_DB=0
+ ports:
+ - 8000:8000
+```
+
+### Running from Source
+
+Download the latest stable release from the GitHub releases, unzip it, and execute the following commands:
+
+```bash
+pdm install
+cd src
+pdm run uvicorn main:app --host=0.0.0.0 --port=8000
+```
+
+If `pdm` is not installed, you can install it using `pip`:
+
+```bash
+pip install pdm
+```
+
+## Configuration
+
+The interface is accessible at `/arq/ui/`. Swagger API documentation is available at `/arq/api/docs/`.
+
+Possible environment variables:
+
+| Variable | Description | Default Value |
+| -------- | ----------- | ------------- |
+| `REDIS_HOST` | Address of the Redis server | `redis` |
+| `REDIS_PORT` | Port of the Redis server | `6379` |
+| `REDIS_PASSWORD` | Password for connecting to Redis | `""` (no password) |
+| `REDIS_SSL` | Whether to use SSL for connecting to Redis | `False` |
+| `REDIS_DB` | Redis database number | `0` |
+| `MAX_JOBS` | Maximum number of tasks that can be displayed in the interface | `50000` |
+| `REQUEST_SEMAPHORE_JOBS` | Number of tasks that can be requested simultaneously | `5` |
+| `QUEUE_NAME` | Name of the queue in Redis | `arq:queue` |
+
+## Development
+
+
+```
+docker compose -f docker-compose.dev.yml build
+docker compose -f docker-compose.dev.yml up
+```
+
+### Links
+
+| Name | Link |
+| ------ | ------ |
+| Frontend | [http://localhost:5173/arq/ui](http://localhost:5173/arq/ui) |
+| Backend | [http://localhost:8000/arq/api/docs](http://localhost:8000/arq/api/docs) |
+| P3X Redis UI | [http://localhost:7843/](http://localhost:7843/) |
\ No newline at end of file
diff --git a/docs/README_ru.md b/docs/README_ru.md
new file mode 100644
index 0000000..1b273c9
--- /dev/null
+++ b/docs/README_ru.md
@@ -0,0 +1,104 @@
+# arq-ui
+
+Этот проект представляет собой простой интерфейс для мониторинга задач, выполняемых с помощью библиотеки [arq](https://github.com/samuelcolvin/arq).
+
+![arq UI](screenshot1.png)
+
+## Возможности
+
+- Предоставление статистики по статусам задач: общее количество, количество задач в очереди, выполняющиеся задачи, задачи с ошибками.
+- Визуализация распределения задач по времени в виде временной шкалы.
+- Отображение списка задач с возможностями фильтрации, поиска и сортировки.
+- Возможность отмены выполнения задач.
+
+## Особенности
+
+- Интерфейс считывает данные непосредственно из Redis, не храня состояние задач, что позволяет отслеживать задачи только за последний час. Планируется добавить функционал постоянного хранения данных.
+- `arq` использует библиотеку `pickle` для сериализации данных, что влечёт за собой ограничение возможности десериализации объектов пользовательских классов, к которым нет доступа. Если `arq-ui` не может десериализовать данные, задачи с такими данными не будут отображаться в мониторинге.
+- Для возможности отмены задач необходимо, чтобы воркер был настроен с параметром `allow_abort_jobs`.
+- При обработке большого количества задач (более 50 000) производительность приложения может снизиться.
+- Интерфейс не обладает встроенными средствами защиты, поэтому его не рекомендуется размещать в открытом доступе без дополнительных мер безопасности.
+
+## Демонстрация
+
+Демо-версия доступна по адресу [https://demo.arq-ui.florm.io/arq/ui/](https://demo.arq-ui.florm.io/arq/ui/).
+
+## Установка и запуск
+
+### Использование Docker
+
+Для запуска:
+
+```bash
+docker pull antonk0/arq-ui
+docker run -p 8000:8000 -e REDIS_HOST=redis.host.com -e REDIS_PORT=6379 antonk0/arq-ui
+```
+
+Пример docker-compose.yml
+
+```yaml
+version: '3'
+services:
+ arq-ui:
+ image: antonk0/arq-ui:latest
+ restart: unless-stopped
+ environment:
+ - REDIS_HOST=redis.host.com
+ - REDIS_PASSWORD=your_password
+ - REDIS_PORT=6379
+ - REDIS_SSL=False
+ - REDIS_DB=0
+ ports:
+ - 8000:8000
+```
+
+### Запуск из исходного кода
+
+Скачайте последний стабильный релиз с GitHub релизов, распакуйте и выполните следующие команды:
+
+```bash
+pdm install
+cd src
+pdm run uvicorn main:app --host=0.0.0.0 --port=8000
+```
+
+Если pdm не установлен, его можно установить с помощью pip:
+
+```bash
+pip install pdm
+```
+
+## Настройка
+
+Интерфейс доступен по адресу `/arq/ui/`. Swagger документация API доступна на `/arq/api/docs/`.
+
+Возможные переменные окружения:
+
+| Переменная | Описание | Значение по умолчанию |
+| ------ | ------ | ------ |
+| REDIS_HOST | Адрес redis сервера | redis |
+| REDIS_PORT | Порт redis сервера | 6379 |
+| REDIS_PASSWORD | Пароль для подключения к redis | "" (нет пароля) |
+| REDIS_SSL | Использовать ли ssl для подключения к redis | False |
+| REDIS_DB | Номер базы данных redis | 0 |
+| MAX_JOBS | Максимальное количество задач, которые могут быть отображены в интерфейсе | 50000 |
+| REQUEST_SEMAPHORE_JOBS | Количество задач, которые могут быть запрошены одновременно | 5 |
+| QUEUE_NAME | Название очереди в redis | arq:queue |
+
+
+
+## Development
+
+
+```
+docker compose -f docker-compose.dev.yml build
+docker compose -f docker-compose.dev.yml up
+```
+
+### Links
+
+| Name | Link |
+| ------ | ------ |
+| Frontend | [http://localhost:5173/arq/ui](http://localhost:5173/arq/ui) |
+| Backend | [http://localhost:8000/arq/api/docs](http://localhost:8000/arq/api/docs) |
+| P3X Redis UI | [http://localhost:7843/](http://localhost:7843/) |
\ No newline at end of file
diff --git a/docs/screenshot1.png b/docs/screenshot1.png
new file mode 100644
index 0000000..ae5a958
Binary files /dev/null and b/docs/screenshot1.png differ
diff --git a/frontend/index.html b/frontend/index.html
index 109bffd..60a3f6b 100644
--- a/frontend/index.html
+++ b/frontend/index.html
@@ -1,9 +1,10 @@
+ arq UI
- arq UI
+
diff --git a/frontend/public/favicon.ico b/frontend/public/favicon.ico
new file mode 100644
index 0000000..ef30ae4
Binary files /dev/null and b/frontend/public/favicon.ico differ
diff --git a/frontend/src/App.tsx b/frontend/src/App.tsx
index 1dcc4b8..9269466 100644
--- a/frontend/src/App.tsx
+++ b/frontend/src/App.tsx
@@ -14,7 +14,7 @@ import "@mantine/core/styles.css";
import "@mantine/notifications/styles.css";
import { Filters } from "./components/filters";
import Footer from "./components/footer";
-import Header from "./components/header";
+import { Header } from "./components/header";
import { Statistics } from "./components/statistics";
import { TableJobs } from "./components/tablejobs";
import { TimeLine } from "./components/timeline";
@@ -38,9 +38,7 @@ function App() {
-
-
diff --git a/frontend/src/components/filters.tsx b/frontend/src/components/filters.tsx
index bbaa91b..d6949ec 100644
--- a/frontend/src/components/filters.tsx
+++ b/frontend/src/components/filters.tsx
@@ -19,12 +19,11 @@ export const Filters = observer(() => {